aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2010-08-04 18:15:15 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2010-08-04 18:15:15 -0400
commit03da30986793385af57eeca3296253c887b742e6 (patch)
tree9c46dbe51c9d0856990649dd917ab45474b7be87
parent6ba74014c1ab0e37af7de6f64b4eccbbae3cb9e7 (diff)
parent339f4f4eab80caa6cf0d39fb057ad6ddb84ba91e (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (276 commits) [SCSI] zfcp: Trigger logging in the FCP channel on qdio error conditions [SCSI] zfcp: Introduce experimental support for DIF/DIX [SCSI] zfcp: Enable data division support for FCP devices [SCSI] zfcp: Prevent access on uninitialized memory. [SCSI] zfcp: Post events through FC transport class [SCSI] zfcp: Cleanup QDIO attachment and improve processing. [SCSI] zfcp: Cleanup function parameters for sbal value. [SCSI] zfcp: Use correct width for timer_interval field [SCSI] zfcp: Remove SCSI device when removing unit [SCSI] zfcp: Use memdup_user and kstrdup [SCSI] zfcp: Fix retry after failed "open port" erp action [SCSI] zfcp: Fail erp after timeout [SCSI] zfcp: Use forced_reopen in terminate_rport_io callback [SCSI] zfcp: Register SCSI devices after successful fc_remote_port_add [SCSI] zfcp: Do not try "forced close" when port is already closed [SCSI] zfcp: Do not unblock rport from REOPEN_PORT_FORCED [SCSI] sd: add support for runtime PM [SCSI] implement runtime Power Management [SCSI] convert to the new PM framework [SCSI] Unify SAM_ and SAM_STAT_ macros ...
-rw-r--r--Documentation/scsi/hpsa.txt107
-rw-r--r--MAINTAINERS8
-rw-r--r--arch/s390/include/asm/qdio.h6
-rw-r--r--drivers/message/fusion/mptbase.c49
-rw-r--r--drivers/message/fusion/mptbase.h13
-rw-r--r--drivers/message/fusion/mptctl.c38
-rw-r--r--drivers/message/fusion/mptfc.c9
-rw-r--r--drivers/message/fusion/mptlan.c4
-rw-r--r--drivers/message/fusion/mptsas.c278
-rw-r--r--drivers/message/fusion/mptsas.h1
-rw-r--r--drivers/message/fusion/mptscsih.c54
-rw-r--r--drivers/message/fusion/mptspi.c9
-rw-r--r--drivers/misc/enclosure.c7
-rw-r--r--drivers/s390/cio/qdio_setup.c2
-rw-r--r--drivers/s390/scsi/zfcp_aux.c10
-rw-r--r--drivers/s390/scsi/zfcp_cfdc.c12
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c5
-rw-r--r--drivers/s390/scsi/zfcp_dbf.h1
-rw-r--r--drivers/s390/scsi/zfcp_def.h5
-rw-r--r--drivers/s390/scsi/zfcp_erp.c24
-rw-r--r--drivers/s390/scsi/zfcp_ext.h11
-rw-r--r--drivers/s390/scsi/zfcp_fc.c54
-rw-r--r--drivers/s390/scsi/zfcp_fc.h27
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c169
-rw-r--r--drivers/s390/scsi/zfcp_fsf.h34
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c206
-rw-r--r--drivers/s390/scsi/zfcp_qdio.h95
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c103
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c12
-rw-r--r--drivers/scsi/Kconfig4
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/aacraid/linit.c14
-rw-r--r--drivers/scsi/aic7xxx/aic7770.c12
-rw-r--r--drivers/scsi/aic7xxx/aic7770_osm.c2
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_core.c624
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c108
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.h7
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm_pci.c8
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_pci.c56
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_proc.c13
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_93cx6.c10
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c430
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c76
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.h7
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm_pci.c8
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_pci.c74
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_proc.c15
-rw-r--r--drivers/scsi/aic94xx/aic94xx_task.c2
-rw-r--r--drivers/scsi/arcmsr/arcmsr.h315
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c2309
-rw-r--r--drivers/scsi/be2iscsi/Kconfig2
-rw-r--r--drivers/scsi/be2iscsi/be.h6
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c116
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.h27
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c199
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.h2
-rw-r--r--drivers/scsi/be2iscsi/be_main.c333
-rw-r--r--drivers/scsi/be2iscsi/be_main.h29
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c64
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.h19
-rw-r--r--drivers/scsi/bfa/Makefile2
-rw-r--r--drivers/scsi/bfa/bfa_cb_ioim_macros.h7
-rw-r--r--drivers/scsi/bfa/bfa_core.c1
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c29
-rw-r--r--drivers/scsi/bfa/bfa_fcpim_priv.h6
-rw-r--r--drivers/scsi/bfa/bfa_fcport.c139
-rw-r--r--drivers/scsi/bfa/bfa_fcs.c10
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c3
-rw-r--r--drivers/scsi/bfa/bfa_fcxp.c14
-rw-r--r--drivers/scsi/bfa/bfa_fwimg_priv.h25
-rw-r--r--drivers/scsi/bfa/bfa_hw_cb.c7
-rw-r--r--drivers/scsi/bfa/bfa_hw_ct.c7
-rw-r--r--drivers/scsi/bfa/bfa_intr.c1
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c172
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h12
-rw-r--r--drivers/scsi/bfa/bfa_ioc_cb.c36
-rw-r--r--drivers/scsi/bfa/bfa_ioc_ct.c71
-rw-r--r--drivers/scsi/bfa/bfa_iocfc.c107
-rw-r--r--drivers/scsi/bfa/bfa_iocfc.h19
-rw-r--r--drivers/scsi/bfa/bfa_ioim.c63
-rw-r--r--drivers/scsi/bfa/bfa_log_module.c86
-rw-r--r--drivers/scsi/bfa/bfa_lps.c6
-rw-r--r--drivers/scsi/bfa/bfa_port.c31
-rw-r--r--drivers/scsi/bfa/bfa_port_priv.h7
-rw-r--r--drivers/scsi/bfa/bfa_priv.h3
-rw-r--r--drivers/scsi/bfa/bfa_rport.c5
-rw-r--r--drivers/scsi/bfa/bfa_sgpg.c5
-rw-r--r--drivers/scsi/bfa/bfa_uf.c10
-rw-r--r--drivers/scsi/bfa/bfad.c114
-rw-r--r--drivers/scsi/bfa/bfad_attr.c46
-rw-r--r--drivers/scsi/bfa/bfad_debugfs.c547
-rw-r--r--drivers/scsi/bfa/bfad_drv.h36
-rw-r--r--drivers/scsi/bfa/bfad_fwimg.c76
-rw-r--r--drivers/scsi/bfa/bfad_im.c33
-rw-r--r--drivers/scsi/bfa/bfad_im_compat.h13
-rw-r--r--drivers/scsi/bfa/bfad_intr.c8
-rw-r--r--drivers/scsi/bfa/fabric.c44
-rw-r--r--drivers/scsi/bfa/fcpim.c32
-rw-r--r--drivers/scsi/bfa/fcs_fabric.h5
-rw-r--r--drivers/scsi/bfa/fcs_rport.h3
-rw-r--r--drivers/scsi/bfa/fcs_vport.h1
-rw-r--r--drivers/scsi/bfa/fdmi.c6
-rw-r--r--drivers/scsi/bfa/include/aen/bfa_aen_ioc.h8
-rw-r--r--drivers/scsi/bfa/include/bfa.h4
-rw-r--r--drivers/scsi/bfa/include/bfa_fcpim.h20
-rw-r--r--drivers/scsi/bfa/include/bfa_svc.h1
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_ctreg.h3
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_ioc.h20
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_iocfc.h2
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_pbc.h62
-rw-r--r--drivers/scsi/bfa/include/cna/port/bfa_port.h1
-rw-r--r--drivers/scsi/bfa/include/cs/bfa_debug.h3
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_adapter.h3
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_auth.h6
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_boot.h10
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_driver.h2
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_fcport.h26
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_ioc.h7
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h12
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_itnim.h10
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_mfg.h41
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_pci.h11
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_port.h14
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_pport.h29
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_status.h46
-rw-r--r--drivers/scsi/bfa/include/fcb/bfa_fcb_vport.h3
-rw-r--r--drivers/scsi/bfa/include/fcs/bfa_fcs.h4
-rw-r--r--drivers/scsi/bfa/include/fcs/bfa_fcs_fcpim.h1
-rw-r--r--drivers/scsi/bfa/include/fcs/bfa_fcs_rport.h1
-rw-r--r--drivers/scsi/bfa/include/fcs/bfa_fcs_vport.h4
-rw-r--r--drivers/scsi/bfa/include/log/bfa_log_linux.h6
-rw-r--r--drivers/scsi/bfa/include/protocol/fc.h1
-rw-r--r--drivers/scsi/bfa/lport_api.c30
-rw-r--r--drivers/scsi/bfa/ms.c9
-rw-r--r--drivers/scsi/bfa/ns.c14
-rw-r--r--drivers/scsi/bfa/rport.c88
-rw-r--r--drivers/scsi/bfa/rport_api.c11
-rw-r--r--drivers/scsi/bfa/rport_ftrs.c14
-rw-r--r--drivers/scsi/bfa/scn.c2
-rw-r--r--drivers/scsi/bfa/vport.c54
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h14
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c4
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c37
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c236
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_ddp.c2
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_offload.c5
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c1
-rw-r--r--drivers/scsi/dpt_i2o.c26
-rw-r--r--drivers/scsi/fcoe/fcoe.c147
-rw-r--r--drivers/scsi/fcoe/libfcoe.c1519
-rw-r--r--drivers/scsi/fnic/fnic_main.c11
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c22
-rw-r--r--drivers/scsi/hosts.c14
-rw-r--r--drivers/scsi/hpsa.c754
-rw-r--r--drivers/scsi/hpsa.h1
-rw-r--r--drivers/scsi/hpsa_cmd.h4
-rw-r--r--drivers/scsi/hptiop.c2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c85
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h6
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c157
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.h4
-rw-r--r--drivers/scsi/ibmvscsi/ibmvstgt.c4
-rw-r--r--drivers/scsi/ibmvscsi/rpa_vscsi.c16
-rw-r--r--drivers/scsi/ipr.c148
-rw-r--r--drivers/scsi/ipr.h30
-rw-r--r--drivers/scsi/libfc/fc_disc.c39
-rw-r--r--drivers/scsi/libfc/fc_elsct.c2
-rw-r--r--drivers/scsi/libfc/fc_exch.c215
-rw-r--r--drivers/scsi/libfc/fc_fcp.c15
-rw-r--r--drivers/scsi/libfc/fc_libfc.c78
-rw-r--r--drivers/scsi/libfc/fc_libfc.h2
-rw-r--r--drivers/scsi/libfc/fc_lport.c210
-rw-r--r--drivers/scsi/libfc/fc_rport.c707
-rw-r--r--drivers/scsi/libsas/sas_ata.c12
-rw-r--r--drivers/scsi/libsas/sas_expander.c2
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c4
-rw-r--r--drivers/scsi/libsas/sas_task.c6
-rw-r--r--drivers/scsi/lpfc/lpfc.h17
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c96
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c36
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h11
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c112
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c220
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h17
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c279
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c23
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c31
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c86
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c345
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h8
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2.h17
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h193
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_init.h17
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_ioc.h119
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c172
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h34
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_config.c6
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.c411
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c744
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_transport.c575
-rw-r--r--drivers/scsi/mvsas/mv_sas.c20
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c14
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c4
-rw-r--r--drivers/scsi/pmcraid.c893
-rw-r--r--drivers/scsi/pmcraid.h305
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c33
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c294
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.h9
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h54
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h29
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c74
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c178
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c257
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c206
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c158
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c61
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c550
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h43
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c109
-rw-r--r--drivers/scsi/qla2xxx/qla_settings.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c49
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h8
-rw-r--r--drivers/scsi/qla4xxx/Kconfig8
-rw-r--r--drivers/scsi/qla4xxx/Makefile2
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h143
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h139
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h106
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c240
-rw-r--r--drivers/scsi/qla4xxx/ql4_inline.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_iocb.c73
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c396
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c191
-rw-r--r--drivers/scsi/qla4xxx/ql4_nvram.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_nvram.h10
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c2321
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.h779
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c758
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h2
-rw-r--r--drivers/scsi/scsi_debug.c6
-rw-r--r--drivers/scsi/scsi_error.c29
-rw-r--r--drivers/scsi/scsi_pm.c206
-rw-r--r--drivers/scsi/scsi_priv.h19
-rw-r--r--drivers/scsi/scsi_scan.c24
-rw-r--r--drivers/scsi/scsi_sysfs.c68
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c81
-rw-r--r--drivers/scsi/sd.c21
-rw-r--r--drivers/scsi/sg.c12
-rw-r--r--include/scsi/fc/fc_els.h11
-rw-r--r--include/scsi/fc/fc_fcoe.h15
-rw-r--r--include/scsi/fc/fc_fip.h46
-rw-r--r--include/scsi/fc/fc_ns.h7
-rw-r--r--include/scsi/fc_encode.h7
-rw-r--r--include/scsi/fc_frame.h52
-rw-r--r--include/scsi/iscsi_if.h2
-rw-r--r--include/scsi/libfc.h75
-rw-r--r--include/scsi/libfcoe.h72
-rw-r--r--include/scsi/libsas.h11
-rw-r--r--include/scsi/scsi_device.h8
-rw-r--r--include/scsi/scsi_transport_iscsi.h2
268 files changed, 19599 insertions, 6501 deletions
diff --git a/Documentation/scsi/hpsa.txt b/Documentation/scsi/hpsa.txt
new file mode 100644
index 000000000000..dca658362cbf
--- /dev/null
+++ b/Documentation/scsi/hpsa.txt
@@ -0,0 +1,107 @@
1
2HPSA - Hewlett Packard Smart Array driver
3-----------------------------------------
4
5This file describes the hpsa SCSI driver for HP Smart Array controllers.
6The hpsa driver is intended to supplant the cciss driver for newer
7Smart Array controllers. The hpsa driver is a SCSI driver, while the
8cciss driver is a "block" driver. Actually cciss is both a block
9driver (for logical drives) AND a SCSI driver (for tape drives). This
10"split-brained" design of the cciss driver is a source of excess
11complexity and eliminating that complexity is one of the reasons
12for hpsa to exist.
13
14Supported devices:
15------------------
16
17Smart Array P212
18Smart Array P410
19Smart Array P410i
20Smart Array P411
21Smart Array P812
22Smart Array P712m
23Smart Array P711m
24StorageWorks P1210m
25
26Additionally, older Smart Arrays may work with the hpsa driver if the kernel
27boot parameter "hpsa_allow_any=1" is specified, however these are not tested
28nor supported by HP with this driver. For older Smart Arrays, the cciss
29driver should still be used.
30
31HPSA specific entries in /sys
32-----------------------------
33
34 In addition to the generic SCSI attributes available in /sys, hpsa supports
35 the following attributes:
36
37 HPSA specific host attributes:
38 ------------------------------
39
40 /sys/class/scsi_host/host*/rescan
41 /sys/class/scsi_host/host*/firmware_revision
42
43 the host "rescan" attribute is a write only attribute. Writing to this
44 attribute will cause the driver to scan for new, changed, or removed devices
45 (e.g. hot-plugged tape drives, or newly configured or deleted logical drives,
46 etc.) and notify the SCSI midlayer of any changes detected. Normally this is
47 triggered automatically by HP's Array Configuration Utility (either the GUI or
48 command line variety) so for logical drive changes, the user should not
49 normally have to use this. It may be useful when hot plugging devices like
50 tape drives, or entire storage boxes containing pre-configured logical drives.
51
52 The "firmware_revision" attribute contains the firmware version of the Smart Array.
53 For example:
54
55 root@host:/sys/class/scsi_host/host4# cat firmware_revision
56 7.14
57
58 HPSA specific disk attributes:
59 ------------------------------
60
61 /sys/class/scsi_disk/c:b:t:l/device/unique_id
62 /sys/class/scsi_disk/c:b:t:l/device/raid_level
63 /sys/class/scsi_disk/c:b:t:l/device/lunid
64
65 (where c:b:t:l are the controller, bus, target and lun of the device)
66
67 For example:
68
69 root@host:/sys/class/scsi_disk/4:0:0:0/device# cat unique_id
70 600508B1001044395355323037570F77
71 root@host:/sys/class/scsi_disk/4:0:0:0/device# cat lunid
72 0x0000004000000000
73 root@host:/sys/class/scsi_disk/4:0:0:0/device# cat raid_level
74 RAID 0
75
76HPSA specific ioctls:
77---------------------
78
79 For compatibility with applications written for the cciss driver, many, but
80 not all of the ioctls supported by the cciss driver are also supported by the
81 hpsa driver. The data structures used by these are described in
82 include/linux/cciss_ioctl.h
83
84 CCISS_DEREGDISK
85 CCISS_REGNEWDISK
86 CCISS_REGNEWD
87
88 The above three ioctls all do exactly the same thing, which is to cause the driver
89 to rescan for new devices. This does exactly the same thing as writing to the
90 hpsa specific host "rescan" attribute.
91
92 CCISS_GETPCIINFO
93
94 Returns PCI domain, bus, device and function and "board ID" (PCI subsystem ID).
95
96 CCISS_GETDRIVVER
97
98 Returns driver version in three bytes encoded as:
99 (major_version << 16) | (minor_version << 8) | (subminor_version)
100
101 CCISS_PASSTHRU
102 CCISS_BIG_PASSTHRU
103
104 Allows "BMIC" and "CISS" commands to be passed through to the Smart Array.
105 These are used extensively by the HP Array Configuration Utility, SNMP storage
106 agents, etc. See cciss_vol_status at http://cciss.sf.net for some examples.
107
diff --git a/MAINTAINERS b/MAINTAINERS
index 7c3b67c34e52..88ec0447a4f3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2625,6 +2625,14 @@ S: Maintained
2625F: Documentation/blockdev/cpqarray.txt 2625F: Documentation/blockdev/cpqarray.txt
2626F: drivers/block/cpqarray.* 2626F: drivers/block/cpqarray.*
2627 2627
2628HEWLETT-PACKARD SMART ARRAY RAID DRIVER (hpsa)
2629M: Stephen M. Cameron <scameron@beardog.cce.hp.com>
2630L: iss_storagedev@hp.com
2631S: Supported
2632F: Documentation/scsi/hpsa.txt
2633F: drivers/scsi/hpsa*.[ch]
2634F: include/linux/cciss*.h
2635
2628HEWLETT-PACKARD SMART CISS RAID DRIVER (cciss) 2636HEWLETT-PACKARD SMART CISS RAID DRIVER (cciss)
2629M: Mike Miller <mike.miller@hp.com> 2637M: Mike Miller <mike.miller@hp.com>
2630L: iss_storagedev@hp.com 2638L: iss_storagedev@hp.com
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 0eaae6260274..2ba630276295 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -84,6 +84,7 @@ struct qdr {
84 84
85#define QIB_AC_OUTBOUND_PCI_SUPPORTED 0x40 85#define QIB_AC_OUTBOUND_PCI_SUPPORTED 0x40
86#define QIB_RFLAGS_ENABLE_QEBSM 0x80 86#define QIB_RFLAGS_ENABLE_QEBSM 0x80
87#define QIB_RFLAGS_ENABLE_DATA_DIV 0x02
87 88
88/** 89/**
89 * struct qib - queue information block (QIB) 90 * struct qib - queue information block (QIB)
@@ -284,6 +285,9 @@ struct slsb {
284 u8 val[QDIO_MAX_BUFFERS_PER_Q]; 285 u8 val[QDIO_MAX_BUFFERS_PER_Q];
285} __attribute__ ((packed, aligned(256))); 286} __attribute__ ((packed, aligned(256)));
286 287
288#define CHSC_AC2_DATA_DIV_AVAILABLE 0x0010
289#define CHSC_AC2_DATA_DIV_ENABLED 0x0002
290
287struct qdio_ssqd_desc { 291struct qdio_ssqd_desc {
288 u8 flags; 292 u8 flags;
289 u8:8; 293 u8:8;
@@ -332,6 +336,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
332 * @adapter_name: name for the adapter 336 * @adapter_name: name for the adapter
333 * @qib_param_field_format: format for qib_parm_field 337 * @qib_param_field_format: format for qib_parm_field
334 * @qib_param_field: pointer to 128 bytes or NULL, if no param field 338 * @qib_param_field: pointer to 128 bytes or NULL, if no param field
339 * @qib_rflags: rflags to set
335 * @input_slib_elements: pointer to no_input_qs * 128 words of data or NULL 340 * @input_slib_elements: pointer to no_input_qs * 128 words of data or NULL
336 * @output_slib_elements: pointer to no_output_qs * 128 words of data or NULL 341 * @output_slib_elements: pointer to no_output_qs * 128 words of data or NULL
337 * @no_input_qs: number of input queues 342 * @no_input_qs: number of input queues
@@ -348,6 +353,7 @@ struct qdio_initialize {
348 unsigned char adapter_name[8]; 353 unsigned char adapter_name[8];
349 unsigned int qib_param_field_format; 354 unsigned int qib_param_field_format;
350 unsigned char *qib_param_field; 355 unsigned char *qib_param_field;
356 unsigned char qib_rflags;
351 unsigned long *input_slib_elements; 357 unsigned long *input_slib_elements;
352 unsigned long *output_slib_elements; 358 unsigned long *output_slib_elements;
353 unsigned int no_input_qs; 359 unsigned int no_input_qs;
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index a6a57011ba6c..2a52559058a9 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -115,6 +115,7 @@ MODULE_PARM_DESC(mpt_fwfault_debug, "Enable detection of Firmware fault"
115 " and halt Firmware on fault - (default=0)"); 115 " and halt Firmware on fault - (default=0)");
116 116
117 117
118static char MptCallbacksName[MPT_MAX_PROTOCOL_DRIVERS][50];
118 119
119#ifdef MFCNT 120#ifdef MFCNT
120static int mfcounter = 0; 121static int mfcounter = 0;
@@ -213,7 +214,7 @@ static int ProcessEventNotification(MPT_ADAPTER *ioc,
213static void mpt_iocstatus_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf); 214static void mpt_iocstatus_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf);
214static void mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info); 215static void mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info);
215static void mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info); 216static void mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info);
216static void mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info); 217static void mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info , u8 cb_idx);
217static int mpt_read_ioc_pg_3(MPT_ADAPTER *ioc); 218static int mpt_read_ioc_pg_3(MPT_ADAPTER *ioc);
218static void mpt_inactive_raid_list_free(MPT_ADAPTER *ioc); 219static void mpt_inactive_raid_list_free(MPT_ADAPTER *ioc);
219 220
@@ -490,7 +491,7 @@ mpt_reply(MPT_ADAPTER *ioc, u32 pa)
490 else if (ioc->bus_type == SPI) 491 else if (ioc->bus_type == SPI)
491 mpt_spi_log_info(ioc, log_info); 492 mpt_spi_log_info(ioc, log_info);
492 else if (ioc->bus_type == SAS) 493 else if (ioc->bus_type == SAS)
493 mpt_sas_log_info(ioc, log_info); 494 mpt_sas_log_info(ioc, log_info, cb_idx);
494 } 495 }
495 496
496 if (ioc_stat & MPI_IOCSTATUS_MASK) 497 if (ioc_stat & MPI_IOCSTATUS_MASK)
@@ -644,7 +645,7 @@ mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
644 * considered an error by the caller. 645 * considered an error by the caller.
645 */ 646 */
646u8 647u8
647mpt_register(MPT_CALLBACK cbfunc, MPT_DRIVER_CLASS dclass) 648mpt_register(MPT_CALLBACK cbfunc, MPT_DRIVER_CLASS dclass, char *func_name)
648{ 649{
649 u8 cb_idx; 650 u8 cb_idx;
650 last_drv_idx = MPT_MAX_PROTOCOL_DRIVERS; 651 last_drv_idx = MPT_MAX_PROTOCOL_DRIVERS;
@@ -659,6 +660,8 @@ mpt_register(MPT_CALLBACK cbfunc, MPT_DRIVER_CLASS dclass)
659 MptDriverClass[cb_idx] = dclass; 660 MptDriverClass[cb_idx] = dclass;
660 MptEvHandlers[cb_idx] = NULL; 661 MptEvHandlers[cb_idx] = NULL;
661 last_drv_idx = cb_idx; 662 last_drv_idx = cb_idx;
663 memcpy(MptCallbacksName[cb_idx], func_name,
664 strlen(func_name) > 50 ? 50 : strlen(func_name));
662 break; 665 break;
663 } 666 }
664 } 667 }
@@ -1632,6 +1635,7 @@ mpt_mapresources(MPT_ADAPTER *ioc)
1632 } else { 1635 } else {
1633 printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n", 1636 printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
1634 ioc->name, pci_name(pdev)); 1637 ioc->name, pci_name(pdev));
1638 pci_release_selected_regions(pdev, ioc->bars);
1635 return r; 1639 return r;
1636 } 1640 }
1637 } else { 1641 } else {
@@ -1645,6 +1649,7 @@ mpt_mapresources(MPT_ADAPTER *ioc)
1645 } else { 1649 } else {
1646 printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n", 1650 printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
1647 ioc->name, pci_name(pdev)); 1651 ioc->name, pci_name(pdev));
1652 pci_release_selected_regions(pdev, ioc->bars);
1648 return r; 1653 return r;
1649 } 1654 }
1650 } 1655 }
@@ -1675,6 +1680,7 @@ mpt_mapresources(MPT_ADAPTER *ioc)
1675 if (mem == NULL) { 1680 if (mem == NULL) {
1676 printk(MYIOC_s_ERR_FMT ": ERROR - Unable to map adapter" 1681 printk(MYIOC_s_ERR_FMT ": ERROR - Unable to map adapter"
1677 " memory!\n", ioc->name); 1682 " memory!\n", ioc->name);
1683 pci_release_selected_regions(pdev, ioc->bars);
1678 return -EINVAL; 1684 return -EINVAL;
1679 } 1685 }
1680 ioc->memmap = mem; 1686 ioc->memmap = mem;
@@ -1770,7 +1776,6 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1770 ioc->req_sz = MPT_DEFAULT_FRAME_SIZE; /* avoid div by zero! */ 1776 ioc->req_sz = MPT_DEFAULT_FRAME_SIZE; /* avoid div by zero! */
1771 ioc->reply_sz = MPT_REPLY_FRAME_SIZE; 1777 ioc->reply_sz = MPT_REPLY_FRAME_SIZE;
1772 1778
1773 ioc->pcidev = pdev;
1774 1779
1775 spin_lock_init(&ioc->taskmgmt_lock); 1780 spin_lock_init(&ioc->taskmgmt_lock);
1776 mutex_init(&ioc->internal_cmds.mutex); 1781 mutex_init(&ioc->internal_cmds.mutex);
@@ -1913,6 +1918,9 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1913 ioc->msi_enable = 0; 1918 ioc->msi_enable = 0;
1914 break; 1919 break;
1915 } 1920 }
1921
1922 ioc->fw_events_off = 1;
1923
1916 if (ioc->errata_flag_1064) 1924 if (ioc->errata_flag_1064)
1917 pci_disable_io_access(pdev); 1925 pci_disable_io_access(pdev);
1918 1926
@@ -2051,7 +2059,6 @@ mpt_detach(struct pci_dev *pdev)
2051 2059
2052 mpt_adapter_dispose(ioc); 2060 mpt_adapter_dispose(ioc);
2053 2061
2054 pci_set_drvdata(pdev, NULL);
2055} 2062}
2056 2063
2057/************************************************************************** 2064/**************************************************************************
@@ -5062,8 +5069,9 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
5062 if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) 5069 if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
5063 goto out; 5070 goto out;
5064 if (!timeleft) { 5071 if (!timeleft) {
5065 printk(KERN_DEBUG "%s: Issuing Reset from %s!!\n", 5072 printk(MYIOC_s_WARN_FMT
5066 ioc->name, __func__); 5073 "Issuing Reset from %s!!, doorbell=0x%08x\n",
5074 ioc->name, __func__, mpt_GetIocState(ioc, 0));
5067 mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP); 5075 mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
5068 mpt_free_msg_frame(ioc, mf); 5076 mpt_free_msg_frame(ioc, mf);
5069 } 5077 }
@@ -6454,8 +6462,9 @@ out:
6454 mutex_unlock(&ioc->mptbase_cmds.mutex); 6462 mutex_unlock(&ioc->mptbase_cmds.mutex);
6455 if (issue_hard_reset) { 6463 if (issue_hard_reset) {
6456 issue_hard_reset = 0; 6464 issue_hard_reset = 0;
6457 printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n", 6465 printk(MYIOC_s_WARN_FMT
6458 ioc->name, __func__); 6466 "Issuing Reset from %s!!, doorbell=0x%08x\n",
6467 ioc->name, __func__, mpt_GetIocState(ioc, 0));
6459 if (retry_count == 0) { 6468 if (retry_count == 0) {
6460 if (mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP) != 0) 6469 if (mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP) != 0)
6461 retry_count++; 6470 retry_count++;
@@ -6971,6 +6980,7 @@ mpt_SoftResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
6971 6980
6972 spin_lock_irqsave(&ioc->taskmgmt_lock, flags); 6981 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
6973 if (ioc->taskmgmt_in_progress) { 6982 if (ioc->taskmgmt_in_progress) {
6983 ioc->ioc_reset_in_progress = 0;
6974 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); 6984 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
6975 return -1; 6985 return -1;
6976 } 6986 }
@@ -7144,7 +7154,8 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
7144 rc = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_RECOVER, sleepFlag); 7154 rc = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_RECOVER, sleepFlag);
7145 if (rc != 0) { 7155 if (rc != 0) {
7146 printk(KERN_WARNING MYNAM 7156 printk(KERN_WARNING MYNAM
7147 ": WARNING - (%d) Cannot recover %s\n", rc, ioc->name); 7157 ": WARNING - (%d) Cannot recover %s, doorbell=0x%08x\n",
7158 rc, ioc->name, mpt_GetIocState(ioc, 0));
7148 } else { 7159 } else {
7149 if (ioc->hard_resets < -1) 7160 if (ioc->hard_resets < -1)
7150 ioc->hard_resets++; 7161 ioc->hard_resets++;
@@ -7997,7 +8008,7 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info)
7997 * Refer to lsi/mpi_log_sas.h. 8008 * Refer to lsi/mpi_log_sas.h.
7998 **/ 8009 **/
7999static void 8010static void
8000mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info) 8011mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info, u8 cb_idx)
8001{ 8012{
8002union loginfo_type { 8013union loginfo_type {
8003 u32 loginfo; 8014 u32 loginfo;
@@ -8051,21 +8062,22 @@ union loginfo_type {
8051 if (sub_code_desc != NULL) 8062 if (sub_code_desc != NULL)
8052 printk(MYIOC_s_INFO_FMT 8063 printk(MYIOC_s_INFO_FMT
8053 "LogInfo(0x%08x): Originator={%s}, Code={%s}," 8064 "LogInfo(0x%08x): Originator={%s}, Code={%s},"
8054 " SubCode={%s}\n", 8065 " SubCode={%s} cb_idx %s\n",
8055 ioc->name, log_info, originator_desc, code_desc, 8066 ioc->name, log_info, originator_desc, code_desc,
8056 sub_code_desc); 8067 sub_code_desc, MptCallbacksName[cb_idx]);
8057 else if (code_desc != NULL) 8068 else if (code_desc != NULL)
8058 printk(MYIOC_s_INFO_FMT 8069 printk(MYIOC_s_INFO_FMT
8059 "LogInfo(0x%08x): Originator={%s}, Code={%s}," 8070 "LogInfo(0x%08x): Originator={%s}, Code={%s},"
8060 " SubCode(0x%04x)\n", 8071 " SubCode(0x%04x) cb_idx %s\n",
8061 ioc->name, log_info, originator_desc, code_desc, 8072 ioc->name, log_info, originator_desc, code_desc,
8062 sas_loginfo.dw.subcode); 8073 sas_loginfo.dw.subcode, MptCallbacksName[cb_idx]);
8063 else 8074 else
8064 printk(MYIOC_s_INFO_FMT 8075 printk(MYIOC_s_INFO_FMT
8065 "LogInfo(0x%08x): Originator={%s}, Code=(0x%02x)," 8076 "LogInfo(0x%08x): Originator={%s}, Code=(0x%02x),"
8066 " SubCode(0x%04x)\n", 8077 " SubCode(0x%04x) cb_idx %s\n",
8067 ioc->name, log_info, originator_desc, 8078 ioc->name, log_info, originator_desc,
8068 sas_loginfo.dw.code, sas_loginfo.dw.subcode); 8079 sas_loginfo.dw.code, sas_loginfo.dw.subcode,
8080 MptCallbacksName[cb_idx]);
8069} 8081}
8070 8082
8071/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 8083/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -8430,7 +8442,8 @@ fusion_init(void)
8430 /* Register ourselves (mptbase) in order to facilitate 8442 /* Register ourselves (mptbase) in order to facilitate
8431 * EventNotification handling. 8443 * EventNotification handling.
8432 */ 8444 */
8433 mpt_base_index = mpt_register(mptbase_reply, MPTBASE_DRIVER); 8445 mpt_base_index = mpt_register(mptbase_reply, MPTBASE_DRIVER,
8446 "mptbase_reply");
8434 8447
8435 /* Register for hard reset handling callbacks. 8448 /* Register for hard reset handling callbacks.
8436 */ 8449 */
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index b613eb3d4706..23ed3dec72a5 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
76#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR 76#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR
77#endif 77#endif
78 78
79#define MPT_LINUX_VERSION_COMMON "3.04.15" 79#define MPT_LINUX_VERSION_COMMON "3.04.17"
80#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.15" 80#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.17"
81#define WHAT_MAGIC_STRING "@" "(" "#" ")" 81#define WHAT_MAGIC_STRING "@" "(" "#" ")"
82 82
83#define show_mptmod_ver(s,ver) \ 83#define show_mptmod_ver(s,ver) \
@@ -396,6 +396,8 @@ typedef struct _VirtTarget {
396 u8 raidVolume; /* set, if RAID Volume */ 396 u8 raidVolume; /* set, if RAID Volume */
397 u8 type; /* byte 0 of Inquiry data */ 397 u8 type; /* byte 0 of Inquiry data */
398 u8 deleted; /* target in process of being removed */ 398 u8 deleted; /* target in process of being removed */
399 u8 inDMD; /* currently in the device
400 removal delay timer */
399 u32 num_luns; 401 u32 num_luns;
400} VirtTarget; 402} VirtTarget;
401 403
@@ -580,6 +582,7 @@ struct mptfc_rport_info
580typedef void (*MPT_ADD_SGE)(void *pAddr, u32 flagslength, dma_addr_t dma_addr); 582typedef void (*MPT_ADD_SGE)(void *pAddr, u32 flagslength, dma_addr_t dma_addr);
581typedef void (*MPT_ADD_CHAIN)(void *pAddr, u8 next, u16 length, 583typedef void (*MPT_ADD_CHAIN)(void *pAddr, u8 next, u16 length,
582 dma_addr_t dma_addr); 584 dma_addr_t dma_addr);
585typedef void (*MPT_SCHEDULE_TARGET_RESET)(void *ioc);
583 586
584/* 587/*
585 * Adapter Structure - pci_dev specific. Maximum: MPT_MAX_ADAPTERS 588 * Adapter Structure - pci_dev specific. Maximum: MPT_MAX_ADAPTERS
@@ -601,7 +604,7 @@ typedef struct _MPT_ADAPTER
601 u16 nvdata_version_default; 604 u16 nvdata_version_default;
602 int debug_level; 605 int debug_level;
603 u8 io_missing_delay; 606 u8 io_missing_delay;
604 u8 device_missing_delay; 607 u16 device_missing_delay;
605 SYSIF_REGS __iomem *chip; /* == c8817000 (mmap) */ 608 SYSIF_REGS __iomem *chip; /* == c8817000 (mmap) */
606 SYSIF_REGS __iomem *pio_chip; /* Programmed IO (downloadboot) */ 609 SYSIF_REGS __iomem *pio_chip; /* Programmed IO (downloadboot) */
607 u8 bus_type; 610 u8 bus_type;
@@ -738,6 +741,7 @@ typedef struct _MPT_ADAPTER
738 int taskmgmt_in_progress; 741 int taskmgmt_in_progress;
739 u8 taskmgmt_quiesce_io; 742 u8 taskmgmt_quiesce_io;
740 u8 ioc_reset_in_progress; 743 u8 ioc_reset_in_progress;
744 MPT_SCHEDULE_TARGET_RESET schedule_target_reset;
741 struct work_struct sas_persist_task; 745 struct work_struct sas_persist_task;
742 746
743 struct work_struct fc_setup_reset_work; 747 struct work_struct fc_setup_reset_work;
@@ -922,7 +926,8 @@ extern void mpt_detach(struct pci_dev *pdev);
922extern int mpt_suspend(struct pci_dev *pdev, pm_message_t state); 926extern int mpt_suspend(struct pci_dev *pdev, pm_message_t state);
923extern int mpt_resume(struct pci_dev *pdev); 927extern int mpt_resume(struct pci_dev *pdev);
924#endif 928#endif
925extern u8 mpt_register(MPT_CALLBACK cbfunc, MPT_DRIVER_CLASS dclass); 929extern u8 mpt_register(MPT_CALLBACK cbfunc, MPT_DRIVER_CLASS dclass,
930 char *func_name);
926extern void mpt_deregister(u8 cb_idx); 931extern void mpt_deregister(u8 cb_idx);
927extern int mpt_event_register(u8 cb_idx, MPT_EVHANDLER ev_cbfunc); 932extern int mpt_event_register(u8 cb_idx, MPT_EVHANDLER ev_cbfunc);
928extern void mpt_event_deregister(u8 cb_idx); 933extern void mpt_event_deregister(u8 cb_idx);
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index f06b29193b4e..d8ddfdf8be14 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -261,10 +261,16 @@ mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
261 /* We are done, issue wake up 261 /* We are done, issue wake up
262 */ 262 */
263 if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_PENDING) { 263 if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_PENDING) {
264 if (req->u.hdr.Function == MPI_FUNCTION_SCSI_TASK_MGMT) 264 if (req->u.hdr.Function == MPI_FUNCTION_SCSI_TASK_MGMT) {
265 mpt_clear_taskmgmt_in_progress_flag(ioc); 265 mpt_clear_taskmgmt_in_progress_flag(ioc);
266 ioc->ioctl_cmds.status &= ~MPT_MGMT_STATUS_PENDING; 266 ioc->ioctl_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
267 complete(&ioc->ioctl_cmds.done); 267 complete(&ioc->ioctl_cmds.done);
268 if (ioc->bus_type == SAS)
269 ioc->schedule_target_reset(ioc);
270 } else {
271 ioc->ioctl_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
272 complete(&ioc->ioctl_cmds.done);
273 }
268 } 274 }
269 275
270 out_continuation: 276 out_continuation:
@@ -298,6 +304,8 @@ mptctl_taskmgmt_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
298 mpt_clear_taskmgmt_in_progress_flag(ioc); 304 mpt_clear_taskmgmt_in_progress_flag(ioc);
299 ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING; 305 ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
300 complete(&ioc->taskmgmt_cmds.done); 306 complete(&ioc->taskmgmt_cmds.done);
307 if (ioc->bus_type == SAS)
308 ioc->schedule_target_reset(ioc);
301 return 1; 309 return 1;
302 } 310 }
303 return 0; 311 return 0;
@@ -946,9 +954,12 @@ retry_wait:
946 mpt_free_msg_frame(iocp, mf); 954 mpt_free_msg_frame(iocp, mf);
947 goto fwdl_out; 955 goto fwdl_out;
948 } 956 }
949 if (!timeleft) 957 if (!timeleft) {
958 printk(MYIOC_s_WARN_FMT
959 "FW download timeout, doorbell=0x%08x\n",
960 iocp->name, mpt_GetIocState(iocp, 0));
950 mptctl_timeout_expired(iocp, mf); 961 mptctl_timeout_expired(iocp, mf);
951 else 962 } else
952 goto retry_wait; 963 goto retry_wait;
953 goto fwdl_out; 964 goto fwdl_out;
954 } 965 }
@@ -2293,6 +2304,10 @@ retry_wait:
2293 goto done_free_mem; 2304 goto done_free_mem;
2294 } 2305 }
2295 if (!timeleft) { 2306 if (!timeleft) {
2307 printk(MYIOC_s_WARN_FMT
2308 "mpt cmd timeout, doorbell=0x%08x"
2309 " function=0x%x\n",
2310 ioc->name, mpt_GetIocState(ioc, 0), function);
2296 if (function == MPI_FUNCTION_SCSI_TASK_MGMT) 2311 if (function == MPI_FUNCTION_SCSI_TASK_MGMT)
2297 mutex_unlock(&ioc->taskmgmt_cmds.mutex); 2312 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
2298 mptctl_timeout_expired(ioc, mf); 2313 mptctl_timeout_expired(ioc, mf);
@@ -2600,9 +2615,12 @@ retry_wait:
2600 mpt_free_msg_frame(ioc, mf); 2615 mpt_free_msg_frame(ioc, mf);
2601 goto out; 2616 goto out;
2602 } 2617 }
2603 if (!timeleft) 2618 if (!timeleft) {
2619 printk(MYIOC_s_WARN_FMT
2620 "HOST INFO command timeout, doorbell=0x%08x\n",
2621 ioc->name, mpt_GetIocState(ioc, 0));
2604 mptctl_timeout_expired(ioc, mf); 2622 mptctl_timeout_expired(ioc, mf);
2605 else 2623 } else
2606 goto retry_wait; 2624 goto retry_wait;
2607 goto out; 2625 goto out;
2608 } 2626 }
@@ -3000,7 +3018,8 @@ static int __init mptctl_init(void)
3000 * Install our handler 3018 * Install our handler
3001 */ 3019 */
3002 ++where; 3020 ++where;
3003 mptctl_id = mpt_register(mptctl_reply, MPTCTL_DRIVER); 3021 mptctl_id = mpt_register(mptctl_reply, MPTCTL_DRIVER,
3022 "mptctl_reply");
3004 if (!mptctl_id || mptctl_id >= MPT_MAX_PROTOCOL_DRIVERS) { 3023 if (!mptctl_id || mptctl_id >= MPT_MAX_PROTOCOL_DRIVERS) {
3005 printk(KERN_ERR MYNAM ": ERROR: Failed to register with Fusion MPT base driver\n"); 3024 printk(KERN_ERR MYNAM ": ERROR: Failed to register with Fusion MPT base driver\n");
3006 misc_deregister(&mptctl_miscdev); 3025 misc_deregister(&mptctl_miscdev);
@@ -3008,7 +3027,8 @@ static int __init mptctl_init(void)
3008 goto out_fail; 3027 goto out_fail;
3009 } 3028 }
3010 3029
3011 mptctl_taskmgmt_id = mpt_register(mptctl_taskmgmt_reply, MPTCTL_DRIVER); 3030 mptctl_taskmgmt_id = mpt_register(mptctl_taskmgmt_reply, MPTCTL_DRIVER,
3031 "mptctl_taskmgmt_reply");
3012 if (!mptctl_taskmgmt_id || mptctl_taskmgmt_id >= MPT_MAX_PROTOCOL_DRIVERS) { 3032 if (!mptctl_taskmgmt_id || mptctl_taskmgmt_id >= MPT_MAX_PROTOCOL_DRIVERS) {
3013 printk(KERN_ERR MYNAM ": ERROR: Failed to register with Fusion MPT base driver\n"); 3033 printk(KERN_ERR MYNAM ": ERROR: Failed to register with Fusion MPT base driver\n");
3014 mpt_deregister(mptctl_id); 3034 mpt_deregister(mptctl_id);
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index b5f03ad81568..e15220ff52fc 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -1472,9 +1472,12 @@ mptfc_init(void)
1472 if (!mptfc_transport_template) 1472 if (!mptfc_transport_template)
1473 return -ENODEV; 1473 return -ENODEV;
1474 1474
1475 mptfcDoneCtx = mpt_register(mptscsih_io_done, MPTFC_DRIVER); 1475 mptfcDoneCtx = mpt_register(mptscsih_io_done, MPTFC_DRIVER,
1476 mptfcTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTFC_DRIVER); 1476 "mptscsih_scandv_complete");
1477 mptfcInternalCtx = mpt_register(mptscsih_scandv_complete, MPTFC_DRIVER); 1477 mptfcTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTFC_DRIVER,
1478 "mptscsih_scandv_complete");
1479 mptfcInternalCtx = mpt_register(mptscsih_scandv_complete, MPTFC_DRIVER,
1480 "mptscsih_scandv_complete");
1478 1481
1479 mpt_event_register(mptfcDoneCtx, mptfc_event_process); 1482 mpt_event_register(mptfcDoneCtx, mptfc_event_process);
1480 mpt_reset_register(mptfcDoneCtx, mptfc_ioc_reset); 1483 mpt_reset_register(mptfcDoneCtx, mptfc_ioc_reset);
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index 4fa9665cbe93..cbe96072a6cc 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -1452,7 +1452,9 @@ static int __init mpt_lan_init (void)
1452{ 1452{
1453 show_mptmod_ver(LANAME, LANVER); 1453 show_mptmod_ver(LANAME, LANVER);
1454 1454
1455 if ((LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER)) <= 0) { 1455 LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER,
1456 "lan_reply");
1457 if (LanCtx <= 0) {
1456 printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n"); 1458 printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
1457 return -EBUSY; 1459 return -EBUSY;
1458 } 1460 }
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index ac000e83db0e..83a5115f0251 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -57,6 +57,7 @@
57#include <scsi/scsi_device.h> 57#include <scsi/scsi_device.h>
58#include <scsi/scsi_host.h> 58#include <scsi/scsi_host.h>
59#include <scsi/scsi_transport_sas.h> 59#include <scsi/scsi_transport_sas.h>
60#include <scsi/scsi_transport.h>
60#include <scsi/scsi_dbg.h> 61#include <scsi/scsi_dbg.h>
61 62
62#include "mptbase.h" 63#include "mptbase.h"
@@ -126,6 +127,7 @@ static void mptsas_scan_sas_topology(MPT_ADAPTER *ioc);
126static void mptsas_broadcast_primative_work(struct fw_event_work *fw_event); 127static void mptsas_broadcast_primative_work(struct fw_event_work *fw_event);
127static void mptsas_handle_queue_full_event(struct fw_event_work *fw_event); 128static void mptsas_handle_queue_full_event(struct fw_event_work *fw_event);
128static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id); 129static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id);
130void mptsas_schedule_target_reset(void *ioc);
129 131
130static void mptsas_print_phy_data(MPT_ADAPTER *ioc, 132static void mptsas_print_phy_data(MPT_ADAPTER *ioc,
131 MPI_SAS_IO_UNIT0_PHY_DATA *phy_data) 133 MPI_SAS_IO_UNIT0_PHY_DATA *phy_data)
@@ -1139,6 +1141,44 @@ mptsas_target_reset_queue(MPT_ADAPTER *ioc,
1139} 1141}
1140 1142
1141/** 1143/**
1144 * mptsas_schedule_target_reset- send pending target reset
1145 * @iocp: per adapter object
1146 *
1147 * This function will delete scheduled target reset from the list and
1148 * try to send next target reset. This will be called from completion
1149 * context of any Task managment command.
1150 */
1151
1152void
1153mptsas_schedule_target_reset(void *iocp)
1154{
1155 MPT_ADAPTER *ioc = (MPT_ADAPTER *)(iocp);
1156 MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
1157 struct list_head *head = &hd->target_reset_list;
1158 struct mptsas_target_reset_event *target_reset_list;
1159 u8 id, channel;
1160 /*
1161 * issue target reset to next device in the queue
1162 */
1163
1164 head = &hd->target_reset_list;
1165 if (list_empty(head))
1166 return;
1167
1168 target_reset_list = list_entry(head->next,
1169 struct mptsas_target_reset_event, list);
1170
1171 id = target_reset_list->sas_event_data.TargetID;
1172 channel = target_reset_list->sas_event_data.Bus;
1173 target_reset_list->time_count = jiffies;
1174
1175 if (mptsas_target_reset(ioc, channel, id))
1176 target_reset_list->target_reset_issued = 1;
1177 return;
1178}
1179
1180
1181/**
1142 * mptsas_taskmgmt_complete - complete SAS task management function 1182 * mptsas_taskmgmt_complete - complete SAS task management function
1143 * @ioc: Pointer to MPT_ADAPTER structure 1183 * @ioc: Pointer to MPT_ADAPTER structure
1144 * 1184 *
@@ -1222,28 +1262,12 @@ mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
1222 * enable work queue to remove device from upper layers 1262 * enable work queue to remove device from upper layers
1223 */ 1263 */
1224 list_del(&target_reset_list->list); 1264 list_del(&target_reset_list->list);
1225 if ((mptsas_find_vtarget(ioc, channel, id)) && !ioc->fw_events_off) 1265 if (!ioc->fw_events_off)
1226 mptsas_queue_device_delete(ioc, 1266 mptsas_queue_device_delete(ioc,
1227 &target_reset_list->sas_event_data); 1267 &target_reset_list->sas_event_data);
1228 1268
1229 1269
1230 /* 1270 ioc->schedule_target_reset(ioc);
1231 * issue target reset to next device in the queue
1232 */
1233
1234 head = &hd->target_reset_list;
1235 if (list_empty(head))
1236 return 1;
1237
1238 target_reset_list = list_entry(head->next, struct mptsas_target_reset_event,
1239 list);
1240
1241 id = target_reset_list->sas_event_data.TargetID;
1242 channel = target_reset_list->sas_event_data.Bus;
1243 target_reset_list->time_count = jiffies;
1244
1245 if (mptsas_target_reset(ioc, channel, id))
1246 target_reset_list->target_reset_issued = 1;
1247 1271
1248 return 1; 1272 return 1;
1249} 1273}
@@ -1889,6 +1913,48 @@ mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1889 return mptscsih_qcmd(SCpnt,done); 1913 return mptscsih_qcmd(SCpnt,done);
1890} 1914}
1891 1915
1916/**
1917 * mptsas_mptsas_eh_timed_out - resets the scsi_cmnd timeout
1918 * if the device under question is currently in the
1919 * device removal delay.
1920 * @sc: scsi command that the midlayer is about to time out
1921 *
1922 **/
1923static enum blk_eh_timer_return mptsas_eh_timed_out(struct scsi_cmnd *sc)
1924{
1925 MPT_SCSI_HOST *hd;
1926 MPT_ADAPTER *ioc;
1927 VirtDevice *vdevice;
1928 enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
1929
1930 hd = shost_priv(sc->device->host);
1931 if (hd == NULL) {
1932 printk(KERN_ERR MYNAM ": %s: Can't locate host! (sc=%p)\n",
1933 __func__, sc);
1934 goto done;
1935 }
1936
1937 ioc = hd->ioc;
1938 if (ioc->bus_type != SAS) {
1939 printk(KERN_ERR MYNAM ": %s: Wrong bus type (sc=%p)\n",
1940 __func__, sc);
1941 goto done;
1942 }
1943
1944 vdevice = sc->device->hostdata;
1945 if (vdevice && vdevice->vtarget && (vdevice->vtarget->inDMD
1946 || vdevice->vtarget->deleted)) {
1947 dtmprintk(ioc, printk(MYIOC_s_WARN_FMT ": %s: target removed "
1948 "or in device removal delay (sc=%p)\n",
1949 ioc->name, __func__, sc));
1950 rc = BLK_EH_RESET_TIMER;
1951 goto done;
1952 }
1953
1954done:
1955 return rc;
1956}
1957
1892 1958
1893static struct scsi_host_template mptsas_driver_template = { 1959static struct scsi_host_template mptsas_driver_template = {
1894 .module = THIS_MODULE, 1960 .module = THIS_MODULE,
@@ -2364,7 +2430,7 @@ mptsas_sas_io_unit_pg1(MPT_ADAPTER *ioc)
2364 SasIOUnitPage1_t *buffer; 2430 SasIOUnitPage1_t *buffer;
2365 dma_addr_t dma_handle; 2431 dma_addr_t dma_handle;
2366 int error; 2432 int error;
2367 u16 device_missing_delay; 2433 u8 device_missing_delay;
2368 2434
2369 memset(&hdr, 0, sizeof(ConfigExtendedPageHeader_t)); 2435 memset(&hdr, 0, sizeof(ConfigExtendedPageHeader_t));
2370 memset(&cfg, 0, sizeof(CONFIGPARMS)); 2436 memset(&cfg, 0, sizeof(CONFIGPARMS));
@@ -2401,7 +2467,7 @@ mptsas_sas_io_unit_pg1(MPT_ADAPTER *ioc)
2401 2467
2402 ioc->io_missing_delay = 2468 ioc->io_missing_delay =
2403 le16_to_cpu(buffer->IODeviceMissingDelay); 2469 le16_to_cpu(buffer->IODeviceMissingDelay);
2404 device_missing_delay = le16_to_cpu(buffer->ReportDeviceMissingDelay); 2470 device_missing_delay = buffer->ReportDeviceMissingDelay;
2405 ioc->device_missing_delay = (device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_UNIT_16) ? 2471 ioc->device_missing_delay = (device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_UNIT_16) ?
2406 (device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16 : 2472 (device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16 :
2407 device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_TIMEOUT_MASK; 2473 device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
@@ -2549,6 +2615,7 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
2549 device_info->sas_address = le64_to_cpu(sas_address); 2615 device_info->sas_address = le64_to_cpu(sas_address);
2550 device_info->device_info = 2616 device_info->device_info =
2551 le32_to_cpu(buffer->DeviceInfo); 2617 le32_to_cpu(buffer->DeviceInfo);
2618 device_info->flags = le16_to_cpu(buffer->Flags);
2552 2619
2553 out_free_consistent: 2620 out_free_consistent:
2554 pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4, 2621 pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
@@ -2960,6 +3027,7 @@ static int mptsas_probe_one_phy(struct device *dev,
2960 struct sas_phy *phy; 3027 struct sas_phy *phy;
2961 struct sas_port *port; 3028 struct sas_port *port;
2962 int error = 0; 3029 int error = 0;
3030 VirtTarget *vtarget;
2963 3031
2964 if (!dev) { 3032 if (!dev) {
2965 error = -ENODEV; 3033 error = -ENODEV;
@@ -3182,6 +3250,16 @@ static int mptsas_probe_one_phy(struct device *dev,
3182 rphy_to_expander_device(rphy)); 3250 rphy_to_expander_device(rphy));
3183 } 3251 }
3184 3252
3253 /* If the device exists,verify it wasn't previously flagged
3254 as a missing device. If so, clear it */
3255 vtarget = mptsas_find_vtarget(ioc,
3256 phy_info->attached.channel,
3257 phy_info->attached.id);
3258 if (vtarget && vtarget->inDMD) {
3259 printk(KERN_INFO "Device returned, unsetting inDMD\n");
3260 vtarget->inDMD = 0;
3261 }
3262
3185 out: 3263 out:
3186 return error; 3264 return error;
3187} 3265}
@@ -3635,9 +3713,42 @@ mptsas_send_link_status_event(struct fw_event_work *fw_event)
3635 MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION) 3713 MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION)
3636 phy_info->phy->negotiated_linkrate = 3714 phy_info->phy->negotiated_linkrate =
3637 SAS_LINK_RATE_FAILED; 3715 SAS_LINK_RATE_FAILED;
3638 else 3716 else {
3639 phy_info->phy->negotiated_linkrate = 3717 phy_info->phy->negotiated_linkrate =
3640 SAS_LINK_RATE_UNKNOWN; 3718 SAS_LINK_RATE_UNKNOWN;
3719 if (ioc->device_missing_delay &&
3720 mptsas_is_end_device(&phy_info->attached)) {
3721 struct scsi_device *sdev;
3722 VirtDevice *vdevice;
3723 u8 channel, id;
3724 id = phy_info->attached.id;
3725 channel = phy_info->attached.channel;
3726 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
3727 "Link down for fw_id %d:fw_channel %d\n",
3728 ioc->name, phy_info->attached.id,
3729 phy_info->attached.channel));
3730
3731 shost_for_each_device(sdev, ioc->sh) {
3732 vdevice = sdev->hostdata;
3733 if ((vdevice == NULL) ||
3734 (vdevice->vtarget == NULL))
3735 continue;
3736 if ((vdevice->vtarget->tflags &
3737 MPT_TARGET_FLAGS_RAID_COMPONENT ||
3738 vdevice->vtarget->raidVolume))
3739 continue;
3740 if (vdevice->vtarget->id == id &&
3741 vdevice->vtarget->channel ==
3742 channel)
3743 devtprintk(ioc,
3744 printk(MYIOC_s_DEBUG_FMT
3745 "SDEV OUTSTANDING CMDS"
3746 "%d\n", ioc->name,
3747 sdev->device_busy));
3748 }
3749
3750 }
3751 }
3641 } 3752 }
3642 out: 3753 out:
3643 mptsas_free_fw_event(ioc, fw_event); 3754 mptsas_free_fw_event(ioc, fw_event);
@@ -3840,6 +3951,13 @@ mptsas_probe_devices(MPT_ADAPTER *ioc)
3840 MPI_SAS_DEVICE_INFO_SATA_DEVICE)) == 0) 3951 MPI_SAS_DEVICE_INFO_SATA_DEVICE)) == 0)
3841 continue; 3952 continue;
3842 3953
3954 /* If there is no FW B_T mapping for this device then continue
3955 * */
3956 if (!(sas_device.flags & MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)
3957 || !(sas_device.flags &
3958 MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED))
3959 continue;
3960
3843 phy_info = mptsas_refreshing_device_handles(ioc, &sas_device); 3961 phy_info = mptsas_refreshing_device_handles(ioc, &sas_device);
3844 if (!phy_info) 3962 if (!phy_info)
3845 continue; 3963 continue;
@@ -4149,6 +4267,14 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
4149 phys_disk.PhysDiskID)) 4267 phys_disk.PhysDiskID))
4150 continue; 4268 continue;
4151 4269
4270 /* If there is no FW B_T mapping for this device then continue
4271 * */
4272 if (!(sas_device.flags & MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)
4273 || !(sas_device.flags &
4274 MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED))
4275 continue;
4276
4277
4152 phy_info = mptsas_find_phyinfo_by_sas_address(ioc, 4278 phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
4153 sas_device.sas_address); 4279 sas_device.sas_address);
4154 mptsas_add_end_device(ioc, phy_info); 4280 mptsas_add_end_device(ioc, phy_info);
@@ -4171,6 +4297,7 @@ mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
4171 struct mptsas_devinfo sas_device; 4297 struct mptsas_devinfo sas_device;
4172 VirtTarget *vtarget; 4298 VirtTarget *vtarget;
4173 int i; 4299 int i;
4300 struct mptsas_portinfo *port_info;
4174 4301
4175 switch (hot_plug_info->event_type) { 4302 switch (hot_plug_info->event_type) {
4176 4303
@@ -4199,12 +4326,47 @@ mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
4199 (hot_plug_info->channel << 8) + 4326 (hot_plug_info->channel << 8) +
4200 hot_plug_info->id); 4327 hot_plug_info->id);
4201 4328
4329 /* If there is no FW B_T mapping for this device then break
4330 * */
4331 if (!(sas_device.flags & MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)
4332 || !(sas_device.flags &
4333 MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED))
4334 break;
4335
4202 if (!sas_device.handle) 4336 if (!sas_device.handle)
4203 return; 4337 return;
4204 4338
4205 phy_info = mptsas_refreshing_device_handles(ioc, &sas_device); 4339 phy_info = mptsas_refreshing_device_handles(ioc, &sas_device);
4206 if (!phy_info) 4340 /* Only For SATA Device ADD */
4341 if (!phy_info && (sas_device.device_info &
4342 MPI_SAS_DEVICE_INFO_SATA_DEVICE)) {
4343 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
4344 "%s %d SATA HOT PLUG: "
4345 "parent handle of device %x\n", ioc->name,
4346 __func__, __LINE__, sas_device.handle_parent));
4347 port_info = mptsas_find_portinfo_by_handle(ioc,
4348 sas_device.handle_parent);
4349
4350 if (port_info == ioc->hba_port_info)
4351 mptsas_probe_hba_phys(ioc);
4352 else if (port_info)
4353 mptsas_expander_refresh(ioc, port_info);
4354 else {
4355 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
4356 "%s %d port info is NULL\n",
4357 ioc->name, __func__, __LINE__));
4358 break;
4359 }
4360 phy_info = mptsas_refreshing_device_handles
4361 (ioc, &sas_device);
4362 }
4363
4364 if (!phy_info) {
4365 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
4366 "%s %d phy info is NULL\n",
4367 ioc->name, __func__, __LINE__));
4207 break; 4368 break;
4369 }
4208 4370
4209 if (mptsas_get_rphy(phy_info)) 4371 if (mptsas_get_rphy(phy_info))
4210 break; 4372 break;
@@ -4241,6 +4403,13 @@ mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
4241 break; 4403 break;
4242 } 4404 }
4243 4405
4406 /* If there is no FW B_T mapping for this device then break
4407 * */
4408 if (!(sas_device.flags & MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)
4409 || !(sas_device.flags &
4410 MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED))
4411 break;
4412
4244 phy_info = mptsas_find_phyinfo_by_sas_address( 4413 phy_info = mptsas_find_phyinfo_by_sas_address(
4245 ioc, sas_device.sas_address); 4414 ioc, sas_device.sas_address);
4246 4415
@@ -4294,6 +4463,13 @@ mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
4294 break; 4463 break;
4295 } 4464 }
4296 4465
4466 /* If there is no FW B_T mapping for this device then break
4467 * */
4468 if (!(sas_device.flags & MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)
4469 || !(sas_device.flags &
4470 MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED))
4471 break;
4472
4297 phy_info = mptsas_find_phyinfo_by_sas_address(ioc, 4473 phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
4298 sas_device.sas_address); 4474 sas_device.sas_address);
4299 if (!phy_info) { 4475 if (!phy_info) {
@@ -4727,8 +4903,9 @@ mptsas_broadcast_primative_work(struct fw_event_work *fw_event)
4727 mutex_unlock(&ioc->taskmgmt_cmds.mutex); 4903 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
4728 4904
4729 if (issue_reset) { 4905 if (issue_reset) {
4730 printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n", 4906 printk(MYIOC_s_WARN_FMT
4731 ioc->name, __func__); 4907 "Issuing Reset from %s!! doorbell=0x%08x\n",
4908 ioc->name, __func__, mpt_GetIocState(ioc, 0));
4732 mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP); 4909 mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
4733 } 4910 }
4734 mptsas_free_fw_event(ioc, fw_event); 4911 mptsas_free_fw_event(ioc, fw_event);
@@ -4816,12 +4993,47 @@ mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply)
4816 { 4993 {
4817 EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data = 4994 EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data =
4818 (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)reply->Data; 4995 (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)reply->Data;
4996 u16 ioc_stat;
4997 ioc_stat = le16_to_cpu(reply->IOCStatus);
4819 4998
4820 if (sas_event_data->ReasonCode == 4999 if (sas_event_data->ReasonCode ==
4821 MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING) { 5000 MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING) {
4822 mptsas_target_reset_queue(ioc, sas_event_data); 5001 mptsas_target_reset_queue(ioc, sas_event_data);
4823 return 0; 5002 return 0;
4824 } 5003 }
5004 if (sas_event_data->ReasonCode ==
5005 MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
5006 ioc->device_missing_delay &&
5007 (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)) {
5008 VirtTarget *vtarget = NULL;
5009 u8 id, channel;
5010 u32 log_info = le32_to_cpu(reply->IOCLogInfo);
5011
5012 id = sas_event_data->TargetID;
5013 channel = sas_event_data->Bus;
5014
5015 vtarget = mptsas_find_vtarget(ioc, channel, id);
5016 if (vtarget) {
5017 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
5018 "LogInfo (0x%x) available for "
5019 "INTERNAL_DEVICE_RESET"
5020 "fw_id %d fw_channel %d\n", ioc->name,
5021 log_info, id, channel));
5022 if (vtarget->raidVolume) {
5023 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
5024 "Skipping Raid Volume for inDMD\n",
5025 ioc->name));
5026 } else {
5027 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
5028 "Setting device flag inDMD\n",
5029 ioc->name));
5030 vtarget->inDMD = 1;
5031 }
5032
5033 }
5034
5035 }
5036
4825 break; 5037 break;
4826 } 5038 }
4827 case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE: 5039 case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
@@ -4924,7 +5136,7 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
4924 ioc->DoneCtx = mptsasDoneCtx; 5136 ioc->DoneCtx = mptsasDoneCtx;
4925 ioc->TaskCtx = mptsasTaskCtx; 5137 ioc->TaskCtx = mptsasTaskCtx;
4926 ioc->InternalCtx = mptsasInternalCtx; 5138 ioc->InternalCtx = mptsasInternalCtx;
4927 5139 ioc->schedule_target_reset = &mptsas_schedule_target_reset;
4928 /* Added sanity check on readiness of the MPT adapter. 5140 /* Added sanity check on readiness of the MPT adapter.
4929 */ 5141 */
4930 if (ioc->last_state != MPI_IOC_STATE_OPERATIONAL) { 5142 if (ioc->last_state != MPI_IOC_STATE_OPERATIONAL) {
@@ -5154,14 +5366,20 @@ mptsas_init(void)
5154 sas_attach_transport(&mptsas_transport_functions); 5366 sas_attach_transport(&mptsas_transport_functions);
5155 if (!mptsas_transport_template) 5367 if (!mptsas_transport_template)
5156 return -ENODEV; 5368 return -ENODEV;
5369 mptsas_transport_template->eh_timed_out = mptsas_eh_timed_out;
5157 5370
5158 mptsasDoneCtx = mpt_register(mptscsih_io_done, MPTSAS_DRIVER); 5371 mptsasDoneCtx = mpt_register(mptscsih_io_done, MPTSAS_DRIVER,
5159 mptsasTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSAS_DRIVER); 5372 "mptscsih_io_done");
5373 mptsasTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSAS_DRIVER,
5374 "mptscsih_taskmgmt_complete");
5160 mptsasInternalCtx = 5375 mptsasInternalCtx =
5161 mpt_register(mptscsih_scandv_complete, MPTSAS_DRIVER); 5376 mpt_register(mptscsih_scandv_complete, MPTSAS_DRIVER,
5162 mptsasMgmtCtx = mpt_register(mptsas_mgmt_done, MPTSAS_DRIVER); 5377 "mptscsih_scandv_complete");
5378 mptsasMgmtCtx = mpt_register(mptsas_mgmt_done, MPTSAS_DRIVER,
5379 "mptsas_mgmt_done");
5163 mptsasDeviceResetCtx = 5380 mptsasDeviceResetCtx =
5164 mpt_register(mptsas_taskmgmt_complete, MPTSAS_DRIVER); 5381 mpt_register(mptsas_taskmgmt_complete, MPTSAS_DRIVER,
5382 "mptsas_taskmgmt_complete");
5165 5383
5166 mpt_event_register(mptsasDoneCtx, mptsas_event_process); 5384 mpt_event_register(mptsasDoneCtx, mptsas_event_process);
5167 mpt_reset_register(mptsasDoneCtx, mptsas_ioc_reset); 5385 mpt_reset_register(mptsasDoneCtx, mptsas_ioc_reset);
diff --git a/drivers/message/fusion/mptsas.h b/drivers/message/fusion/mptsas.h
index 7b249edbda78..57e86ab77661 100644
--- a/drivers/message/fusion/mptsas.h
+++ b/drivers/message/fusion/mptsas.h
@@ -140,6 +140,7 @@ struct mptsas_devinfo {
140 u64 sas_address; /* WWN of this device, 140 u64 sas_address; /* WWN of this device,
141 SATA is assigned by HBA,expander */ 141 SATA is assigned by HBA,expander */
142 u32 device_info; /* bitfield detailed info about this device */ 142 u32 device_info; /* bitfield detailed info about this device */
143 u16 flags; /* sas device pg0 flags */
143}; 144};
144 145
145/* 146/*
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 5c53624e0e87..59b8f53d1ece 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -664,6 +664,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
664 u32 log_info; 664 u32 log_info;
665 665
666 status = le16_to_cpu(pScsiReply->IOCStatus) & MPI_IOCSTATUS_MASK; 666 status = le16_to_cpu(pScsiReply->IOCStatus) & MPI_IOCSTATUS_MASK;
667
667 scsi_state = pScsiReply->SCSIState; 668 scsi_state = pScsiReply->SCSIState;
668 scsi_status = pScsiReply->SCSIStatus; 669 scsi_status = pScsiReply->SCSIStatus;
669 xfer_cnt = le32_to_cpu(pScsiReply->TransferCount); 670 xfer_cnt = le32_to_cpu(pScsiReply->TransferCount);
@@ -738,13 +739,36 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
738 739
739 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */ 740 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */
740 if ( ioc->bus_type == SAS ) { 741 if ( ioc->bus_type == SAS ) {
741 u16 ioc_status = le16_to_cpu(pScsiReply->IOCStatus); 742 u16 ioc_status =
742 if (ioc_status & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) { 743 le16_to_cpu(pScsiReply->IOCStatus);
743 if ((log_info & SAS_LOGINFO_MASK) 744 if ((ioc_status &
744 == SAS_LOGINFO_NEXUS_LOSS) { 745 MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
745 sc->result = (DID_BUS_BUSY << 16); 746 &&
746 break; 747 ((log_info & SAS_LOGINFO_MASK) ==
747 } 748 SAS_LOGINFO_NEXUS_LOSS)) {
749 VirtDevice *vdevice =
750 sc->device->hostdata;
751
752 /* flag the device as being in
753 * device removal delay so we can
754 * notify the midlayer to hold off
755 * on timeout eh */
756 if (vdevice && vdevice->
757 vtarget &&
758 vdevice->vtarget->
759 raidVolume)
760 printk(KERN_INFO
761 "Skipping Raid Volume"
762 "for inDMD\n");
763 else if (vdevice &&
764 vdevice->vtarget)
765 vdevice->vtarget->
766 inDMD = 1;
767
768 sc->result =
769 (DID_TRANSPORT_DISRUPTED
770 << 16);
771 break;
748 } 772 }
749 } else if (ioc->bus_type == FC) { 773 } else if (ioc->bus_type == FC) {
750 /* 774 /*
@@ -1704,8 +1728,9 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun,
1704 1728
1705 CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status) 1729 CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
1706 if (issue_hard_reset) { 1730 if (issue_hard_reset) {
1707 printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n", 1731 printk(MYIOC_s_WARN_FMT
1708 ioc->name, __func__); 1732 "Issuing Reset from %s!! doorbell=0x%08x\n",
1733 ioc->name, __func__, mpt_GetIocState(ioc, 0));
1709 retval = mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP); 1734 retval = mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
1710 mpt_free_msg_frame(ioc, mf); 1735 mpt_free_msg_frame(ioc, mf);
1711 } 1736 }
@@ -2132,6 +2157,8 @@ mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
2132 mpt_clear_taskmgmt_in_progress_flag(ioc); 2157 mpt_clear_taskmgmt_in_progress_flag(ioc);
2133 ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING; 2158 ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
2134 complete(&ioc->taskmgmt_cmds.done); 2159 complete(&ioc->taskmgmt_cmds.done);
2160 if (ioc->bus_type == SAS)
2161 ioc->schedule_target_reset(ioc);
2135 return 1; 2162 return 1;
2136 } 2163 }
2137 return 0; 2164 return 0;
@@ -2459,6 +2486,8 @@ mptscsih_slave_configure(struct scsi_device *sdev)
2459 ioc->name,sdev->tagged_supported, sdev->simple_tags, 2486 ioc->name,sdev->tagged_supported, sdev->simple_tags,
2460 sdev->ordered_tags)); 2487 sdev->ordered_tags));
2461 2488
2489 blk_queue_dma_alignment (sdev->request_queue, 512 - 1);
2490
2462 return 0; 2491 return 0;
2463} 2492}
2464 2493
@@ -3045,8 +3074,11 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
3045 goto out; 3074 goto out;
3046 } 3075 }
3047 if (!timeleft) { 3076 if (!timeleft) {
3048 printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n", 3077 printk(MYIOC_s_WARN_FMT
3049 ioc->name, __func__); 3078 "Issuing Reset from %s!! doorbell=0x%08xh"
3079 " cmd=0x%02x\n",
3080 ioc->name, __func__, mpt_GetIocState(ioc, 0),
3081 cmd);
3050 mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP); 3082 mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
3051 mpt_free_msg_frame(ioc, mf); 3083 mpt_free_msg_frame(ioc, mf);
3052 } 3084 }
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 1abaa5d01ae3..0e2803155ae2 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -1551,9 +1551,12 @@ mptspi_init(void)
1551 if (!mptspi_transport_template) 1551 if (!mptspi_transport_template)
1552 return -ENODEV; 1552 return -ENODEV;
1553 1553
1554 mptspiDoneCtx = mpt_register(mptscsih_io_done, MPTSPI_DRIVER); 1554 mptspiDoneCtx = mpt_register(mptscsih_io_done, MPTSPI_DRIVER,
1555 mptspiTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSPI_DRIVER); 1555 "mptscsih_io_done");
1556 mptspiInternalCtx = mpt_register(mptscsih_scandv_complete, MPTSPI_DRIVER); 1556 mptspiTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSPI_DRIVER,
1557 "mptscsih_taskmgmt_complete");
1558 mptspiInternalCtx = mpt_register(mptscsih_scandv_complete,
1559 MPTSPI_DRIVER, "mptscsih_scandv_complete");
1557 1560
1558 mpt_event_register(mptspiDoneCtx, mptspi_event_process); 1561 mpt_event_register(mptspiDoneCtx, mptspi_event_process);
1559 mpt_reset_register(mptspiDoneCtx, mptspi_ioc_reset); 1562 mpt_reset_register(mptspiDoneCtx, mptspi_ioc_reset);
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 48c84a58163e..00e5fcac8fdf 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -285,8 +285,11 @@ enclosure_component_register(struct enclosure_device *edev,
285 cdev->groups = enclosure_groups; 285 cdev->groups = enclosure_groups;
286 286
287 err = device_register(cdev); 287 err = device_register(cdev);
288 if (err) 288 if (err) {
289 ERR_PTR(err); 289 ecomp->number = -1;
290 put_device(cdev);
291 return ERR_PTR(err);
292 }
290 293
291 return ecomp; 294 return ecomp;
292} 295}
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 6326b67c45d2..34c7e4046df4 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -368,6 +368,8 @@ static void setup_qib(struct qdio_irq *irq_ptr,
368 if (qebsm_possible()) 368 if (qebsm_possible())
369 irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM; 369 irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
370 370
371 irq_ptr->qib.rflags |= init_data->qib_rflags;
372
371 irq_ptr->qib.qfmt = init_data->q_format; 373 irq_ptr->qib.qfmt = init_data->q_format;
372 if (init_data->no_input_qs) 374 if (init_data->no_input_qs)
373 irq_ptr->qib.isliba = 375 irq_ptr->qib.isliba =
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index e331df2122f7..96fa1f536394 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -98,13 +98,11 @@ static void __init zfcp_init_device_setup(char *devstr)
98 u64 wwpn, lun; 98 u64 wwpn, lun;
99 99
100 /* duplicate devstr and keep the original for sysfs presentation*/ 100 /* duplicate devstr and keep the original for sysfs presentation*/
101 str_saved = kmalloc(strlen(devstr) + 1, GFP_KERNEL); 101 str_saved = kstrdup(devstr, GFP_KERNEL);
102 str = str_saved; 102 str = str_saved;
103 if (!str) 103 if (!str)
104 return; 104 return;
105 105
106 strcpy(str, devstr);
107
108 token = strsep(&str, ","); 106 token = strsep(&str, ",");
109 if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE) 107 if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE)
110 goto err_out; 108 goto err_out;
@@ -314,7 +312,7 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
314 } 312 }
315 retval = -EINVAL; 313 retval = -EINVAL;
316 314
317 INIT_WORK(&unit->scsi_work, zfcp_scsi_scan); 315 INIT_WORK(&unit->scsi_work, zfcp_scsi_scan_work);
318 316
319 spin_lock_init(&unit->latencies.lock); 317 spin_lock_init(&unit->latencies.lock);
320 unit->latencies.write.channel.min = 0xFFFFFFFF; 318 unit->latencies.write.channel.min = 0xFFFFFFFF;
@@ -526,6 +524,10 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
526 rwlock_init(&adapter->port_list_lock); 524 rwlock_init(&adapter->port_list_lock);
527 INIT_LIST_HEAD(&adapter->port_list); 525 INIT_LIST_HEAD(&adapter->port_list);
528 526
527 INIT_LIST_HEAD(&adapter->events.list);
528 INIT_WORK(&adapter->events.work, zfcp_fc_post_event);
529 spin_lock_init(&adapter->events.list_lock);
530
529 init_waitqueue_head(&adapter->erp_ready_wq); 531 init_waitqueue_head(&adapter->erp_ready_wq);
530 init_waitqueue_head(&adapter->erp_done_wqh); 532 init_waitqueue_head(&adapter->erp_done_wqh);
531 533
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index 1a2db0a35737..fcbd2b756da4 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -189,18 +189,12 @@ static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
189 if (!fsf_cfdc) 189 if (!fsf_cfdc)
190 return -ENOMEM; 190 return -ENOMEM;
191 191
192 data = kmalloc(sizeof(struct zfcp_cfdc_data), GFP_KERNEL); 192 data = memdup_user(data_user, sizeof(*data_user));
193 if (!data) { 193 if (IS_ERR(data)) {
194 retval = -ENOMEM; 194 retval = PTR_ERR(data);
195 goto no_mem_sense; 195 goto no_mem_sense;
196 } 196 }
197 197
198 retval = copy_from_user(data, data_user, sizeof(*data));
199 if (retval) {
200 retval = -EFAULT;
201 goto free_buffer;
202 }
203
204 if (data->signature != 0xCFDCACDF) { 198 if (data->signature != 0xCFDCACDF) {
205 retval = -EINVAL; 199 retval = -EINVAL;
206 goto free_buffer; 200 goto free_buffer;
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 075852f6968c..a86117b0d6e1 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -155,6 +155,8 @@ void _zfcp_dbf_hba_fsf_response(const char *tag2, int level,
155 if (scsi_cmnd) { 155 if (scsi_cmnd) {
156 response->u.fcp.cmnd = (unsigned long)scsi_cmnd; 156 response->u.fcp.cmnd = (unsigned long)scsi_cmnd;
157 response->u.fcp.serial = scsi_cmnd->serial_number; 157 response->u.fcp.serial = scsi_cmnd->serial_number;
158 response->u.fcp.data_dir =
159 qtcb->bottom.io.data_direction;
158 } 160 }
159 break; 161 break;
160 162
@@ -326,6 +328,7 @@ static void zfcp_dbf_hba_view_response(char **p,
326 case FSF_QTCB_FCP_CMND: 328 case FSF_QTCB_FCP_CMND:
327 if (r->fsf_req_status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT) 329 if (r->fsf_req_status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
328 break; 330 break;
331 zfcp_dbf_out(p, "data_direction", "0x%04x", r->u.fcp.data_dir);
329 zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd); 332 zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd);
330 zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial); 333 zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial);
331 *p += sprintf(*p, "\n"); 334 *p += sprintf(*p, "\n");
@@ -1005,7 +1008,7 @@ int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter)
1005 char dbf_name[DEBUG_MAX_NAME_LEN]; 1008 char dbf_name[DEBUG_MAX_NAME_LEN];
1006 struct zfcp_dbf *dbf; 1009 struct zfcp_dbf *dbf;
1007 1010
1008 dbf = kmalloc(sizeof(struct zfcp_dbf), GFP_KERNEL); 1011 dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL);
1009 if (!dbf) 1012 if (!dbf)
1010 return -ENOMEM; 1013 return -ENOMEM;
1011 1014
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 457e046f2d28..2bcc3403126a 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -111,6 +111,7 @@ struct zfcp_dbf_hba_record_response {
111 struct { 111 struct {
112 u64 cmnd; 112 u64 cmnd;
113 u64 serial; 113 u64 serial;
114 u32 data_dir;
114 } fcp; 115 } fcp;
115 struct { 116 struct {
116 u64 wwpn; 117 u64 wwpn;
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 9fa1b064893e..e1c6b6e05a75 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -37,6 +37,7 @@
37#include <asm/ebcdic.h> 37#include <asm/ebcdic.h>
38#include <asm/sysinfo.h> 38#include <asm/sysinfo.h>
39#include "zfcp_fsf.h" 39#include "zfcp_fsf.h"
40#include "zfcp_fc.h"
40#include "zfcp_qdio.h" 41#include "zfcp_qdio.h"
41 42
42struct zfcp_reqlist; 43struct zfcp_reqlist;
@@ -72,10 +73,12 @@ struct zfcp_reqlist;
72 73
73/* adapter status */ 74/* adapter status */
74#define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002 75#define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002
76#define ZFCP_STATUS_ADAPTER_SIOSL_ISSUED 0x00000004
75#define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008 77#define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008
76#define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010 78#define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010
77#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100 79#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100
78#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200 80#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200
81#define ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED 0x00000400
79 82
80/* remote port status */ 83/* remote port status */
81#define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001 84#define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001
@@ -190,6 +193,7 @@ struct zfcp_adapter {
190 struct service_level service_level; 193 struct service_level service_level;
191 struct workqueue_struct *work_queue; 194 struct workqueue_struct *work_queue;
192 struct device_dma_parameters dma_parms; 195 struct device_dma_parameters dma_parms;
196 struct zfcp_fc_events events;
193}; 197};
194 198
195struct zfcp_port { 199struct zfcp_port {
@@ -212,6 +216,7 @@ struct zfcp_port {
212 struct work_struct test_link_work; 216 struct work_struct test_link_work;
213 struct work_struct rport_work; 217 struct work_struct rport_work;
214 enum { RPORT_NONE, RPORT_ADD, RPORT_DEL } rport_task; 218 enum { RPORT_NONE, RPORT_ADD, RPORT_DEL } rport_task;
219 unsigned int starget_id;
215}; 220};
216 221
217struct zfcp_unit { 222struct zfcp_unit {
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index fd068bc1bd0a..160b432c907f 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -141,9 +141,13 @@ static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
141 if (!(p_status & ZFCP_STATUS_COMMON_UNBLOCKED)) 141 if (!(p_status & ZFCP_STATUS_COMMON_UNBLOCKED))
142 need = ZFCP_ERP_ACTION_REOPEN_PORT; 142 need = ZFCP_ERP_ACTION_REOPEN_PORT;
143 /* fall through */ 143 /* fall through */
144 case ZFCP_ERP_ACTION_REOPEN_PORT:
145 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 144 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
146 p_status = atomic_read(&port->status); 145 p_status = atomic_read(&port->status);
146 if (!(p_status & ZFCP_STATUS_COMMON_OPEN))
147 need = ZFCP_ERP_ACTION_REOPEN_PORT;
148 /* fall through */
149 case ZFCP_ERP_ACTION_REOPEN_PORT:
150 p_status = atomic_read(&port->status);
147 if (p_status & ZFCP_STATUS_COMMON_ERP_INUSE) 151 if (p_status & ZFCP_STATUS_COMMON_ERP_INUSE)
148 return 0; 152 return 0;
149 a_status = atomic_read(&adapter->status); 153 a_status = atomic_read(&adapter->status);
@@ -893,8 +897,7 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
893 } 897 }
894 if (port->d_id && !(p_status & ZFCP_STATUS_COMMON_NOESC)) { 898 if (port->d_id && !(p_status & ZFCP_STATUS_COMMON_NOESC)) {
895 port->d_id = 0; 899 port->d_id = 0;
896 _zfcp_erp_port_reopen(port, 0, "erpsoc1", NULL); 900 return ZFCP_ERP_FAILED;
897 return ZFCP_ERP_EXIT;
898 } 901 }
899 /* fall through otherwise */ 902 /* fall through otherwise */
900 } 903 }
@@ -1188,19 +1191,14 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
1188 1191
1189 switch (act->action) { 1192 switch (act->action) {
1190 case ZFCP_ERP_ACTION_REOPEN_UNIT: 1193 case ZFCP_ERP_ACTION_REOPEN_UNIT:
1191 if ((result == ZFCP_ERP_SUCCEEDED) && !unit->device) {
1192 get_device(&unit->dev);
1193 if (scsi_queue_work(unit->port->adapter->scsi_host,
1194 &unit->scsi_work) <= 0)
1195 put_device(&unit->dev);
1196 }
1197 put_device(&unit->dev); 1194 put_device(&unit->dev);
1198 break; 1195 break;
1199 1196
1200 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
1201 case ZFCP_ERP_ACTION_REOPEN_PORT: 1197 case ZFCP_ERP_ACTION_REOPEN_PORT:
1202 if (result == ZFCP_ERP_SUCCEEDED) 1198 if (result == ZFCP_ERP_SUCCEEDED)
1203 zfcp_scsi_schedule_rport_register(port); 1199 zfcp_scsi_schedule_rport_register(port);
1200 /* fall through */
1201 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
1204 put_device(&port->dev); 1202 put_device(&port->dev);
1205 break; 1203 break;
1206 1204
@@ -1247,6 +1245,11 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
1247 goto unlock; 1245 goto unlock;
1248 } 1246 }
1249 1247
1248 if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
1249 retval = ZFCP_ERP_FAILED;
1250 goto check_target;
1251 }
1252
1250 zfcp_erp_action_to_running(erp_action); 1253 zfcp_erp_action_to_running(erp_action);
1251 1254
1252 /* no lock to allow for blocking operations */ 1255 /* no lock to allow for blocking operations */
@@ -1279,6 +1282,7 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
1279 goto unlock; 1282 goto unlock;
1280 } 1283 }
1281 1284
1285check_target:
1282 retval = zfcp_erp_strategy_check_target(erp_action, retval); 1286 retval = zfcp_erp_strategy_check_target(erp_action, retval);
1283 zfcp_erp_action_dequeue(erp_action); 1287 zfcp_erp_action_dequeue(erp_action);
1284 retval = zfcp_erp_strategy_statechange(erp_action, retval); 1288 retval = zfcp_erp_strategy_statechange(erp_action, retval);
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 48a8f93b72f5..3b93239c6f69 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -96,6 +96,9 @@ extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *,
96extern void zfcp_erp_timeout_handler(unsigned long); 96extern void zfcp_erp_timeout_handler(unsigned long);
97 97
98/* zfcp_fc.c */ 98/* zfcp_fc.c */
99extern void zfcp_fc_enqueue_event(struct zfcp_adapter *,
100 enum fc_host_event_code event_code, u32);
101extern void zfcp_fc_post_event(struct work_struct *);
99extern void zfcp_fc_scan_ports(struct work_struct *); 102extern void zfcp_fc_scan_ports(struct work_struct *);
100extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *); 103extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *);
101extern void zfcp_fc_port_did_lookup(struct work_struct *); 104extern void zfcp_fc_port_did_lookup(struct work_struct *);
@@ -146,9 +149,10 @@ extern void zfcp_qdio_destroy(struct zfcp_qdio *);
146extern int zfcp_qdio_sbal_get(struct zfcp_qdio *); 149extern int zfcp_qdio_sbal_get(struct zfcp_qdio *);
147extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_qdio_req *); 150extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_qdio_req *);
148extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_qdio_req *, 151extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_qdio_req *,
149 struct scatterlist *, int); 152 struct scatterlist *);
150extern int zfcp_qdio_open(struct zfcp_qdio *); 153extern int zfcp_qdio_open(struct zfcp_qdio *);
151extern void zfcp_qdio_close(struct zfcp_qdio *); 154extern void zfcp_qdio_close(struct zfcp_qdio *);
155extern void zfcp_qdio_siosl(struct zfcp_adapter *);
152 156
153/* zfcp_scsi.c */ 157/* zfcp_scsi.c */
154extern struct zfcp_data zfcp_data; 158extern struct zfcp_data zfcp_data;
@@ -159,7 +163,10 @@ extern void zfcp_scsi_rport_work(struct work_struct *);
159extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *); 163extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *);
160extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *); 164extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *);
161extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *); 165extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *);
162extern void zfcp_scsi_scan(struct work_struct *); 166extern void zfcp_scsi_scan(struct zfcp_unit *);
167extern void zfcp_scsi_scan_work(struct work_struct *);
168extern void zfcp_scsi_set_prot(struct zfcp_adapter *);
169extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);
163 170
164/* zfcp_sysfs.c */ 171/* zfcp_sysfs.c */
165extern struct attribute_group zfcp_sysfs_unit_attrs; 172extern struct attribute_group zfcp_sysfs_unit_attrs;
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 6f8ab43a4856..6f3ed2b9a349 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -23,6 +23,58 @@ static u32 zfcp_fc_rscn_range_mask[] = {
23 [ELS_ADDR_FMT_FAB] = 0x000000, 23 [ELS_ADDR_FMT_FAB] = 0x000000,
24}; 24};
25 25
26/**
27 * zfcp_fc_post_event - post event to userspace via fc_transport
28 * @work: work struct with enqueued events
29 */
30void zfcp_fc_post_event(struct work_struct *work)
31{
32 struct zfcp_fc_event *event = NULL, *tmp = NULL;
33 LIST_HEAD(tmp_lh);
34 struct zfcp_fc_events *events = container_of(work,
35 struct zfcp_fc_events, work);
36 struct zfcp_adapter *adapter = container_of(events, struct zfcp_adapter,
37 events);
38
39 spin_lock_bh(&events->list_lock);
40 list_splice_init(&events->list, &tmp_lh);
41 spin_unlock_bh(&events->list_lock);
42
43 list_for_each_entry_safe(event, tmp, &tmp_lh, list) {
44 fc_host_post_event(adapter->scsi_host, fc_get_event_number(),
45 event->code, event->data);
46 list_del(&event->list);
47 kfree(event);
48 }
49
50}
51
52/**
53 * zfcp_fc_enqueue_event - safely enqueue FC HBA API event from irq context
54 * @adapter: The adapter where to enqueue the event
55 * @event_code: The event code (as defined in fc_host_event_code in
56 * scsi_transport_fc.h)
57 * @event_data: The event data (e.g. n_port page in case of els)
58 */
59void zfcp_fc_enqueue_event(struct zfcp_adapter *adapter,
60 enum fc_host_event_code event_code, u32 event_data)
61{
62 struct zfcp_fc_event *event;
63
64 event = kmalloc(sizeof(struct zfcp_fc_event), GFP_ATOMIC);
65 if (!event)
66 return;
67
68 event->code = event_code;
69 event->data = event_data;
70
71 spin_lock(&adapter->events.list_lock);
72 list_add_tail(&event->list, &adapter->events.list);
73 spin_unlock(&adapter->events.list_lock);
74
75 queue_work(adapter->work_queue, &adapter->events.work);
76}
77
26static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port) 78static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port)
27{ 79{
28 if (mutex_lock_interruptible(&wka_port->mutex)) 80 if (mutex_lock_interruptible(&wka_port->mutex))
@@ -148,6 +200,8 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
148 afmt = page->rscn_page_flags & ELS_RSCN_ADDR_FMT_MASK; 200 afmt = page->rscn_page_flags & ELS_RSCN_ADDR_FMT_MASK;
149 _zfcp_fc_incoming_rscn(fsf_req, zfcp_fc_rscn_range_mask[afmt], 201 _zfcp_fc_incoming_rscn(fsf_req, zfcp_fc_rscn_range_mask[afmt],
150 page); 202 page);
203 zfcp_fc_enqueue_event(fsf_req->adapter, FCH_EVT_RSCN,
204 *(u32 *)page);
151 } 205 }
152 queue_work(fsf_req->adapter->work_queue, &fsf_req->adapter->scan_work); 206 queue_work(fsf_req->adapter->work_queue, &fsf_req->adapter->scan_work);
153} 207}
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index 0747b087390d..938d50360166 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -30,6 +30,30 @@
30#define ZFCP_FC_CTELS_TMO (2 * FC_DEF_R_A_TOV / 1000) 30#define ZFCP_FC_CTELS_TMO (2 * FC_DEF_R_A_TOV / 1000)
31 31
32/** 32/**
33 * struct zfcp_fc_event - FC HBAAPI event for internal queueing from irq context
34 * @code: Event code
35 * @data: Event data
36 * @list: list_head for zfcp_fc_events list
37 */
38struct zfcp_fc_event {
39 enum fc_host_event_code code;
40 u32 data;
41 struct list_head list;
42};
43
44/**
45 * struct zfcp_fc_events - Infrastructure for posting FC events from irq context
46 * @list: List for queueing of events from irq context to workqueue
47 * @list_lock: Lock for event list
48 * @work: work_struct for forwarding events in workqueue
49*/
50struct zfcp_fc_events {
51 struct list_head list;
52 spinlock_t list_lock;
53 struct work_struct work;
54};
55
56/**
33 * struct zfcp_fc_gid_pn_req - container for ct header plus gid_pn request 57 * struct zfcp_fc_gid_pn_req - container for ct header plus gid_pn request
34 * @ct_hdr: FC GS common transport header 58 * @ct_hdr: FC GS common transport header
35 * @gid_pn: GID_PN request 59 * @gid_pn: GID_PN request
@@ -196,6 +220,9 @@ void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi)
196 memcpy(fcp->fc_cdb, scsi->cmnd, scsi->cmd_len); 220 memcpy(fcp->fc_cdb, scsi->cmnd, scsi->cmd_len);
197 221
198 fcp->fc_dl = scsi_bufflen(scsi); 222 fcp->fc_dl = scsi_bufflen(scsi);
223
224 if (scsi_get_prot_type(scsi) == SCSI_PROT_DIF_TYPE1)
225 fcp->fc_dl += fcp->fc_dl / scsi->device->sector_size * 8;
199} 226}
200 227
201/** 228/**
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 71663fb77310..9d1d7d1842ce 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -21,6 +21,7 @@
21static void zfcp_fsf_request_timeout_handler(unsigned long data) 21static void zfcp_fsf_request_timeout_handler(unsigned long data)
22{ 22{
23 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; 23 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
24 zfcp_qdio_siosl(adapter);
24 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 25 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
25 "fsrth_1", NULL); 26 "fsrth_1", NULL);
26} 27}
@@ -274,6 +275,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
274 break; 275 break;
275 case FSF_STATUS_READ_LINK_DOWN: 276 case FSF_STATUS_READ_LINK_DOWN:
276 zfcp_fsf_status_read_link_down(req); 277 zfcp_fsf_status_read_link_down(req);
278 zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKDOWN, 0);
277 break; 279 break;
278 case FSF_STATUS_READ_LINK_UP: 280 case FSF_STATUS_READ_LINK_UP:
279 dev_info(&adapter->ccw_device->dev, 281 dev_info(&adapter->ccw_device->dev,
@@ -286,6 +288,8 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
286 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 288 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
287 ZFCP_STATUS_COMMON_ERP_FAILED, 289 ZFCP_STATUS_COMMON_ERP_FAILED,
288 "fssrh_2", req); 290 "fssrh_2", req);
291 zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0);
292
289 break; 293 break;
290 case FSF_STATUS_READ_NOTIFICATION_LOST: 294 case FSF_STATUS_READ_NOTIFICATION_LOST:
291 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED) 295 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED)
@@ -323,6 +327,7 @@ static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
323 dev_err(&req->adapter->ccw_device->dev, 327 dev_err(&req->adapter->ccw_device->dev,
324 "The FCP adapter reported a problem " 328 "The FCP adapter reported a problem "
325 "that cannot be recovered\n"); 329 "that cannot be recovered\n");
330 zfcp_qdio_siosl(req->adapter);
326 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1", req); 331 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1", req);
327 break; 332 break;
328 } 333 }
@@ -413,6 +418,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
413 dev_err(&adapter->ccw_device->dev, 418 dev_err(&adapter->ccw_device->dev,
414 "0x%x is not a valid transfer protocol status\n", 419 "0x%x is not a valid transfer protocol status\n",
415 qtcb->prefix.prot_status); 420 qtcb->prefix.prot_status);
421 zfcp_qdio_siosl(adapter);
416 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9", req); 422 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9", req);
417 } 423 }
418 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 424 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -495,7 +501,7 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
495 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3; 501 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
496 502
497 adapter->hydra_version = bottom->adapter_type; 503 adapter->hydra_version = bottom->adapter_type;
498 adapter->timer_ticks = bottom->timer_interval; 504 adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
499 adapter->stat_read_buf_num = max(bottom->status_read_buf_num, 505 adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
500 (u16)FSF_STATUS_READS_RECOM); 506 (u16)FSF_STATUS_READS_RECOM);
501 507
@@ -523,6 +529,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
523 return -EIO; 529 return -EIO;
524 } 530 }
525 531
532 zfcp_scsi_set_prot(adapter);
533
526 return 0; 534 return 0;
527} 535}
528 536
@@ -732,7 +740,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
732 740
733 zfcp_reqlist_add(adapter->req_list, req); 741 zfcp_reqlist_add(adapter->req_list, req);
734 742
735 req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q.count); 743 req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
736 req->issued = get_clock(); 744 req->issued = get_clock();
737 if (zfcp_qdio_send(qdio, &req->qdio_req)) { 745 if (zfcp_qdio_send(qdio, &req->qdio_req)) {
738 del_timer(&req->timer); 746 del_timer(&req->timer);
@@ -959,8 +967,7 @@ static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio,
959 967
960static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, 968static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
961 struct scatterlist *sg_req, 969 struct scatterlist *sg_req,
962 struct scatterlist *sg_resp, 970 struct scatterlist *sg_resp)
963 int max_sbals)
964{ 971{
965 struct zfcp_adapter *adapter = req->adapter; 972 struct zfcp_adapter *adapter = req->adapter;
966 u32 feat = adapter->adapter_features; 973 u32 feat = adapter->adapter_features;
@@ -983,18 +990,19 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
983 return 0; 990 return 0;
984 } 991 }
985 992
986 bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req, 993 bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req, sg_req);
987 sg_req, max_sbals);
988 if (bytes <= 0) 994 if (bytes <= 0)
989 return -EIO; 995 return -EIO;
996 zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
990 req->qtcb->bottom.support.req_buf_length = bytes; 997 req->qtcb->bottom.support.req_buf_length = bytes;
991 zfcp_qdio_skip_to_last_sbale(&req->qdio_req); 998 zfcp_qdio_skip_to_last_sbale(&req->qdio_req);
992 999
993 bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req, 1000 bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
994 sg_resp, max_sbals); 1001 sg_resp);
995 req->qtcb->bottom.support.resp_buf_length = bytes; 1002 req->qtcb->bottom.support.resp_buf_length = bytes;
996 if (bytes <= 0) 1003 if (bytes <= 0)
997 return -EIO; 1004 return -EIO;
1005 zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
998 1006
999 return 0; 1007 return 0;
1000} 1008}
@@ -1002,11 +1010,11 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
1002static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req, 1010static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
1003 struct scatterlist *sg_req, 1011 struct scatterlist *sg_req,
1004 struct scatterlist *sg_resp, 1012 struct scatterlist *sg_resp,
1005 int max_sbals, unsigned int timeout) 1013 unsigned int timeout)
1006{ 1014{
1007 int ret; 1015 int ret;
1008 1016
1009 ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp, max_sbals); 1017 ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp);
1010 if (ret) 1018 if (ret)
1011 return ret; 1019 return ret;
1012 1020
@@ -1046,8 +1054,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1046 } 1054 }
1047 1055
1048 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1056 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1049 ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, 1057 ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout);
1050 ZFCP_FSF_MAX_SBALS_PER_REQ, timeout);
1051 if (ret) 1058 if (ret)
1052 goto failed_send; 1059 goto failed_send;
1053 1060
@@ -1143,7 +1150,10 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1143 } 1150 }
1144 1151
1145 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1152 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1146 ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, 2, timeout); 1153
1154 zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);
1155
1156 ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout);
1147 1157
1148 if (ret) 1158 if (ret)
1149 goto failed_send; 1159 goto failed_send;
@@ -2025,7 +2035,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
2025 blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC; 2035 blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
2026 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 2036 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
2027 blktrc.flags |= ZFCP_BLK_REQ_ERROR; 2037 blktrc.flags |= ZFCP_BLK_REQ_ERROR;
2028 blktrc.inb_usage = req->qdio_req.qdio_inb_usage; 2038 blktrc.inb_usage = 0;
2029 blktrc.outb_usage = req->qdio_req.qdio_outb_usage; 2039 blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
2030 2040
2031 if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA && 2041 if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
@@ -2035,9 +2045,13 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
2035 blktrc.fabric_lat = lat_in->fabric_lat * ticks; 2045 blktrc.fabric_lat = lat_in->fabric_lat * ticks;
2036 2046
2037 switch (req->qtcb->bottom.io.data_direction) { 2047 switch (req->qtcb->bottom.io.data_direction) {
2048 case FSF_DATADIR_DIF_READ_STRIP:
2049 case FSF_DATADIR_DIF_READ_CONVERT:
2038 case FSF_DATADIR_READ: 2050 case FSF_DATADIR_READ:
2039 lat = &unit->latencies.read; 2051 lat = &unit->latencies.read;
2040 break; 2052 break;
2053 case FSF_DATADIR_DIF_WRITE_INSERT:
2054 case FSF_DATADIR_DIF_WRITE_CONVERT:
2041 case FSF_DATADIR_WRITE: 2055 case FSF_DATADIR_WRITE:
2042 lat = &unit->latencies.write; 2056 lat = &unit->latencies.write;
2043 break; 2057 break;
@@ -2078,6 +2092,21 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
2078 goto skip_fsfstatus; 2092 goto skip_fsfstatus;
2079 } 2093 }
2080 2094
2095 switch (req->qtcb->header.fsf_status) {
2096 case FSF_INCONSISTENT_PROT_DATA:
2097 case FSF_INVALID_PROT_PARM:
2098 set_host_byte(scpnt, DID_ERROR);
2099 goto skip_fsfstatus;
2100 case FSF_BLOCK_GUARD_CHECK_FAILURE:
2101 zfcp_scsi_dif_sense_error(scpnt, 0x1);
2102 goto skip_fsfstatus;
2103 case FSF_APP_TAG_CHECK_FAILURE:
2104 zfcp_scsi_dif_sense_error(scpnt, 0x2);
2105 goto skip_fsfstatus;
2106 case FSF_REF_TAG_CHECK_FAILURE:
2107 zfcp_scsi_dif_sense_error(scpnt, 0x3);
2108 goto skip_fsfstatus;
2109 }
2081 fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp; 2110 fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
2082 zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt); 2111 zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
2083 2112
@@ -2187,6 +2216,44 @@ skip_fsfstatus:
2187 } 2216 }
2188} 2217}
2189 2218
2219static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir)
2220{
2221 switch (scsi_get_prot_op(scsi_cmnd)) {
2222 case SCSI_PROT_NORMAL:
2223 switch (scsi_cmnd->sc_data_direction) {
2224 case DMA_NONE:
2225 *data_dir = FSF_DATADIR_CMND;
2226 break;
2227 case DMA_FROM_DEVICE:
2228 *data_dir = FSF_DATADIR_READ;
2229 break;
2230 case DMA_TO_DEVICE:
2231 *data_dir = FSF_DATADIR_WRITE;
2232 break;
2233 case DMA_BIDIRECTIONAL:
2234 return -EINVAL;
2235 }
2236 break;
2237
2238 case SCSI_PROT_READ_STRIP:
2239 *data_dir = FSF_DATADIR_DIF_READ_STRIP;
2240 break;
2241 case SCSI_PROT_WRITE_INSERT:
2242 *data_dir = FSF_DATADIR_DIF_WRITE_INSERT;
2243 break;
2244 case SCSI_PROT_READ_PASS:
2245 *data_dir = FSF_DATADIR_DIF_READ_CONVERT;
2246 break;
2247 case SCSI_PROT_WRITE_PASS:
2248 *data_dir = FSF_DATADIR_DIF_WRITE_CONVERT;
2249 break;
2250 default:
2251 return -EINVAL;
2252 }
2253
2254 return 0;
2255}
2256
2190/** 2257/**
2191 * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command) 2258 * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command)
2192 * @unit: unit where command is sent to 2259 * @unit: unit where command is sent to
@@ -2198,16 +2265,17 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2198 struct zfcp_fsf_req *req; 2265 struct zfcp_fsf_req *req;
2199 struct fcp_cmnd *fcp_cmnd; 2266 struct fcp_cmnd *fcp_cmnd;
2200 unsigned int sbtype = SBAL_FLAGS0_TYPE_READ; 2267 unsigned int sbtype = SBAL_FLAGS0_TYPE_READ;
2201 int real_bytes, retval = -EIO; 2268 int real_bytes, retval = -EIO, dix_bytes = 0;
2202 struct zfcp_adapter *adapter = unit->port->adapter; 2269 struct zfcp_adapter *adapter = unit->port->adapter;
2203 struct zfcp_qdio *qdio = adapter->qdio; 2270 struct zfcp_qdio *qdio = adapter->qdio;
2271 struct fsf_qtcb_bottom_io *io;
2204 2272
2205 if (unlikely(!(atomic_read(&unit->status) & 2273 if (unlikely(!(atomic_read(&unit->status) &
2206 ZFCP_STATUS_COMMON_UNBLOCKED))) 2274 ZFCP_STATUS_COMMON_UNBLOCKED)))
2207 return -EBUSY; 2275 return -EBUSY;
2208 2276
2209 spin_lock(&qdio->req_q_lock); 2277 spin_lock(&qdio->req_q_lock);
2210 if (atomic_read(&qdio->req_q.count) <= 0) { 2278 if (atomic_read(&qdio->req_q_free) <= 0) {
2211 atomic_inc(&qdio->req_q_full); 2279 atomic_inc(&qdio->req_q_full);
2212 goto out; 2280 goto out;
2213 } 2281 }
@@ -2223,56 +2291,45 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2223 goto out; 2291 goto out;
2224 } 2292 }
2225 2293
2294 scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
2295
2296 io = &req->qtcb->bottom.io;
2226 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 2297 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2227 req->unit = unit; 2298 req->unit = unit;
2228 req->data = scsi_cmnd; 2299 req->data = scsi_cmnd;
2229 req->handler = zfcp_fsf_send_fcp_command_handler; 2300 req->handler = zfcp_fsf_send_fcp_command_handler;
2230 req->qtcb->header.lun_handle = unit->handle; 2301 req->qtcb->header.lun_handle = unit->handle;
2231 req->qtcb->header.port_handle = unit->port->handle; 2302 req->qtcb->header.port_handle = unit->port->handle;
2232 req->qtcb->bottom.io.service_class = FSF_CLASS_3; 2303 io->service_class = FSF_CLASS_3;
2233 req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN; 2304 io->fcp_cmnd_length = FCP_CMND_LEN;
2234 2305
2235 scsi_cmnd->host_scribble = (unsigned char *) req->req_id; 2306 if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) {
2236 2307 io->data_block_length = scsi_cmnd->device->sector_size;
2237 /* 2308 io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF;
2238 * set depending on data direction:
2239 * data direction bits in SBALE (SB Type)
2240 * data direction bits in QTCB
2241 */
2242 switch (scsi_cmnd->sc_data_direction) {
2243 case DMA_NONE:
2244 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2245 break;
2246 case DMA_FROM_DEVICE:
2247 req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
2248 break;
2249 case DMA_TO_DEVICE:
2250 req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE;
2251 break;
2252 case DMA_BIDIRECTIONAL:
2253 goto failed_scsi_cmnd;
2254 } 2309 }
2255 2310
2311 zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction);
2312
2256 get_device(&unit->dev); 2313 get_device(&unit->dev);
2257 2314
2258 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; 2315 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
2259 zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd); 2316 zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
2260 2317
2318 if (scsi_prot_sg_count(scsi_cmnd)) {
2319 zfcp_qdio_set_data_div(qdio, &req->qdio_req,
2320 scsi_prot_sg_count(scsi_cmnd));
2321 dix_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
2322 scsi_prot_sglist(scsi_cmnd));
2323 io->prot_data_length = dix_bytes;
2324 }
2325
2261 real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, 2326 real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
2262 scsi_sglist(scsi_cmnd), 2327 scsi_sglist(scsi_cmnd));
2263 ZFCP_FSF_MAX_SBALS_PER_REQ); 2328
2264 if (unlikely(real_bytes < 0)) { 2329 if (unlikely(real_bytes < 0) || unlikely(dix_bytes < 0))
2265 if (req->qdio_req.sbal_number >= ZFCP_FSF_MAX_SBALS_PER_REQ) {
2266 dev_err(&adapter->ccw_device->dev,
2267 "Oversize data package, unit 0x%016Lx "
2268 "on port 0x%016Lx closed\n",
2269 (unsigned long long)unit->fcp_lun,
2270 (unsigned long long)unit->port->wwpn);
2271 zfcp_erp_unit_shutdown(unit, 0, "fssfct1", req);
2272 retval = -EINVAL;
2273 }
2274 goto failed_scsi_cmnd; 2330 goto failed_scsi_cmnd;
2275 } 2331
2332 zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
2276 2333
2277 retval = zfcp_fsf_req_send(req); 2334 retval = zfcp_fsf_req_send(req);
2278 if (unlikely(retval)) 2335 if (unlikely(retval))
@@ -2391,13 +2448,13 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2391 bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE; 2448 bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
2392 bottom->option = fsf_cfdc->option; 2449 bottom->option = fsf_cfdc->option;
2393 2450
2394 bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, 2451 bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, fsf_cfdc->sg);
2395 fsf_cfdc->sg, 2452
2396 ZFCP_FSF_MAX_SBALS_PER_REQ);
2397 if (bytes != ZFCP_CFDC_MAX_SIZE) { 2453 if (bytes != ZFCP_CFDC_MAX_SIZE) {
2398 zfcp_fsf_req_free(req); 2454 zfcp_fsf_req_free(req);
2399 goto out; 2455 goto out;
2400 } 2456 }
2457 zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
2401 2458
2402 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 2459 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
2403 retval = zfcp_fsf_req_send(req); 2460 retval = zfcp_fsf_req_send(req);
@@ -2419,7 +2476,7 @@ out:
2419void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx) 2476void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
2420{ 2477{
2421 struct zfcp_adapter *adapter = qdio->adapter; 2478 struct zfcp_adapter *adapter = qdio->adapter;
2422 struct qdio_buffer *sbal = qdio->resp_q.sbal[sbal_idx]; 2479 struct qdio_buffer *sbal = qdio->res_q[sbal_idx];
2423 struct qdio_buffer_element *sbale; 2480 struct qdio_buffer_element *sbale;
2424 struct zfcp_fsf_req *fsf_req; 2481 struct zfcp_fsf_req *fsf_req;
2425 unsigned long req_id; 2482 unsigned long req_id;
@@ -2431,17 +2488,17 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
2431 req_id = (unsigned long) sbale->addr; 2488 req_id = (unsigned long) sbale->addr;
2432 fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id); 2489 fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
2433 2490
2434 if (!fsf_req) 2491 if (!fsf_req) {
2435 /* 2492 /*
2436 * Unknown request means that we have potentially memory 2493 * Unknown request means that we have potentially memory
2437 * corruption and must stop the machine immediately. 2494 * corruption and must stop the machine immediately.
2438 */ 2495 */
2496 zfcp_qdio_siosl(adapter);
2439 panic("error: unknown req_id (%lx) on adapter %s.\n", 2497 panic("error: unknown req_id (%lx) on adapter %s.\n",
2440 req_id, dev_name(&adapter->ccw_device->dev)); 2498 req_id, dev_name(&adapter->ccw_device->dev));
2499 }
2441 2500
2442 fsf_req->qdio_req.sbal_response = sbal_idx; 2501 fsf_req->qdio_req.sbal_response = sbal_idx;
2443 fsf_req->qdio_req.qdio_inb_usage =
2444 atomic_read(&qdio->resp_q.count);
2445 zfcp_fsf_req_complete(fsf_req); 2502 zfcp_fsf_req_complete(fsf_req);
2446 2503
2447 if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY)) 2504 if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY))
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 519083fd6e89..db8c85382dca 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -80,11 +80,15 @@
80#define FSF_REQUEST_SIZE_TOO_LARGE 0x00000061 80#define FSF_REQUEST_SIZE_TOO_LARGE 0x00000061
81#define FSF_RESPONSE_SIZE_TOO_LARGE 0x00000062 81#define FSF_RESPONSE_SIZE_TOO_LARGE 0x00000062
82#define FSF_SBAL_MISMATCH 0x00000063 82#define FSF_SBAL_MISMATCH 0x00000063
83#define FSF_INCONSISTENT_PROT_DATA 0x00000070
84#define FSF_INVALID_PROT_PARM 0x00000071
85#define FSF_BLOCK_GUARD_CHECK_FAILURE 0x00000081
86#define FSF_APP_TAG_CHECK_FAILURE 0x00000082
87#define FSF_REF_TAG_CHECK_FAILURE 0x00000083
83#define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD 88#define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD
84#define FSF_UNKNOWN_COMMAND 0x000000E2 89#define FSF_UNKNOWN_COMMAND 0x000000E2
85#define FSF_UNKNOWN_OP_SUBTYPE 0x000000E3 90#define FSF_UNKNOWN_OP_SUBTYPE 0x000000E3
86#define FSF_INVALID_COMMAND_OPTION 0x000000E5 91#define FSF_INVALID_COMMAND_OPTION 0x000000E5
87/* #define FSF_ERROR 0x000000FF */
88 92
89#define FSF_PROT_STATUS_QUAL_SIZE 16 93#define FSF_PROT_STATUS_QUAL_SIZE 16
90#define FSF_STATUS_QUALIFIER_SIZE 16 94#define FSF_STATUS_QUALIFIER_SIZE 16
@@ -147,18 +151,17 @@
147#define FSF_DATADIR_WRITE 0x00000001 151#define FSF_DATADIR_WRITE 0x00000001
148#define FSF_DATADIR_READ 0x00000002 152#define FSF_DATADIR_READ 0x00000002
149#define FSF_DATADIR_CMND 0x00000004 153#define FSF_DATADIR_CMND 0x00000004
154#define FSF_DATADIR_DIF_WRITE_INSERT 0x00000009
155#define FSF_DATADIR_DIF_READ_STRIP 0x0000000a
156#define FSF_DATADIR_DIF_WRITE_CONVERT 0x0000000b
157#define FSF_DATADIR_DIF_READ_CONVERT 0X0000000c
158
159/* data protection control flags */
160#define FSF_APP_TAG_CHECK_ENABLE 0x10
150 161
151/* fc service class */ 162/* fc service class */
152#define FSF_CLASS_3 0x00000003 163#define FSF_CLASS_3 0x00000003
153 164
154/* SBAL chaining */
155#define ZFCP_FSF_MAX_SBALS_PER_REQ 36
156
157/* max. number of (data buffer) SBALEs in largest SBAL chain
158 * request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */
159#define ZFCP_FSF_MAX_SBALES_PER_REQ \
160 (ZFCP_FSF_MAX_SBALS_PER_REQ * ZFCP_QDIO_MAX_SBALES_PER_SBAL - 2)
161
162/* logging space behind QTCB */ 165/* logging space behind QTCB */
163#define FSF_QTCB_LOG_SIZE 1024 166#define FSF_QTCB_LOG_SIZE 1024
164 167
@@ -170,6 +173,8 @@
170#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020 173#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020
171#define FSF_FEATURE_UPDATE_ALERT 0x00000100 174#define FSF_FEATURE_UPDATE_ALERT 0x00000100
172#define FSF_FEATURE_MEASUREMENT_DATA 0x00000200 175#define FSF_FEATURE_MEASUREMENT_DATA 0x00000200
176#define FSF_FEATURE_DIF_PROT_TYPE1 0x00010000
177#define FSF_FEATURE_DIX_PROT_TCPIP 0x00020000
173 178
174/* host connection features */ 179/* host connection features */
175#define FSF_FEATURE_NPIV_MODE 0x00000001 180#define FSF_FEATURE_NPIV_MODE 0x00000001
@@ -324,9 +329,14 @@ struct fsf_qtcb_header {
324struct fsf_qtcb_bottom_io { 329struct fsf_qtcb_bottom_io {
325 u32 data_direction; 330 u32 data_direction;
326 u32 service_class; 331 u32 service_class;
327 u8 res1[8]; 332 u8 res1;
333 u8 data_prot_flags;
334 u16 app_tag_value;
335 u32 ref_tag_value;
328 u32 fcp_cmnd_length; 336 u32 fcp_cmnd_length;
329 u8 res2[12]; 337 u32 data_block_length;
338 u32 prot_data_length;
339 u8 res2[4];
330 u8 fcp_cmnd[FSF_FCP_CMND_SIZE]; 340 u8 fcp_cmnd[FSF_FCP_CMND_SIZE];
331 u8 fcp_rsp[FSF_FCP_RSP_SIZE]; 341 u8 fcp_rsp[FSF_FCP_RSP_SIZE];
332 u8 res3[64]; 342 u8 res3[64];
@@ -352,6 +362,8 @@ struct fsf_qtcb_bottom_support {
352 u8 els[256]; 362 u8 els[256];
353} __attribute__ ((packed)); 363} __attribute__ ((packed));
354 364
365#define ZFCP_FSF_TIMER_INT_MASK 0x3FFF
366
355struct fsf_qtcb_bottom_config { 367struct fsf_qtcb_bottom_config {
356 u32 lic_version; 368 u32 lic_version;
357 u32 feature_selection; 369 u32 feature_selection;
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 6fa5e0453176..b2635759721c 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -30,12 +30,15 @@ static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
30 return 0; 30 return 0;
31} 31}
32 32
33static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id) 33static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id,
34 unsigned int qdio_err)
34{ 35{
35 struct zfcp_adapter *adapter = qdio->adapter; 36 struct zfcp_adapter *adapter = qdio->adapter;
36 37
37 dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n"); 38 dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");
38 39
40 if (qdio_err & QDIO_ERROR_SLSB_STATE)
41 zfcp_qdio_siosl(adapter);
39 zfcp_erp_adapter_reopen(adapter, 42 zfcp_erp_adapter_reopen(adapter,
40 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 43 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
41 ZFCP_STATUS_COMMON_ERP_FAILED, id, NULL); 44 ZFCP_STATUS_COMMON_ERP_FAILED, id, NULL);
@@ -55,72 +58,47 @@ static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
55static inline void zfcp_qdio_account(struct zfcp_qdio *qdio) 58static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
56{ 59{
57 unsigned long long now, span; 60 unsigned long long now, span;
58 int free, used; 61 int used;
59 62
60 spin_lock(&qdio->stat_lock); 63 spin_lock(&qdio->stat_lock);
61 now = get_clock_monotonic(); 64 now = get_clock_monotonic();
62 span = (now - qdio->req_q_time) >> 12; 65 span = (now - qdio->req_q_time) >> 12;
63 free = atomic_read(&qdio->req_q.count); 66 used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
64 used = QDIO_MAX_BUFFERS_PER_Q - free;
65 qdio->req_q_util += used * span; 67 qdio->req_q_util += used * span;
66 qdio->req_q_time = now; 68 qdio->req_q_time = now;
67 spin_unlock(&qdio->stat_lock); 69 spin_unlock(&qdio->stat_lock);
68} 70}
69 71
70static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err, 72static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
71 int queue_no, int first, int count, 73 int queue_no, int idx, int count,
72 unsigned long parm) 74 unsigned long parm)
73{ 75{
74 struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm; 76 struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
75 struct zfcp_qdio_queue *queue = &qdio->req_q;
76 77
77 if (unlikely(qdio_err)) { 78 if (unlikely(qdio_err)) {
78 zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first, 79 zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, idx, count);
79 count); 80 zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
80 zfcp_qdio_handler_error(qdio, "qdireq1");
81 return; 81 return;
82 } 82 }
83 83
84 /* cleanup all SBALs being program-owned now */ 84 /* cleanup all SBALs being program-owned now */
85 zfcp_qdio_zero_sbals(queue->sbal, first, count); 85 zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
86 86
87 zfcp_qdio_account(qdio); 87 zfcp_qdio_account(qdio);
88 atomic_add(count, &queue->count); 88 atomic_add(count, &qdio->req_q_free);
89 wake_up(&qdio->req_q_wq); 89 wake_up(&qdio->req_q_wq);
90} 90}
91 91
92static void zfcp_qdio_resp_put_back(struct zfcp_qdio *qdio, int processed)
93{
94 struct zfcp_qdio_queue *queue = &qdio->resp_q;
95 struct ccw_device *cdev = qdio->adapter->ccw_device;
96 u8 count, start = queue->first;
97 unsigned int retval;
98
99 count = atomic_read(&queue->count) + processed;
100
101 retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, start, count);
102
103 if (unlikely(retval)) {
104 atomic_set(&queue->count, count);
105 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdrpb_1", NULL);
106 } else {
107 queue->first += count;
108 queue->first %= QDIO_MAX_BUFFERS_PER_Q;
109 atomic_set(&queue->count, 0);
110 }
111}
112
113static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, 92static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
114 int queue_no, int first, int count, 93 int queue_no, int idx, int count,
115 unsigned long parm) 94 unsigned long parm)
116{ 95{
117 struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm; 96 struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
118 int sbal_idx, sbal_no; 97 int sbal_idx, sbal_no;
119 98
120 if (unlikely(qdio_err)) { 99 if (unlikely(qdio_err)) {
121 zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first, 100 zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, idx, count);
122 count); 101 zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
123 zfcp_qdio_handler_error(qdio, "qdires1");
124 return; 102 return;
125 } 103 }
126 104
@@ -129,25 +107,16 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
129 * returned by QDIO layer 107 * returned by QDIO layer
130 */ 108 */
131 for (sbal_no = 0; sbal_no < count; sbal_no++) { 109 for (sbal_no = 0; sbal_no < count; sbal_no++) {
132 sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q; 110 sbal_idx = (idx + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
133 /* go through all SBALEs of SBAL */ 111 /* go through all SBALEs of SBAL */
134 zfcp_fsf_reqid_check(qdio, sbal_idx); 112 zfcp_fsf_reqid_check(qdio, sbal_idx);
135 } 113 }
136 114
137 /* 115 /*
138 * put range of SBALs back to response queue 116 * put SBALs back to response queue
139 * (including SBALs which have already been free before)
140 */ 117 */
141 zfcp_qdio_resp_put_back(qdio, count); 118 if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count))
142} 119 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2", NULL);
143
144static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
145 struct zfcp_qdio_req *q_req, int max_sbals)
146{
147 int count = atomic_read(&qdio->req_q.count);
148 count = min(count, max_sbals);
149 q_req->sbal_limit = (q_req->sbal_first + count - 1)
150 % QDIO_MAX_BUFFERS_PER_Q;
151} 120}
152 121
153static struct qdio_buffer_element * 122static struct qdio_buffer_element *
@@ -173,6 +142,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
173 142
174 /* keep this requests number of SBALs up-to-date */ 143 /* keep this requests number of SBALs up-to-date */
175 q_req->sbal_number++; 144 q_req->sbal_number++;
145 BUG_ON(q_req->sbal_number > ZFCP_QDIO_MAX_SBALS_PER_REQ);
176 146
177 /* start at first SBALE of new SBAL */ 147 /* start at first SBALE of new SBAL */
178 q_req->sbale_curr = 0; 148 q_req->sbale_curr = 0;
@@ -193,17 +163,6 @@ zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
193 return zfcp_qdio_sbale_curr(qdio, q_req); 163 return zfcp_qdio_sbale_curr(qdio, q_req);
194} 164}
195 165
196static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio,
197 struct zfcp_qdio_req *q_req)
198{
199 struct qdio_buffer **sbal = qdio->req_q.sbal;
200 int first = q_req->sbal_first;
201 int last = q_req->sbal_last;
202 int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) %
203 QDIO_MAX_BUFFERS_PER_Q + 1;
204 zfcp_qdio_zero_sbals(sbal, first, count);
205}
206
207/** 166/**
208 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list 167 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
209 * @qdio: pointer to struct zfcp_qdio 168 * @qdio: pointer to struct zfcp_qdio
@@ -213,14 +172,11 @@ static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio,
213 * Returns: number of bytes, or error (negativ) 172 * Returns: number of bytes, or error (negativ)
214 */ 173 */
215int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, 174int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
216 struct scatterlist *sg, int max_sbals) 175 struct scatterlist *sg)
217{ 176{
218 struct qdio_buffer_element *sbale; 177 struct qdio_buffer_element *sbale;
219 int bytes = 0; 178 int bytes = 0;
220 179
221 /* figure out last allowed SBAL */
222 zfcp_qdio_sbal_limit(qdio, q_req, max_sbals);
223
224 /* set storage-block type for this request */ 180 /* set storage-block type for this request */
225 sbale = zfcp_qdio_sbale_req(qdio, q_req); 181 sbale = zfcp_qdio_sbale_req(qdio, q_req);
226 sbale->flags |= q_req->sbtype; 182 sbale->flags |= q_req->sbtype;
@@ -229,7 +185,8 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
229 sbale = zfcp_qdio_sbale_next(qdio, q_req); 185 sbale = zfcp_qdio_sbale_next(qdio, q_req);
230 if (!sbale) { 186 if (!sbale) {
231 atomic_inc(&qdio->req_q_full); 187 atomic_inc(&qdio->req_q_full);
232 zfcp_qdio_undo_sbals(qdio, q_req); 188 zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
189 q_req->sbal_number);
233 return -EINVAL; 190 return -EINVAL;
234 } 191 }
235 192
@@ -239,19 +196,13 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
239 bytes += sg->length; 196 bytes += sg->length;
240 } 197 }
241 198
242 /* assume that no other SBALEs are to follow in the same SBAL */
243 sbale = zfcp_qdio_sbale_curr(qdio, q_req);
244 sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
245
246 return bytes; 199 return bytes;
247} 200}
248 201
249static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio) 202static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
250{ 203{
251 struct zfcp_qdio_queue *req_q = &qdio->req_q;
252
253 spin_lock_bh(&qdio->req_q_lock); 204 spin_lock_bh(&qdio->req_q_lock);
254 if (atomic_read(&req_q->count) || 205 if (atomic_read(&qdio->req_q_free) ||
255 !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) 206 !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
256 return 1; 207 return 1;
257 spin_unlock_bh(&qdio->req_q_lock); 208 spin_unlock_bh(&qdio->req_q_lock);
@@ -300,25 +251,25 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
300 */ 251 */
301int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) 252int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
302{ 253{
303 struct zfcp_qdio_queue *req_q = &qdio->req_q;
304 int first = q_req->sbal_first;
305 int count = q_req->sbal_number;
306 int retval; 254 int retval;
307 unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT; 255 u8 sbal_number = q_req->sbal_number;
308 256
309 zfcp_qdio_account(qdio); 257 zfcp_qdio_account(qdio);
310 258
311 retval = do_QDIO(qdio->adapter->ccw_device, qdio_flags, 0, first, 259 retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
312 count); 260 q_req->sbal_first, sbal_number);
261
313 if (unlikely(retval)) { 262 if (unlikely(retval)) {
314 zfcp_qdio_zero_sbals(req_q->sbal, first, count); 263 zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
264 sbal_number);
315 return retval; 265 return retval;
316 } 266 }
317 267
318 /* account for transferred buffers */ 268 /* account for transferred buffers */
319 atomic_sub(count, &req_q->count); 269 atomic_sub(sbal_number, &qdio->req_q_free);
320 req_q->first += count; 270 qdio->req_q_idx += sbal_number;
321 req_q->first %= QDIO_MAX_BUFFERS_PER_Q; 271 qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;
272
322 return 0; 273 return 0;
323} 274}
324 275
@@ -331,6 +282,7 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
331 id->q_format = QDIO_ZFCP_QFMT; 282 id->q_format = QDIO_ZFCP_QFMT;
332 memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8); 283 memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8);
333 ASCEBC(id->adapter_name, 8); 284 ASCEBC(id->adapter_name, 8);
285 id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
334 id->qib_param_field_format = 0; 286 id->qib_param_field_format = 0;
335 id->qib_param_field = NULL; 287 id->qib_param_field = NULL;
336 id->input_slib_elements = NULL; 288 id->input_slib_elements = NULL;
@@ -340,10 +292,10 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
340 id->input_handler = zfcp_qdio_int_resp; 292 id->input_handler = zfcp_qdio_int_resp;
341 id->output_handler = zfcp_qdio_int_req; 293 id->output_handler = zfcp_qdio_int_req;
342 id->int_parm = (unsigned long) qdio; 294 id->int_parm = (unsigned long) qdio;
343 id->input_sbal_addr_array = (void **) (qdio->resp_q.sbal); 295 id->input_sbal_addr_array = (void **) (qdio->res_q);
344 id->output_sbal_addr_array = (void **) (qdio->req_q.sbal); 296 id->output_sbal_addr_array = (void **) (qdio->req_q);
345
346} 297}
298
347/** 299/**
348 * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data 300 * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
349 * @adapter: pointer to struct zfcp_adapter 301 * @adapter: pointer to struct zfcp_adapter
@@ -354,8 +306,8 @@ static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
354{ 306{
355 struct qdio_initialize init_data; 307 struct qdio_initialize init_data;
356 308
357 if (zfcp_qdio_buffers_enqueue(qdio->req_q.sbal) || 309 if (zfcp_qdio_buffers_enqueue(qdio->req_q) ||
358 zfcp_qdio_buffers_enqueue(qdio->resp_q.sbal)) 310 zfcp_qdio_buffers_enqueue(qdio->res_q))
359 return -ENOMEM; 311 return -ENOMEM;
360 312
361 zfcp_qdio_setup_init_data(&init_data, qdio); 313 zfcp_qdio_setup_init_data(&init_data, qdio);
@@ -369,34 +321,30 @@ static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
369 */ 321 */
370void zfcp_qdio_close(struct zfcp_qdio *qdio) 322void zfcp_qdio_close(struct zfcp_qdio *qdio)
371{ 323{
372 struct zfcp_qdio_queue *req_q; 324 struct zfcp_adapter *adapter = qdio->adapter;
373 int first, count; 325 int idx, count;
374 326
375 if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) 327 if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
376 return; 328 return;
377 329
378 /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */ 330 /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
379 req_q = &qdio->req_q;
380 spin_lock_bh(&qdio->req_q_lock); 331 spin_lock_bh(&qdio->req_q_lock);
381 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status); 332 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
382 spin_unlock_bh(&qdio->req_q_lock); 333 spin_unlock_bh(&qdio->req_q_lock);
383 334
384 wake_up(&qdio->req_q_wq); 335 wake_up(&qdio->req_q_wq);
385 336
386 qdio_shutdown(qdio->adapter->ccw_device, 337 qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
387 QDIO_FLAG_CLEANUP_USING_CLEAR);
388 338
389 /* cleanup used outbound sbals */ 339 /* cleanup used outbound sbals */
390 count = atomic_read(&req_q->count); 340 count = atomic_read(&qdio->req_q_free);
391 if (count < QDIO_MAX_BUFFERS_PER_Q) { 341 if (count < QDIO_MAX_BUFFERS_PER_Q) {
392 first = (req_q->first + count) % QDIO_MAX_BUFFERS_PER_Q; 342 idx = (qdio->req_q_idx + count) % QDIO_MAX_BUFFERS_PER_Q;
393 count = QDIO_MAX_BUFFERS_PER_Q - count; 343 count = QDIO_MAX_BUFFERS_PER_Q - count;
394 zfcp_qdio_zero_sbals(req_q->sbal, first, count); 344 zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
395 } 345 }
396 req_q->first = 0; 346 qdio->req_q_idx = 0;
397 atomic_set(&req_q->count, 0); 347 atomic_set(&qdio->req_q_free, 0);
398 qdio->resp_q.first = 0;
399 atomic_set(&qdio->resp_q.count, 0);
400} 348}
401 349
402/** 350/**
@@ -408,34 +356,45 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
408{ 356{
409 struct qdio_buffer_element *sbale; 357 struct qdio_buffer_element *sbale;
410 struct qdio_initialize init_data; 358 struct qdio_initialize init_data;
411 struct ccw_device *cdev = qdio->adapter->ccw_device; 359 struct zfcp_adapter *adapter = qdio->adapter;
360 struct ccw_device *cdev = adapter->ccw_device;
361 struct qdio_ssqd_desc ssqd;
412 int cc; 362 int cc;
413 363
414 if (atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP) 364 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
415 return -EIO; 365 return -EIO;
416 366
367 atomic_clear_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
368 &qdio->adapter->status);
369
417 zfcp_qdio_setup_init_data(&init_data, qdio); 370 zfcp_qdio_setup_init_data(&init_data, qdio);
418 371
419 if (qdio_establish(&init_data)) 372 if (qdio_establish(&init_data))
420 goto failed_establish; 373 goto failed_establish;
421 374
375 if (qdio_get_ssqd_desc(init_data.cdev, &ssqd))
376 goto failed_qdio;
377
378 if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
379 atomic_set_mask(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
380 &qdio->adapter->status);
381
422 if (qdio_activate(cdev)) 382 if (qdio_activate(cdev))
423 goto failed_qdio; 383 goto failed_qdio;
424 384
425 for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) { 385 for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
426 sbale = &(qdio->resp_q.sbal[cc]->element[0]); 386 sbale = &(qdio->res_q[cc]->element[0]);
427 sbale->length = 0; 387 sbale->length = 0;
428 sbale->flags = SBAL_FLAGS_LAST_ENTRY; 388 sbale->flags = SBAL_FLAGS_LAST_ENTRY;
429 sbale->addr = NULL; 389 sbale->addr = NULL;
430 } 390 }
431 391
432 if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, 392 if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
433 QDIO_MAX_BUFFERS_PER_Q))
434 goto failed_qdio; 393 goto failed_qdio;
435 394
436 /* set index of first avalable SBALS / number of available SBALS */ 395 /* set index of first avalable SBALS / number of available SBALS */
437 qdio->req_q.first = 0; 396 qdio->req_q_idx = 0;
438 atomic_set(&qdio->req_q.count, QDIO_MAX_BUFFERS_PER_Q); 397 atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
439 398
440 return 0; 399 return 0;
441 400
@@ -449,7 +408,6 @@ failed_establish:
449 408
450void zfcp_qdio_destroy(struct zfcp_qdio *qdio) 409void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
451{ 410{
452 struct qdio_buffer **sbal_req, **sbal_resp;
453 int p; 411 int p;
454 412
455 if (!qdio) 413 if (!qdio)
@@ -458,12 +416,9 @@ void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
458 if (qdio->adapter->ccw_device) 416 if (qdio->adapter->ccw_device)
459 qdio_free(qdio->adapter->ccw_device); 417 qdio_free(qdio->adapter->ccw_device);
460 418
461 sbal_req = qdio->req_q.sbal;
462 sbal_resp = qdio->resp_q.sbal;
463
464 for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) { 419 for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) {
465 free_page((unsigned long) sbal_req[p]); 420 free_page((unsigned long) qdio->req_q[p]);
466 free_page((unsigned long) sbal_resp[p]); 421 free_page((unsigned long) qdio->res_q[p]);
467 } 422 }
468 423
469 kfree(qdio); 424 kfree(qdio);
@@ -491,3 +446,26 @@ int zfcp_qdio_setup(struct zfcp_adapter *adapter)
491 return 0; 446 return 0;
492} 447}
493 448
449/**
450 * zfcp_qdio_siosl - Trigger logging in FCP channel
451 * @adapter: The zfcp_adapter where to trigger logging
452 *
453 * Call the cio siosl function to trigger hardware logging. This
454 * wrapper function sets a flag to ensure hardware logging is only
455 * triggered once before going through qdio shutdown.
456 *
457 * The triggers are always run from qdio tasklet context, so no
458 * additional synchronization is necessary.
459 */
460void zfcp_qdio_siosl(struct zfcp_adapter *adapter)
461{
462 int rc;
463
464 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SIOSL_ISSUED)
465 return;
466
467 rc = ccw_device_siosl(adapter->ccw_device);
468 if (!rc)
469 atomic_set_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
470 &adapter->status);
471}
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 138fba577b48..2297d8d3e947 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -19,22 +19,20 @@
19/* index of last SBALE (with respect to DMQ bug workaround) */ 19/* index of last SBALE (with respect to DMQ bug workaround) */
20#define ZFCP_QDIO_LAST_SBALE_PER_SBAL (ZFCP_QDIO_MAX_SBALES_PER_SBAL - 1) 20#define ZFCP_QDIO_LAST_SBALE_PER_SBAL (ZFCP_QDIO_MAX_SBALES_PER_SBAL - 1)
21 21
22/** 22/* Max SBALS for chaining */
23 * struct zfcp_qdio_queue - qdio queue buffer, zfcp index and free count 23#define ZFCP_QDIO_MAX_SBALS_PER_REQ 36
24 * @sbal: qdio buffers 24
25 * @first: index of next free buffer in queue 25/* max. number of (data buffer) SBALEs in largest SBAL chain
26 * @count: number of free buffers in queue 26 * request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */
27 */ 27#define ZFCP_QDIO_MAX_SBALES_PER_REQ \
28struct zfcp_qdio_queue { 28 (ZFCP_QDIO_MAX_SBALS_PER_REQ * ZFCP_QDIO_MAX_SBALES_PER_SBAL - 2)
29 struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
30 u8 first;
31 atomic_t count;
32};
33 29
34/** 30/**
35 * struct zfcp_qdio - basic qdio data structure 31 * struct zfcp_qdio - basic qdio data structure
36 * @resp_q: response queue 32 * @res_q: response queue
37 * @req_q: request queue 33 * @req_q: request queue
34 * @req_q_idx: index of next free buffer
35 * @req_q_free: number of free buffers in queue
38 * @stat_lock: lock to protect req_q_util and req_q_time 36 * @stat_lock: lock to protect req_q_util and req_q_time
39 * @req_q_lock: lock to serialize access to request queue 37 * @req_q_lock: lock to serialize access to request queue
40 * @req_q_time: time of last fill level change 38 * @req_q_time: time of last fill level change
@@ -44,8 +42,10 @@ struct zfcp_qdio_queue {
44 * @adapter: adapter used in conjunction with this qdio structure 42 * @adapter: adapter used in conjunction with this qdio structure
45 */ 43 */
46struct zfcp_qdio { 44struct zfcp_qdio {
47 struct zfcp_qdio_queue resp_q; 45 struct qdio_buffer *res_q[QDIO_MAX_BUFFERS_PER_Q];
48 struct zfcp_qdio_queue req_q; 46 struct qdio_buffer *req_q[QDIO_MAX_BUFFERS_PER_Q];
47 u8 req_q_idx;
48 atomic_t req_q_free;
49 spinlock_t stat_lock; 49 spinlock_t stat_lock;
50 spinlock_t req_q_lock; 50 spinlock_t req_q_lock;
51 unsigned long long req_q_time; 51 unsigned long long req_q_time;
@@ -65,7 +65,6 @@ struct zfcp_qdio {
65 * @sbale_curr: current sbale at creation of this request 65 * @sbale_curr: current sbale at creation of this request
66 * @sbal_response: sbal used in interrupt 66 * @sbal_response: sbal used in interrupt
67 * @qdio_outb_usage: usage of outbound queue 67 * @qdio_outb_usage: usage of outbound queue
68 * @qdio_inb_usage: usage of inbound queue
69 */ 68 */
70struct zfcp_qdio_req { 69struct zfcp_qdio_req {
71 u32 sbtype; 70 u32 sbtype;
@@ -76,22 +75,9 @@ struct zfcp_qdio_req {
76 u8 sbale_curr; 75 u8 sbale_curr;
77 u8 sbal_response; 76 u8 sbal_response;
78 u16 qdio_outb_usage; 77 u16 qdio_outb_usage;
79 u16 qdio_inb_usage;
80}; 78};
81 79
82/** 80/**
83 * zfcp_qdio_sbale - return pointer to sbale in qdio queue
84 * @q: queue where to find sbal
85 * @sbal_idx: sbal index in queue
86 * @sbale_idx: sbale index in sbal
87 */
88static inline struct qdio_buffer_element *
89zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
90{
91 return &q->sbal[sbal_idx]->element[sbale_idx];
92}
93
94/**
95 * zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request 81 * zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request
96 * @qdio: pointer to struct zfcp_qdio 82 * @qdio: pointer to struct zfcp_qdio
97 * @q_rec: pointer to struct zfcp_qdio_req 83 * @q_rec: pointer to struct zfcp_qdio_req
@@ -100,7 +86,7 @@ zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
100static inline struct qdio_buffer_element * 86static inline struct qdio_buffer_element *
101zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) 87zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
102{ 88{
103 return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 0); 89 return &qdio->req_q[q_req->sbal_last]->element[0];
104} 90}
105 91
106/** 92/**
@@ -112,8 +98,7 @@ zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
112static inline struct qdio_buffer_element * 98static inline struct qdio_buffer_element *
113zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) 99zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
114{ 100{
115 return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 101 return &qdio->req_q[q_req->sbal_last]->element[q_req->sbale_curr];
116 q_req->sbale_curr);
117} 102}
118 103
119/** 104/**
@@ -134,21 +119,25 @@ void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
134 unsigned long req_id, u32 sbtype, void *data, u32 len) 119 unsigned long req_id, u32 sbtype, void *data, u32 len)
135{ 120{
136 struct qdio_buffer_element *sbale; 121 struct qdio_buffer_element *sbale;
122 int count = min(atomic_read(&qdio->req_q_free),
123 ZFCP_QDIO_MAX_SBALS_PER_REQ);
137 124
138 q_req->sbal_first = q_req->sbal_last = qdio->req_q.first; 125 q_req->sbal_first = q_req->sbal_last = qdio->req_q_idx;
139 q_req->sbal_number = 1; 126 q_req->sbal_number = 1;
140 q_req->sbtype = sbtype; 127 q_req->sbtype = sbtype;
128 q_req->sbale_curr = 1;
129 q_req->sbal_limit = (q_req->sbal_first + count - 1)
130 % QDIO_MAX_BUFFERS_PER_Q;
141 131
142 sbale = zfcp_qdio_sbale_req(qdio, q_req); 132 sbale = zfcp_qdio_sbale_req(qdio, q_req);
143 sbale->addr = (void *) req_id; 133 sbale->addr = (void *) req_id;
144 sbale->flags |= SBAL_FLAGS0_COMMAND; 134 sbale->flags = SBAL_FLAGS0_COMMAND | sbtype;
145 sbale->flags |= sbtype;
146 135
147 q_req->sbale_curr = 1; 136 if (unlikely(!data))
137 return;
148 sbale++; 138 sbale++;
149 sbale->addr = data; 139 sbale->addr = data;
150 if (likely(data)) 140 sbale->length = len;
151 sbale->length = len;
152} 141}
153 142
154/** 143/**
@@ -210,4 +199,36 @@ void zfcp_qdio_skip_to_last_sbale(struct zfcp_qdio_req *q_req)
210 q_req->sbale_curr = ZFCP_QDIO_LAST_SBALE_PER_SBAL; 199 q_req->sbale_curr = ZFCP_QDIO_LAST_SBALE_PER_SBAL;
211} 200}
212 201
202/**
203 * zfcp_qdio_sbal_limit - set the sbal limit for a request in q_req
204 * @qdio: pointer to struct zfcp_qdio
205 * @q_req: The current zfcp_qdio_req
206 * @max_sbals: maximum number of SBALs allowed
207 */
208static inline
209void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
210 struct zfcp_qdio_req *q_req, int max_sbals)
211{
212 int count = min(atomic_read(&qdio->req_q_free), max_sbals);
213
214 q_req->sbal_limit = (q_req->sbal_first + count - 1) %
215 QDIO_MAX_BUFFERS_PER_Q;
216}
217
218/**
219 * zfcp_qdio_set_data_div - set data division count
220 * @qdio: pointer to struct zfcp_qdio
221 * @q_req: The current zfcp_qdio_req
222 * @count: The data division count
223 */
224static inline
225void zfcp_qdio_set_data_div(struct zfcp_qdio *qdio,
226 struct zfcp_qdio_req *q_req, u32 count)
227{
228 struct qdio_buffer_element *sbale;
229
230 sbale = &qdio->req_q[q_req->sbal_first]->element[0];
231 sbale->length = count;
232}
233
213#endif /* ZFCP_QDIO_H */ 234#endif /* ZFCP_QDIO_H */
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index be5d2c60453d..cb000c9833bb 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -12,6 +12,7 @@
12#include <linux/types.h> 12#include <linux/types.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <scsi/fc/fc_fcp.h> 14#include <scsi/fc/fc_fcp.h>
15#include <scsi/scsi_eh.h>
15#include <asm/atomic.h> 16#include <asm/atomic.h>
16#include "zfcp_ext.h" 17#include "zfcp_ext.h"
17#include "zfcp_dbf.h" 18#include "zfcp_dbf.h"
@@ -22,6 +23,13 @@ static unsigned int default_depth = 32;
22module_param_named(queue_depth, default_depth, uint, 0600); 23module_param_named(queue_depth, default_depth, uint, 0600);
23MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices"); 24MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices");
24 25
26static bool enable_dif;
27
28#ifdef CONFIG_ZFCP_DIF
29module_param_named(dif, enable_dif, bool, 0600);
30MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support");
31#endif
32
25static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth, 33static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth,
26 int reason) 34 int reason)
27{ 35{
@@ -506,8 +514,10 @@ static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
506 * @rport: The FC rport where to teminate I/O 514 * @rport: The FC rport where to teminate I/O
507 * 515 *
508 * Abort all pending SCSI commands for a port by closing the 516 * Abort all pending SCSI commands for a port by closing the
509 * port. Using a reopen avoiding a conflict with a shutdown 517 * port. Using a reopen avoids a conflict with a shutdown
510 * overwriting a reopen. 518 * overwriting a reopen. The "forced" ensures that a disappeared port
519 * is not opened again as valid due to the cached plogi data in
520 * non-NPIV mode.
511 */ 521 */
512static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport) 522static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
513{ 523{
@@ -519,11 +529,25 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
519 port = zfcp_get_port_by_wwpn(adapter, rport->port_name); 529 port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
520 530
521 if (port) { 531 if (port) {
522 zfcp_erp_port_reopen(port, 0, "sctrpi1", NULL); 532 zfcp_erp_port_forced_reopen(port, 0, "sctrpi1", NULL);
523 put_device(&port->dev); 533 put_device(&port->dev);
524 } 534 }
525} 535}
526 536
537static void zfcp_scsi_queue_unit_register(struct zfcp_port *port)
538{
539 struct zfcp_unit *unit;
540
541 read_lock_irq(&port->unit_list_lock);
542 list_for_each_entry(unit, &port->unit_list, list) {
543 get_device(&unit->dev);
544 if (scsi_queue_work(port->adapter->scsi_host,
545 &unit->scsi_work) <= 0)
546 put_device(&unit->dev);
547 }
548 read_unlock_irq(&port->unit_list_lock);
549}
550
527static void zfcp_scsi_rport_register(struct zfcp_port *port) 551static void zfcp_scsi_rport_register(struct zfcp_port *port)
528{ 552{
529 struct fc_rport_identifiers ids; 553 struct fc_rport_identifiers ids;
@@ -548,6 +572,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
548 rport->maxframe_size = port->maxframe_size; 572 rport->maxframe_size = port->maxframe_size;
549 rport->supported_classes = port->supported_classes; 573 rport->supported_classes = port->supported_classes;
550 port->rport = rport; 574 port->rport = rport;
575 port->starget_id = rport->scsi_target_id;
576
577 zfcp_scsi_queue_unit_register(port);
551} 578}
552 579
553static void zfcp_scsi_rport_block(struct zfcp_port *port) 580static void zfcp_scsi_rport_block(struct zfcp_port *port)
@@ -610,24 +637,74 @@ void zfcp_scsi_rport_work(struct work_struct *work)
610 put_device(&port->dev); 637 put_device(&port->dev);
611} 638}
612 639
613 640/**
614void zfcp_scsi_scan(struct work_struct *work) 641 * zfcp_scsi_scan - Register LUN with SCSI midlayer
642 * @unit: The LUN/unit to register
643 */
644void zfcp_scsi_scan(struct zfcp_unit *unit)
615{ 645{
616 struct zfcp_unit *unit = container_of(work, struct zfcp_unit, 646 struct fc_rport *rport = unit->port->rport;
617 scsi_work);
618 struct fc_rport *rport;
619
620 flush_work(&unit->port->rport_work);
621 rport = unit->port->rport;
622 647
623 if (rport && rport->port_state == FC_PORTSTATE_ONLINE) 648 if (rport && rport->port_state == FC_PORTSTATE_ONLINE)
624 scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, 649 scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
625 scsilun_to_int((struct scsi_lun *) 650 scsilun_to_int((struct scsi_lun *)
626 &unit->fcp_lun), 0); 651 &unit->fcp_lun), 0);
652}
627 653
654void zfcp_scsi_scan_work(struct work_struct *work)
655{
656 struct zfcp_unit *unit = container_of(work, struct zfcp_unit,
657 scsi_work);
658
659 zfcp_scsi_scan(unit);
628 put_device(&unit->dev); 660 put_device(&unit->dev);
629} 661}
630 662
663/**
664 * zfcp_scsi_set_prot - Configure DIF/DIX support in scsi_host
665 * @adapter: The adapter where to configure DIF/DIX for the SCSI host
666 */
667void zfcp_scsi_set_prot(struct zfcp_adapter *adapter)
668{
669 unsigned int mask = 0;
670 unsigned int data_div;
671 struct Scsi_Host *shost = adapter->scsi_host;
672
673 data_div = atomic_read(&adapter->status) &
674 ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED;
675
676 if (enable_dif &&
677 adapter->adapter_features & FSF_FEATURE_DIF_PROT_TYPE1)
678 mask |= SHOST_DIF_TYPE1_PROTECTION;
679
680 if (enable_dif && data_div &&
681 adapter->adapter_features & FSF_FEATURE_DIX_PROT_TCPIP) {
682 mask |= SHOST_DIX_TYPE1_PROTECTION;
683 scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
684 shost->sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ / 2;
685 shost->max_sectors = ZFCP_QDIO_MAX_SBALES_PER_REQ * 8 / 2;
686 }
687
688 scsi_host_set_prot(shost, mask);
689}
690
691/**
692 * zfcp_scsi_dif_sense_error - Report DIF/DIX error as driver sense error
693 * @scmd: The SCSI command to report the error for
694 * @ascq: The ASCQ to put in the sense buffer
695 *
696 * See the error handling in sd_done for the sense codes used here.
697 * Set DID_SOFT_ERROR to retry the request, if possible.
698 */
699void zfcp_scsi_dif_sense_error(struct scsi_cmnd *scmd, int ascq)
700{
701 scsi_build_sense_buffer(1, scmd->sense_buffer,
702 ILLEGAL_REQUEST, 0x10, ascq);
703 set_driver_byte(scmd, DRIVER_SENSE);
704 scmd->result |= SAM_STAT_CHECK_CONDITION;
705 set_host_byte(scmd, DID_SOFT_ERROR);
706}
707
631struct fc_function_template zfcp_transport_functions = { 708struct fc_function_template zfcp_transport_functions = {
632 .show_starget_port_id = 1, 709 .show_starget_port_id = 1,
633 .show_starget_port_name = 1, 710 .show_starget_port_name = 1,
@@ -677,11 +754,11 @@ struct zfcp_data zfcp_data = {
677 .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler, 754 .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
678 .can_queue = 4096, 755 .can_queue = 4096,
679 .this_id = -1, 756 .this_id = -1,
680 .sg_tablesize = ZFCP_FSF_MAX_SBALES_PER_REQ, 757 .sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ,
681 .cmd_per_lun = 1, 758 .cmd_per_lun = 1,
682 .use_clustering = 1, 759 .use_clustering = 1,
683 .sdev_attrs = zfcp_sysfs_sdev_attrs, 760 .sdev_attrs = zfcp_sysfs_sdev_attrs,
684 .max_sectors = (ZFCP_FSF_MAX_SBALES_PER_REQ * 8), 761 .max_sectors = (ZFCP_QDIO_MAX_SBALES_PER_REQ * 8),
685 .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1, 762 .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
686 .shost_attrs = zfcp_sysfs_shost_attrs, 763 .shost_attrs = zfcp_sysfs_shost_attrs,
687 }, 764 },
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index f5f60698dc4c..b4561c86e230 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -275,7 +275,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
275 275
276 zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL); 276 zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL);
277 zfcp_erp_wait(unit->port->adapter); 277 zfcp_erp_wait(unit->port->adapter);
278 flush_work(&unit->scsi_work); 278 zfcp_scsi_scan(unit);
279out: 279out:
280 put_device(&port->dev); 280 put_device(&port->dev);
281 return retval ? retval : (ssize_t) count; 281 return retval ? retval : (ssize_t) count;
@@ -290,6 +290,7 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
290 struct zfcp_unit *unit; 290 struct zfcp_unit *unit;
291 u64 fcp_lun; 291 u64 fcp_lun;
292 int retval = -EINVAL; 292 int retval = -EINVAL;
293 struct scsi_device *sdev;
293 294
294 if (!(port && get_device(&port->dev))) 295 if (!(port && get_device(&port->dev)))
295 return -EBUSY; 296 return -EBUSY;
@@ -303,8 +304,13 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
303 else 304 else
304 retval = 0; 305 retval = 0;
305 306
306 /* wait for possible timeout during SCSI probe */ 307 sdev = scsi_device_lookup(port->adapter->scsi_host, 0,
307 flush_work(&unit->scsi_work); 308 port->starget_id,
309 scsilun_to_int((struct scsi_lun *)&fcp_lun));
310 if (sdev) {
311 scsi_remove_device(sdev);
312 scsi_device_put(sdev);
313 }
308 314
309 write_lock_irq(&port->unit_list_lock); 315 write_lock_irq(&port->unit_list_lock);
310 list_del(&unit->list); 316 list_del(&unit->list);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 75f2336807cb..158284f05732 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1847,6 +1847,10 @@ config ZFCP
1847 called zfcp. If you want to compile it as a module, say M here 1847 called zfcp. If you want to compile it as a module, say M here
1848 and read <file:Documentation/kbuild/modules.txt>. 1848 and read <file:Documentation/kbuild/modules.txt>.
1849 1849
1850config ZFCP_DIF
1851 tristate "T10 DIF/DIX support for the zfcp driver (EXPERIMENTAL)"
1852 depends on ZFCP && EXPERIMENTAL
1853
1850config SCSI_PMCRAID 1854config SCSI_PMCRAID
1851 tristate "PMC SIERRA Linux MaxRAID adapter support" 1855 tristate "PMC SIERRA Linux MaxRAID adapter support"
1852 depends on PCI && SCSI 1856 depends on PCI && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 1c7ac49be649..2a3fca2eca6a 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -163,6 +163,7 @@ scsi_mod-$(CONFIG_SCSI_NETLINK) += scsi_netlink.o
163scsi_mod-$(CONFIG_SYSCTL) += scsi_sysctl.o 163scsi_mod-$(CONFIG_SYSCTL) += scsi_sysctl.o
164scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o 164scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o
165scsi_mod-y += scsi_trace.o 165scsi_mod-y += scsi_trace.o
166scsi_mod-$(CONFIG_PM_OPS) += scsi_pm.o
166 167
167scsi_tgt-y += scsi_tgt_lib.o scsi_tgt_if.o 168scsi_tgt-y += scsi_tgt_lib.o scsi_tgt_if.o
168 169
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 33898b61fdb5..cad6f9abaeb9 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1091,6 +1091,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
1091 struct list_head *insert = &aac_devices; 1091 struct list_head *insert = &aac_devices;
1092 int error = -ENODEV; 1092 int error = -ENODEV;
1093 int unique_id = 0; 1093 int unique_id = 0;
1094 u64 dmamask;
1094 1095
1095 list_for_each_entry(aac, &aac_devices, entry) { 1096 list_for_each_entry(aac, &aac_devices, entry) {
1096 if (aac->id > unique_id) 1097 if (aac->id > unique_id)
@@ -1104,17 +1105,18 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
1104 goto out; 1105 goto out;
1105 error = -ENODEV; 1106 error = -ENODEV;
1106 1107
1107 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
1108 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1109 goto out_disable_pdev;
1110 /* 1108 /*
1111 * If the quirk31 bit is set, the adapter needs adapter 1109 * If the quirk31 bit is set, the adapter needs adapter
1112 * to driver communication memory to be allocated below 2gig 1110 * to driver communication memory to be allocated below 2gig
1113 */ 1111 */
1114 if (aac_drivers[index].quirks & AAC_QUIRK_31BIT) 1112 if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
1115 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(31)) || 1113 dmamask = DMA_BIT_MASK(31);
1116 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(31))) 1114 else
1117 goto out_disable_pdev; 1115 dmamask = DMA_BIT_MASK(32);
1116
1117 if (pci_set_dma_mask(pdev, dmamask) ||
1118 pci_set_consistent_dma_mask(pdev, dmamask))
1119 goto out_disable_pdev;
1118 1120
1119 pci_set_master(pdev); 1121 pci_set_master(pdev);
1120 1122
diff --git a/drivers/scsi/aic7xxx/aic7770.c b/drivers/scsi/aic7xxx/aic7770.c
index 6d86a9be538f..5000bd69c13f 100644
--- a/drivers/scsi/aic7xxx/aic7770.c
+++ b/drivers/scsi/aic7xxx/aic7770.c
@@ -170,7 +170,7 @@ aic7770_config(struct ahc_softc *ahc, struct aic7770_identity *entry, u_int io)
170 case 15: 170 case 15:
171 break; 171 break;
172 default: 172 default:
173 printf("aic7770_config: invalid irq setting %d\n", intdef); 173 printk("aic7770_config: invalid irq setting %d\n", intdef);
174 return (ENXIO); 174 return (ENXIO);
175 } 175 }
176 176
@@ -221,7 +221,7 @@ aic7770_config(struct ahc_softc *ahc, struct aic7770_identity *entry, u_int io)
221 break; 221 break;
222 } 222 }
223 if (have_seeprom == 0) { 223 if (have_seeprom == 0) {
224 free(ahc->seep_config, M_DEVBUF); 224 kfree(ahc->seep_config);
225 ahc->seep_config = NULL; 225 ahc->seep_config = NULL;
226 } 226 }
227 227
@@ -293,7 +293,7 @@ aha2840_load_seeprom(struct ahc_softc *ahc)
293 sc = ahc->seep_config; 293 sc = ahc->seep_config;
294 294
295 if (bootverbose) 295 if (bootverbose)
296 printf("%s: Reading SEEPROM...", ahc_name(ahc)); 296 printk("%s: Reading SEEPROM...", ahc_name(ahc));
297 have_seeprom = ahc_read_seeprom(&sd, (uint16_t *)sc, 297 have_seeprom = ahc_read_seeprom(&sd, (uint16_t *)sc,
298 /*start_addr*/0, sizeof(*sc)/2); 298 /*start_addr*/0, sizeof(*sc)/2);
299 299
@@ -301,16 +301,16 @@ aha2840_load_seeprom(struct ahc_softc *ahc)
301 301
302 if (ahc_verify_cksum(sc) == 0) { 302 if (ahc_verify_cksum(sc) == 0) {
303 if(bootverbose) 303 if(bootverbose)
304 printf ("checksum error\n"); 304 printk ("checksum error\n");
305 have_seeprom = 0; 305 have_seeprom = 0;
306 } else if (bootverbose) { 306 } else if (bootverbose) {
307 printf("done.\n"); 307 printk("done.\n");
308 } 308 }
309 } 309 }
310 310
311 if (!have_seeprom) { 311 if (!have_seeprom) {
312 if (bootverbose) 312 if (bootverbose)
313 printf("%s: No SEEPROM available\n", ahc_name(ahc)); 313 printk("%s: No SEEPROM available\n", ahc_name(ahc));
314 ahc->flags |= AHC_USEDEFAULTS; 314 ahc->flags |= AHC_USEDEFAULTS;
315 } else { 315 } else {
316 /* 316 /*
diff --git a/drivers/scsi/aic7xxx/aic7770_osm.c b/drivers/scsi/aic7xxx/aic7770_osm.c
index f220e5e436ab..0cb8ef64b5ce 100644
--- a/drivers/scsi/aic7xxx/aic7770_osm.c
+++ b/drivers/scsi/aic7xxx/aic7770_osm.c
@@ -85,7 +85,7 @@ aic7770_probe(struct device *dev)
85 int error; 85 int error;
86 86
87 sprintf(buf, "ahc_eisa:%d", eisaBase >> 12); 87 sprintf(buf, "ahc_eisa:%d", eisaBase >> 12);
88 name = malloc(strlen(buf) + 1, M_DEVBUF, M_NOWAIT); 88 name = kmalloc(strlen(buf) + 1, GFP_ATOMIC);
89 if (name == NULL) 89 if (name == NULL)
90 return (ENOMEM); 90 return (ENOMEM);
91 strcpy(name, buf); 91 strcpy(name, buf);
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 78971db5b60e..3233bf564435 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -289,7 +289,7 @@ ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
289 || ahd->dst_mode == AHD_MODE_UNKNOWN) 289 || ahd->dst_mode == AHD_MODE_UNKNOWN)
290 panic("Setting mode prior to saving it.\n"); 290 panic("Setting mode prior to saving it.\n");
291 if ((ahd_debug & AHD_SHOW_MODEPTR) != 0) 291 if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
292 printf("%s: Setting mode 0x%x\n", ahd_name(ahd), 292 printk("%s: Setting mode 0x%x\n", ahd_name(ahd),
293 ahd_build_mode_state(ahd, src, dst)); 293 ahd_build_mode_state(ahd, src, dst));
294#endif 294#endif
295 ahd_outb(ahd, MODE_PTR, ahd_build_mode_state(ahd, src, dst)); 295 ahd_outb(ahd, MODE_PTR, ahd_build_mode_state(ahd, src, dst));
@@ -307,7 +307,7 @@ ahd_update_modes(struct ahd_softc *ahd)
307 mode_ptr = ahd_inb(ahd, MODE_PTR); 307 mode_ptr = ahd_inb(ahd, MODE_PTR);
308#ifdef AHD_DEBUG 308#ifdef AHD_DEBUG
309 if ((ahd_debug & AHD_SHOW_MODEPTR) != 0) 309 if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
310 printf("Reading mode 0x%x\n", mode_ptr); 310 printk("Reading mode 0x%x\n", mode_ptr);
311#endif 311#endif
312 ahd_extract_mode_state(ahd, mode_ptr, &src, &dst); 312 ahd_extract_mode_state(ahd, mode_ptr, &src, &dst);
313 ahd_known_modes(ahd, src, dst); 313 ahd_known_modes(ahd, src, dst);
@@ -877,7 +877,7 @@ ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb)
877 uint64_t host_dataptr; 877 uint64_t host_dataptr;
878 878
879 host_dataptr = ahd_le64toh(scb->hscb->dataptr); 879 host_dataptr = ahd_le64toh(scb->hscb->dataptr);
880 printf("%s: Queueing SCB %d:0x%x bus addr 0x%x - 0x%x%x/0x%x\n", 880 printk("%s: Queueing SCB %d:0x%x bus addr 0x%x - 0x%x%x/0x%x\n",
881 ahd_name(ahd), 881 ahd_name(ahd),
882 SCB_GET_TAG(scb), scb->hscb->scsiid, 882 SCB_GET_TAG(scb), scb->hscb->scsiid,
883 ahd_le32toh(scb->hscb->hscb_busaddr), 883 ahd_le32toh(scb->hscb->hscb_busaddr),
@@ -1174,7 +1174,7 @@ ahd_clear_fifo(struct ahd_softc *ahd, u_int fifo)
1174 1174
1175#ifdef AHD_DEBUG 1175#ifdef AHD_DEBUG
1176 if ((ahd_debug & AHD_SHOW_FIFOS) != 0) 1176 if ((ahd_debug & AHD_SHOW_FIFOS) != 0)
1177 printf("%s: Clearing FIFO %d\n", ahd_name(ahd), fifo); 1177 printk("%s: Clearing FIFO %d\n", ahd_name(ahd), fifo);
1178#endif 1178#endif
1179 saved_modes = ahd_save_modes(ahd); 1179 saved_modes = ahd_save_modes(ahd);
1180 ahd_set_modes(ahd, fifo, fifo); 1180 ahd_set_modes(ahd, fifo, fifo);
@@ -1215,7 +1215,7 @@ ahd_flush_qoutfifo(struct ahd_softc *ahd)
1215 scbid = ahd_inw(ahd, GSFIFO); 1215 scbid = ahd_inw(ahd, GSFIFO);
1216 scb = ahd_lookup_scb(ahd, scbid); 1216 scb = ahd_lookup_scb(ahd, scbid);
1217 if (scb == NULL) { 1217 if (scb == NULL) {
1218 printf("%s: Warning - GSFIFO SCB %d invalid\n", 1218 printk("%s: Warning - GSFIFO SCB %d invalid\n",
1219 ahd_name(ahd), scbid); 1219 ahd_name(ahd), scbid);
1220 continue; 1220 continue;
1221 } 1221 }
@@ -1339,7 +1339,7 @@ rescan_fifos:
1339 next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); 1339 next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
1340 scb = ahd_lookup_scb(ahd, scbid); 1340 scb = ahd_lookup_scb(ahd, scbid);
1341 if (scb == NULL) { 1341 if (scb == NULL) {
1342 printf("%s: Warning - DMA-up and complete " 1342 printk("%s: Warning - DMA-up and complete "
1343 "SCB %d invalid\n", ahd_name(ahd), scbid); 1343 "SCB %d invalid\n", ahd_name(ahd), scbid);
1344 continue; 1344 continue;
1345 } 1345 }
@@ -1360,7 +1360,7 @@ rescan_fifos:
1360 next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); 1360 next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
1361 scb = ahd_lookup_scb(ahd, scbid); 1361 scb = ahd_lookup_scb(ahd, scbid);
1362 if (scb == NULL) { 1362 if (scb == NULL) {
1363 printf("%s: Warning - Complete Qfrz SCB %d invalid\n", 1363 printk("%s: Warning - Complete Qfrz SCB %d invalid\n",
1364 ahd_name(ahd), scbid); 1364 ahd_name(ahd), scbid);
1365 continue; 1365 continue;
1366 } 1366 }
@@ -1377,7 +1377,7 @@ rescan_fifos:
1377 next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); 1377 next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
1378 scb = ahd_lookup_scb(ahd, scbid); 1378 scb = ahd_lookup_scb(ahd, scbid);
1379 if (scb == NULL) { 1379 if (scb == NULL) {
1380 printf("%s: Warning - Complete SCB %d invalid\n", 1380 printk("%s: Warning - Complete SCB %d invalid\n",
1381 ahd_name(ahd), scbid); 1381 ahd_name(ahd), scbid);
1382 continue; 1382 continue;
1383 } 1383 }
@@ -1682,7 +1682,7 @@ ahd_run_qoutfifo(struct ahd_softc *ahd)
1682 scb_index = ahd_le16toh(completion->tag); 1682 scb_index = ahd_le16toh(completion->tag);
1683 scb = ahd_lookup_scb(ahd, scb_index); 1683 scb = ahd_lookup_scb(ahd, scb_index);
1684 if (scb == NULL) { 1684 if (scb == NULL) {
1685 printf("%s: WARNING no command for scb %d " 1685 printk("%s: WARNING no command for scb %d "
1686 "(cmdcmplt)\nQOUTPOS = %d\n", 1686 "(cmdcmplt)\nQOUTPOS = %d\n",
1687 ahd_name(ahd), scb_index, 1687 ahd_name(ahd), scb_index,
1688 ahd->qoutfifonext); 1688 ahd->qoutfifonext);
@@ -1714,7 +1714,7 @@ ahd_handle_hwerrint(struct ahd_softc *ahd)
1714 error = ahd_inb(ahd, ERROR); 1714 error = ahd_inb(ahd, ERROR);
1715 for (i = 0; i < num_errors; i++) { 1715 for (i = 0; i < num_errors; i++) {
1716 if ((error & ahd_hard_errors[i].errno) != 0) 1716 if ((error & ahd_hard_errors[i].errno) != 0)
1717 printf("%s: hwerrint, %s\n", 1717 printk("%s: hwerrint, %s\n",
1718 ahd_name(ahd), ahd_hard_errors[i].errmesg); 1718 ahd_name(ahd), ahd_hard_errors[i].errmesg);
1719 } 1719 }
1720 1720
@@ -1747,7 +1747,7 @@ ahd_dump_sglist(struct scb *scb)
1747 1747
1748 addr = ahd_le64toh(sg_list[i].addr); 1748 addr = ahd_le64toh(sg_list[i].addr);
1749 len = ahd_le32toh(sg_list[i].len); 1749 len = ahd_le32toh(sg_list[i].len);
1750 printf("sg[%d] - Addr 0x%x%x : Length %d%s\n", 1750 printk("sg[%d] - Addr 0x%x%x : Length %d%s\n",
1751 i, 1751 i,
1752 (uint32_t)((addr >> 32) & 0xFFFFFFFF), 1752 (uint32_t)((addr >> 32) & 0xFFFFFFFF),
1753 (uint32_t)(addr & 0xFFFFFFFF), 1753 (uint32_t)(addr & 0xFFFFFFFF),
@@ -1763,7 +1763,7 @@ ahd_dump_sglist(struct scb *scb)
1763 uint32_t len; 1763 uint32_t len;
1764 1764
1765 len = ahd_le32toh(sg_list[i].len); 1765 len = ahd_le32toh(sg_list[i].len);
1766 printf("sg[%d] - Addr 0x%x%x : Length %d%s\n", 1766 printk("sg[%d] - Addr 0x%x%x : Length %d%s\n",
1767 i, 1767 i,
1768 (len & AHD_SG_HIGH_ADDR_MASK) >> 24, 1768 (len & AHD_SG_HIGH_ADDR_MASK) >> 24,
1769 ahd_le32toh(sg_list[i].addr), 1769 ahd_le32toh(sg_list[i].addr),
@@ -1802,7 +1802,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
1802 ahd_update_modes(ahd); 1802 ahd_update_modes(ahd);
1803#ifdef AHD_DEBUG 1803#ifdef AHD_DEBUG
1804 if ((ahd_debug & AHD_SHOW_MISC) != 0) 1804 if ((ahd_debug & AHD_SHOW_MISC) != 0)
1805 printf("%s: Handle Seqint Called for code %d\n", 1805 printk("%s: Handle Seqint Called for code %d\n",
1806 ahd_name(ahd), seqintcode); 1806 ahd_name(ahd), seqintcode);
1807#endif 1807#endif
1808 switch (seqintcode) { 1808 switch (seqintcode) {
@@ -1836,18 +1836,18 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
1836 */ 1836 */
1837#ifdef AHD_DEBUG 1837#ifdef AHD_DEBUG
1838 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) 1838 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
1839 printf("%s: Assuming LQIPHASE_NLQ with " 1839 printk("%s: Assuming LQIPHASE_NLQ with "
1840 "P0 assertion\n", ahd_name(ahd)); 1840 "P0 assertion\n", ahd_name(ahd));
1841#endif 1841#endif
1842 } 1842 }
1843#ifdef AHD_DEBUG 1843#ifdef AHD_DEBUG
1844 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) 1844 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
1845 printf("%s: Entering NONPACK\n", ahd_name(ahd)); 1845 printk("%s: Entering NONPACK\n", ahd_name(ahd));
1846#endif 1846#endif
1847 break; 1847 break;
1848 } 1848 }
1849 case INVALID_SEQINT: 1849 case INVALID_SEQINT:
1850 printf("%s: Invalid Sequencer interrupt occurred, " 1850 printk("%s: Invalid Sequencer interrupt occurred, "
1851 "resetting channel.\n", 1851 "resetting channel.\n",
1852 ahd_name(ahd)); 1852 ahd_name(ahd));
1853#ifdef AHD_DEBUG 1853#ifdef AHD_DEBUG
@@ -1866,8 +1866,8 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
1866 if (scb != NULL) 1866 if (scb != NULL)
1867 ahd_print_path(ahd, scb); 1867 ahd_print_path(ahd, scb);
1868 else 1868 else
1869 printf("%s: ", ahd_name(ahd)); 1869 printk("%s: ", ahd_name(ahd));
1870 printf("SCB %d Packetized Status Overrun", scbid); 1870 printk("SCB %d Packetized Status Overrun", scbid);
1871 ahd_dump_card_state(ahd); 1871 ahd_dump_card_state(ahd);
1872 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); 1872 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
1873 break; 1873 break;
@@ -1881,7 +1881,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
1881 scb = ahd_lookup_scb(ahd, scbid); 1881 scb = ahd_lookup_scb(ahd, scbid);
1882 if (scb == NULL) { 1882 if (scb == NULL) {
1883 ahd_dump_card_state(ahd); 1883 ahd_dump_card_state(ahd);
1884 printf("CFG4ISTAT: Free SCB %d referenced", scbid); 1884 printk("CFG4ISTAT: Free SCB %d referenced", scbid);
1885 panic("For safety"); 1885 panic("For safety");
1886 } 1886 }
1887 ahd_outq(ahd, HADDR, scb->sense_busaddr); 1887 ahd_outq(ahd, HADDR, scb->sense_busaddr);
@@ -1896,7 +1896,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
1896 u_int bus_phase; 1896 u_int bus_phase;
1897 1897
1898 bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; 1898 bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
1899 printf("%s: ILLEGAL_PHASE 0x%x\n", 1899 printk("%s: ILLEGAL_PHASE 0x%x\n",
1900 ahd_name(ahd), bus_phase); 1900 ahd_name(ahd), bus_phase);
1901 1901
1902 switch (bus_phase) { 1902 switch (bus_phase) {
@@ -1908,7 +1908,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
1908 case P_STATUS: 1908 case P_STATUS:
1909 case P_MESGIN: 1909 case P_MESGIN:
1910 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); 1910 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
1911 printf("%s: Issued Bus Reset.\n", ahd_name(ahd)); 1911 printk("%s: Issued Bus Reset.\n", ahd_name(ahd));
1912 break; 1912 break;
1913 case P_COMMAND: 1913 case P_COMMAND:
1914 { 1914 {
@@ -1933,7 +1933,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
1933 scbid = ahd_get_scbptr(ahd); 1933 scbid = ahd_get_scbptr(ahd);
1934 scb = ahd_lookup_scb(ahd, scbid); 1934 scb = ahd_lookup_scb(ahd, scbid);
1935 if (scb == NULL) { 1935 if (scb == NULL) {
1936 printf("Invalid phase with no valid SCB. " 1936 printk("Invalid phase with no valid SCB. "
1937 "Resetting bus.\n"); 1937 "Resetting bus.\n");
1938 ahd_reset_channel(ahd, 'A', 1938 ahd_reset_channel(ahd, 'A',
1939 /*Initiate Reset*/TRUE); 1939 /*Initiate Reset*/TRUE);
@@ -1997,7 +1997,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
1997#ifdef AHD_DEBUG 1997#ifdef AHD_DEBUG
1998 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { 1998 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
1999 ahd_print_path(ahd, scb); 1999 ahd_print_path(ahd, scb);
2000 printf("Unexpected command phase from " 2000 printk("Unexpected command phase from "
2001 "packetized target\n"); 2001 "packetized target\n");
2002 } 2002 }
2003#endif 2003#endif
@@ -2013,7 +2013,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
2013 2013
2014#ifdef AHD_DEBUG 2014#ifdef AHD_DEBUG
2015 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { 2015 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
2016 printf("%s: CFG4OVERRUN mode = %x\n", ahd_name(ahd), 2016 printk("%s: CFG4OVERRUN mode = %x\n", ahd_name(ahd),
2017 ahd_inb(ahd, MODE_PTR)); 2017 ahd_inb(ahd, MODE_PTR));
2018 } 2018 }
2019#endif 2019#endif
@@ -2049,7 +2049,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
2049 { 2049 {
2050#ifdef AHD_DEBUG 2050#ifdef AHD_DEBUG
2051 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { 2051 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
2052 printf("%s: PDATA_REINIT - DFCNTRL = 0x%x " 2052 printk("%s: PDATA_REINIT - DFCNTRL = 0x%x "
2053 "SG_CACHE_SHADOW = 0x%x\n", 2053 "SG_CACHE_SHADOW = 0x%x\n",
2054 ahd_name(ahd), ahd_inb(ahd, DFCNTRL), 2054 ahd_name(ahd), ahd_inb(ahd, DFCNTRL),
2055 ahd_inb(ahd, SG_CACHE_SHADOW)); 2055 ahd_inb(ahd, SG_CACHE_SHADOW));
@@ -2082,7 +2082,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
2082 bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; 2082 bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
2083 if (bus_phase != P_MESGIN 2083 if (bus_phase != P_MESGIN
2084 && bus_phase != P_MESGOUT) { 2084 && bus_phase != P_MESGOUT) {
2085 printf("ahd_intr: HOST_MSG_LOOP bad " 2085 printk("ahd_intr: HOST_MSG_LOOP bad "
2086 "phase 0x%x\n", bus_phase); 2086 "phase 0x%x\n", bus_phase);
2087 /* 2087 /*
2088 * Probably transitioned to bus free before 2088 * Probably transitioned to bus free before
@@ -2131,29 +2131,29 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
2131 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); 2131 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
2132 ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO); 2132 ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
2133 2133
2134 printf("%s:%c:%d: no active SCB for reconnecting " 2134 printk("%s:%c:%d: no active SCB for reconnecting "
2135 "target - issuing BUS DEVICE RESET\n", 2135 "target - issuing BUS DEVICE RESET\n",
2136 ahd_name(ahd), 'A', ahd_inb(ahd, SELID) >> 4); 2136 ahd_name(ahd), 'A', ahd_inb(ahd, SELID) >> 4);
2137 printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " 2137 printk("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
2138 "REG0 == 0x%x ACCUM = 0x%x\n", 2138 "REG0 == 0x%x ACCUM = 0x%x\n",
2139 ahd_inb(ahd, SAVED_SCSIID), ahd_inb(ahd, SAVED_LUN), 2139 ahd_inb(ahd, SAVED_SCSIID), ahd_inb(ahd, SAVED_LUN),
2140 ahd_inw(ahd, REG0), ahd_inb(ahd, ACCUM)); 2140 ahd_inw(ahd, REG0), ahd_inb(ahd, ACCUM));
2141 printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " 2141 printk("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
2142 "SINDEX == 0x%x\n", 2142 "SINDEX == 0x%x\n",
2143 ahd_inb(ahd, SEQ_FLAGS), ahd_get_scbptr(ahd), 2143 ahd_inb(ahd, SEQ_FLAGS), ahd_get_scbptr(ahd),
2144 ahd_find_busy_tcl(ahd, 2144 ahd_find_busy_tcl(ahd,
2145 BUILD_TCL(ahd_inb(ahd, SAVED_SCSIID), 2145 BUILD_TCL(ahd_inb(ahd, SAVED_SCSIID),
2146 ahd_inb(ahd, SAVED_LUN))), 2146 ahd_inb(ahd, SAVED_LUN))),
2147 ahd_inw(ahd, SINDEX)); 2147 ahd_inw(ahd, SINDEX));
2148 printf("SELID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " 2148 printk("SELID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
2149 "SCB_CONTROL == 0x%x\n", 2149 "SCB_CONTROL == 0x%x\n",
2150 ahd_inb(ahd, SELID), ahd_inb_scbram(ahd, SCB_SCSIID), 2150 ahd_inb(ahd, SELID), ahd_inb_scbram(ahd, SCB_SCSIID),
2151 ahd_inb_scbram(ahd, SCB_LUN), 2151 ahd_inb_scbram(ahd, SCB_LUN),
2152 ahd_inb_scbram(ahd, SCB_CONTROL)); 2152 ahd_inb_scbram(ahd, SCB_CONTROL));
2153 printf("SCSIBUS[0] == 0x%x, SCSISIGI == 0x%x\n", 2153 printk("SCSIBUS[0] == 0x%x, SCSISIGI == 0x%x\n",
2154 ahd_inb(ahd, SCSIBUS), ahd_inb(ahd, SCSISIGI)); 2154 ahd_inb(ahd, SCSIBUS), ahd_inb(ahd, SCSISIGI));
2155 printf("SXFRCTL0 == 0x%x\n", ahd_inb(ahd, SXFRCTL0)); 2155 printk("SXFRCTL0 == 0x%x\n", ahd_inb(ahd, SXFRCTL0));
2156 printf("SEQCTL0 == 0x%x\n", ahd_inb(ahd, SEQCTL0)); 2156 printk("SEQCTL0 == 0x%x\n", ahd_inb(ahd, SEQCTL0));
2157 ahd_dump_card_state(ahd); 2157 ahd_dump_card_state(ahd);
2158 ahd->msgout_buf[0] = MSG_BUS_DEV_RESET; 2158 ahd->msgout_buf[0] = MSG_BUS_DEV_RESET;
2159 ahd->msgout_len = 1; 2159 ahd->msgout_len = 1;
@@ -2181,7 +2181,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
2181 u_int lastphase; 2181 u_int lastphase;
2182 2182
2183 lastphase = ahd_inb(ahd, LASTPHASE); 2183 lastphase = ahd_inb(ahd, LASTPHASE);
2184 printf("%s:%c:%d: unknown scsi bus phase %x, " 2184 printk("%s:%c:%d: unknown scsi bus phase %x, "
2185 "lastphase = 0x%x. Attempting to continue\n", 2185 "lastphase = 0x%x. Attempting to continue\n",
2186 ahd_name(ahd), 'A', 2186 ahd_name(ahd), 'A',
2187 SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)), 2187 SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)),
@@ -2193,7 +2193,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
2193 u_int lastphase; 2193 u_int lastphase;
2194 2194
2195 lastphase = ahd_inb(ahd, LASTPHASE); 2195 lastphase = ahd_inb(ahd, LASTPHASE);
2196 printf("%s:%c:%d: Missed busfree. " 2196 printk("%s:%c:%d: Missed busfree. "
2197 "Lastphase = 0x%x, Curphase = 0x%x\n", 2197 "Lastphase = 0x%x, Curphase = 0x%x\n",
2198 ahd_name(ahd), 'A', 2198 ahd_name(ahd), 'A',
2199 SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)), 2199 SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)),
@@ -2223,11 +2223,11 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
2223 lastphase = ahd_inb(ahd, LASTPHASE); 2223 lastphase = ahd_inb(ahd, LASTPHASE);
2224 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { 2224 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
2225 ahd_print_path(ahd, scb); 2225 ahd_print_path(ahd, scb);
2226 printf("data overrun detected %s. Tag == 0x%x.\n", 2226 printk("data overrun detected %s. Tag == 0x%x.\n",
2227 ahd_lookup_phase_entry(lastphase)->phasemsg, 2227 ahd_lookup_phase_entry(lastphase)->phasemsg,
2228 SCB_GET_TAG(scb)); 2228 SCB_GET_TAG(scb));
2229 ahd_print_path(ahd, scb); 2229 ahd_print_path(ahd, scb);
2230 printf("%s seen Data Phase. Length = %ld. " 2230 printk("%s seen Data Phase. Length = %ld. "
2231 "NumSGs = %d.\n", 2231 "NumSGs = %d.\n",
2232 ahd_inb(ahd, SEQ_FLAGS) & DPHASE 2232 ahd_inb(ahd, SEQ_FLAGS) & DPHASE
2233 ? "Have" : "Haven't", 2233 ? "Have" : "Haven't",
@@ -2252,7 +2252,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
2252 u_int scbid; 2252 u_int scbid;
2253 2253
2254 ahd_fetch_devinfo(ahd, &devinfo); 2254 ahd_fetch_devinfo(ahd, &devinfo);
2255 printf("%s:%c:%d:%d: Attempt to issue message failed\n", 2255 printk("%s:%c:%d:%d: Attempt to issue message failed\n",
2256 ahd_name(ahd), devinfo.channel, devinfo.target, 2256 ahd_name(ahd), devinfo.channel, devinfo.target,
2257 devinfo.lun); 2257 devinfo.lun);
2258 scbid = ahd_get_scbptr(ahd); 2258 scbid = ahd_get_scbptr(ahd);
@@ -2285,7 +2285,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
2285 cam_status error; 2285 cam_status error;
2286 2286
2287 ahd_print_path(ahd, scb); 2287 ahd_print_path(ahd, scb);
2288 printf("Task Management Func 0x%x Complete\n", 2288 printk("Task Management Func 0x%x Complete\n",
2289 scb->hscb->task_management); 2289 scb->hscb->task_management);
2290 lun = CAM_LUN_WILDCARD; 2290 lun = CAM_LUN_WILDCARD;
2291 tag = SCB_LIST_NULL; 2291 tag = SCB_LIST_NULL;
@@ -2341,7 +2341,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
2341 * the QINFIFO if it is still there. 2341 * the QINFIFO if it is still there.
2342 */ 2342 */
2343 ahd_print_path(ahd, scb); 2343 ahd_print_path(ahd, scb);
2344 printf("SCB completes before TMF\n"); 2344 printk("SCB completes before TMF\n");
2345 /* 2345 /*
2346 * Handle losing the race. Wait until any 2346 * Handle losing the race. Wait until any
2347 * current selection completes. We will then 2347 * current selection completes. We will then
@@ -2366,7 +2366,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
2366 case TRACEPOINT1: 2366 case TRACEPOINT1:
2367 case TRACEPOINT2: 2367 case TRACEPOINT2:
2368 case TRACEPOINT3: 2368 case TRACEPOINT3:
2369 printf("%s: Tracepoint %d\n", ahd_name(ahd), 2369 printk("%s: Tracepoint %d\n", ahd_name(ahd),
2370 seqintcode - TRACEPOINT0); 2370 seqintcode - TRACEPOINT0);
2371 break; 2371 break;
2372 case NO_SEQINT: 2372 case NO_SEQINT:
@@ -2375,7 +2375,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
2375 ahd_handle_hwerrint(ahd); 2375 ahd_handle_hwerrint(ahd);
2376 break; 2376 break;
2377 default: 2377 default:
2378 printf("%s: Unexpected SEQINTCODE %d\n", ahd_name(ahd), 2378 printk("%s: Unexpected SEQINTCODE %d\n", ahd_name(ahd),
2379 seqintcode); 2379 seqintcode);
2380 break; 2380 break;
2381 } 2381 }
@@ -2440,7 +2440,7 @@ ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
2440 u_int now_lvd; 2440 u_int now_lvd;
2441 2441
2442 now_lvd = ahd_inb(ahd, SBLKCTL) & ENAB40; 2442 now_lvd = ahd_inb(ahd, SBLKCTL) & ENAB40;
2443 printf("%s: Transceiver State Has Changed to %s mode\n", 2443 printk("%s: Transceiver State Has Changed to %s mode\n",
2444 ahd_name(ahd), now_lvd ? "LVD" : "SE"); 2444 ahd_name(ahd), now_lvd ? "LVD" : "SE");
2445 ahd_outb(ahd, CLRSINT0, CLRIOERR); 2445 ahd_outb(ahd, CLRSINT0, CLRIOERR);
2446 /* 2446 /*
@@ -2452,12 +2452,12 @@ ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
2452 ahd_unpause(ahd); 2452 ahd_unpause(ahd);
2453 } else if ((status0 & OVERRUN) != 0) { 2453 } else if ((status0 & OVERRUN) != 0) {
2454 2454
2455 printf("%s: SCSI offset overrun detected. Resetting bus.\n", 2455 printk("%s: SCSI offset overrun detected. Resetting bus.\n",
2456 ahd_name(ahd)); 2456 ahd_name(ahd));
2457 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); 2457 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
2458 } else if ((status & SCSIRSTI) != 0) { 2458 } else if ((status & SCSIRSTI) != 0) {
2459 2459
2460 printf("%s: Someone reset channel A\n", ahd_name(ahd)); 2460 printk("%s: Someone reset channel A\n", ahd_name(ahd));
2461 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/FALSE); 2461 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/FALSE);
2462 } else if ((status & SCSIPERR) != 0) { 2462 } else if ((status & SCSIPERR) != 0) {
2463 2463
@@ -2467,7 +2467,7 @@ ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
2467 ahd_handle_transmission_error(ahd); 2467 ahd_handle_transmission_error(ahd);
2468 } else if (lqostat0 != 0) { 2468 } else if (lqostat0 != 0) {
2469 2469
2470 printf("%s: lqostat0 == 0x%x!\n", ahd_name(ahd), lqostat0); 2470 printk("%s: lqostat0 == 0x%x!\n", ahd_name(ahd), lqostat0);
2471 ahd_outb(ahd, CLRLQOINT0, lqostat0); 2471 ahd_outb(ahd, CLRLQOINT0, lqostat0);
2472 if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) 2472 if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0)
2473 ahd_outb(ahd, CLRLQOINT1, 0); 2473 ahd_outb(ahd, CLRLQOINT1, 0);
@@ -2497,7 +2497,7 @@ ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
2497 scbid = ahd_inw(ahd, WAITING_TID_HEAD); 2497 scbid = ahd_inw(ahd, WAITING_TID_HEAD);
2498 scb = ahd_lookup_scb(ahd, scbid); 2498 scb = ahd_lookup_scb(ahd, scbid);
2499 if (scb == NULL) { 2499 if (scb == NULL) {
2500 printf("%s: ahd_intr - referenced scb not " 2500 printk("%s: ahd_intr - referenced scb not "
2501 "valid during SELTO scb(0x%x)\n", 2501 "valid during SELTO scb(0x%x)\n",
2502 ahd_name(ahd), scbid); 2502 ahd_name(ahd), scbid);
2503 ahd_dump_card_state(ahd); 2503 ahd_dump_card_state(ahd);
@@ -2506,7 +2506,7 @@ ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
2506#ifdef AHD_DEBUG 2506#ifdef AHD_DEBUG
2507 if ((ahd_debug & AHD_SHOW_SELTO) != 0) { 2507 if ((ahd_debug & AHD_SHOW_SELTO) != 0) {
2508 ahd_print_path(ahd, scb); 2508 ahd_print_path(ahd, scb);
2509 printf("Saw Selection Timeout for SCB 0x%x\n", 2509 printk("Saw Selection Timeout for SCB 0x%x\n",
2510 scbid); 2510 scbid);
2511 } 2511 }
2512#endif 2512#endif
@@ -2534,7 +2534,7 @@ ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
2534 ahd_iocell_first_selection(ahd); 2534 ahd_iocell_first_selection(ahd);
2535 ahd_unpause(ahd); 2535 ahd_unpause(ahd);
2536 } else if (status3 != 0) { 2536 } else if (status3 != 0) {
2537 printf("%s: SCSI Cell parity error SSTAT3 == 0x%x\n", 2537 printk("%s: SCSI Cell parity error SSTAT3 == 0x%x\n",
2538 ahd_name(ahd), status3); 2538 ahd_name(ahd), status3);
2539 ahd_outb(ahd, CLRSINT3, status3); 2539 ahd_outb(ahd, CLRSINT3, status3);
2540 } else if ((lqistat1 & (LQIPHASE_LQ|LQIPHASE_NLQ)) != 0) { 2540 } else if ((lqistat1 & (LQIPHASE_LQ|LQIPHASE_NLQ)) != 0) {
@@ -2587,7 +2587,7 @@ ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
2587 scbid = ahd_get_scbptr(ahd); 2587 scbid = ahd_get_scbptr(ahd);
2588 scb = ahd_lookup_scb(ahd, scbid); 2588 scb = ahd_lookup_scb(ahd, scbid);
2589 if (scb == NULL) { 2589 if (scb == NULL) {
2590 printf("%s: Invalid SCB %d in DFF%d " 2590 printk("%s: Invalid SCB %d in DFF%d "
2591 "during unexpected busfree\n", 2591 "during unexpected busfree\n",
2592 ahd_name(ahd), scbid, mode); 2592 ahd_name(ahd), scbid, mode);
2593 packetized = 0; 2593 packetized = 0;
@@ -2620,7 +2620,7 @@ ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
2620 2620
2621#ifdef AHD_DEBUG 2621#ifdef AHD_DEBUG
2622 if ((ahd_debug & AHD_SHOW_MISC) != 0) 2622 if ((ahd_debug & AHD_SHOW_MISC) != 0)
2623 printf("Saw Busfree. Busfreetime = 0x%x.\n", 2623 printk("Saw Busfree. Busfreetime = 0x%x.\n",
2624 busfreetime); 2624 busfreetime);
2625#endif 2625#endif
2626 /* 2626 /*
@@ -2661,7 +2661,7 @@ ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
2661 ahd_unpause(ahd); 2661 ahd_unpause(ahd);
2662 } 2662 }
2663 } else { 2663 } else {
2664 printf("%s: Missing case in ahd_handle_scsiint. status = %x\n", 2664 printk("%s: Missing case in ahd_handle_scsiint. status = %x\n",
2665 ahd_name(ahd), status); 2665 ahd_name(ahd), status);
2666 ahd_dump_card_state(ahd); 2666 ahd_dump_card_state(ahd);
2667 ahd_clear_intstat(ahd); 2667 ahd_clear_intstat(ahd);
@@ -2697,7 +2697,7 @@ ahd_handle_transmission_error(struct ahd_softc *ahd)
2697 || (lqistate == 0x29)) { 2697 || (lqistate == 0x29)) {
2698#ifdef AHD_DEBUG 2698#ifdef AHD_DEBUG
2699 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { 2699 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
2700 printf("%s: NLQCRC found via LQISTATE\n", 2700 printk("%s: NLQCRC found via LQISTATE\n",
2701 ahd_name(ahd)); 2701 ahd_name(ahd));
2702 } 2702 }
2703#endif 2703#endif
@@ -2729,18 +2729,18 @@ ahd_handle_transmission_error(struct ahd_softc *ahd)
2729 2729
2730 cur_col = 0; 2730 cur_col = 0;
2731 if (silent == FALSE) { 2731 if (silent == FALSE) {
2732 printf("%s: Transmission error detected\n", ahd_name(ahd)); 2732 printk("%s: Transmission error detected\n", ahd_name(ahd));
2733 ahd_lqistat1_print(lqistat1, &cur_col, 50); 2733 ahd_lqistat1_print(lqistat1, &cur_col, 50);
2734 ahd_lastphase_print(lastphase, &cur_col, 50); 2734 ahd_lastphase_print(lastphase, &cur_col, 50);
2735 ahd_scsisigi_print(curphase, &cur_col, 50); 2735 ahd_scsisigi_print(curphase, &cur_col, 50);
2736 ahd_perrdiag_print(perrdiag, &cur_col, 50); 2736 ahd_perrdiag_print(perrdiag, &cur_col, 50);
2737 printf("\n"); 2737 printk("\n");
2738 ahd_dump_card_state(ahd); 2738 ahd_dump_card_state(ahd);
2739 } 2739 }
2740 2740
2741 if ((lqistat1 & (LQIOVERI_LQ|LQIOVERI_NLQ)) != 0) { 2741 if ((lqistat1 & (LQIOVERI_LQ|LQIOVERI_NLQ)) != 0) {
2742 if (silent == FALSE) { 2742 if (silent == FALSE) {
2743 printf("%s: Gross protocol error during incoming " 2743 printk("%s: Gross protocol error during incoming "
2744 "packet. lqistat1 == 0x%x. Resetting bus.\n", 2744 "packet. lqistat1 == 0x%x. Resetting bus.\n",
2745 ahd_name(ahd), lqistat1); 2745 ahd_name(ahd), lqistat1);
2746 } 2746 }
@@ -2769,7 +2769,7 @@ ahd_handle_transmission_error(struct ahd_softc *ahd)
2769 * (SPI4R09 10.7.3.3.3) 2769 * (SPI4R09 10.7.3.3.3)
2770 */ 2770 */
2771 ahd_outb(ahd, LQCTL2, LQIRETRY); 2771 ahd_outb(ahd, LQCTL2, LQIRETRY);
2772 printf("LQIRetry for LQICRCI_LQ to release ACK\n"); 2772 printk("LQIRetry for LQICRCI_LQ to release ACK\n");
2773 } else if ((lqistat1 & LQICRCI_NLQ) != 0) { 2773 } else if ((lqistat1 & LQICRCI_NLQ) != 0) {
2774 /* 2774 /*
2775 * We detected a CRC error in a NON-LQ packet. 2775 * We detected a CRC error in a NON-LQ packet.
@@ -2817,22 +2817,22 @@ ahd_handle_transmission_error(struct ahd_softc *ahd)
2817 * Busfree detection is enabled. 2817 * Busfree detection is enabled.
2818 */ 2818 */
2819 if (silent == FALSE) 2819 if (silent == FALSE)
2820 printf("LQICRC_NLQ\n"); 2820 printk("LQICRC_NLQ\n");
2821 if (scb == NULL) { 2821 if (scb == NULL) {
2822 printf("%s: No SCB valid for LQICRC_NLQ. " 2822 printk("%s: No SCB valid for LQICRC_NLQ. "
2823 "Resetting bus\n", ahd_name(ahd)); 2823 "Resetting bus\n", ahd_name(ahd));
2824 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); 2824 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
2825 return; 2825 return;
2826 } 2826 }
2827 } else if ((lqistat1 & LQIBADLQI) != 0) { 2827 } else if ((lqistat1 & LQIBADLQI) != 0) {
2828 printf("Need to handle BADLQI!\n"); 2828 printk("Need to handle BADLQI!\n");
2829 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); 2829 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
2830 return; 2830 return;
2831 } else if ((perrdiag & (PARITYERR|PREVPHASE)) == PARITYERR) { 2831 } else if ((perrdiag & (PARITYERR|PREVPHASE)) == PARITYERR) {
2832 if ((curphase & ~P_DATAIN_DT) != 0) { 2832 if ((curphase & ~P_DATAIN_DT) != 0) {
2833 /* Ack the byte. So we can continue. */ 2833 /* Ack the byte. So we can continue. */
2834 if (silent == FALSE) 2834 if (silent == FALSE)
2835 printf("Acking %s to clear perror\n", 2835 printk("Acking %s to clear perror\n",
2836 ahd_lookup_phase_entry(curphase)->phasemsg); 2836 ahd_lookup_phase_entry(curphase)->phasemsg);
2837 ahd_inb(ahd, SCSIDAT); 2837 ahd_inb(ahd, SCSIDAT);
2838 } 2838 }
@@ -2877,10 +2877,10 @@ ahd_handle_lqiphase_error(struct ahd_softc *ahd, u_int lqistat1)
2877 if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0 2877 if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0
2878 && (ahd_inb(ahd, MDFFSTAT) & DLZERO) != 0) { 2878 && (ahd_inb(ahd, MDFFSTAT) & DLZERO) != 0) {
2879 if ((lqistat1 & LQIPHASE_LQ) != 0) { 2879 if ((lqistat1 & LQIPHASE_LQ) != 0) {
2880 printf("LQIRETRY for LQIPHASE_LQ\n"); 2880 printk("LQIRETRY for LQIPHASE_LQ\n");
2881 ahd_outb(ahd, LQCTL2, LQIRETRY); 2881 ahd_outb(ahd, LQCTL2, LQIRETRY);
2882 } else if ((lqistat1 & LQIPHASE_NLQ) != 0) { 2882 } else if ((lqistat1 & LQIPHASE_NLQ) != 0) {
2883 printf("LQIRETRY for LQIPHASE_NLQ\n"); 2883 printk("LQIRETRY for LQIPHASE_NLQ\n");
2884 ahd_outb(ahd, LQCTL2, LQIRETRY); 2884 ahd_outb(ahd, LQCTL2, LQIRETRY);
2885 } else 2885 } else
2886 panic("ahd_handle_lqiphase_error: No phase errors\n"); 2886 panic("ahd_handle_lqiphase_error: No phase errors\n");
@@ -2888,7 +2888,7 @@ ahd_handle_lqiphase_error(struct ahd_softc *ahd, u_int lqistat1)
2888 ahd_outb(ahd, CLRINT, CLRSCSIINT); 2888 ahd_outb(ahd, CLRINT, CLRSCSIINT);
2889 ahd_unpause(ahd); 2889 ahd_unpause(ahd);
2890 } else { 2890 } else {
2891 printf("Reseting Channel for LQI Phase error\n"); 2891 printk("Reseting Channel for LQI Phase error\n");
2892 ahd_dump_card_state(ahd); 2892 ahd_dump_card_state(ahd);
2893 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); 2893 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
2894 } 2894 }
@@ -2976,7 +2976,7 @@ ahd_handle_pkt_busfree(struct ahd_softc *ahd, u_int busfreetime)
2976 if (scb->crc_retry_count < AHD_MAX_LQ_CRC_ERRORS) { 2976 if (scb->crc_retry_count < AHD_MAX_LQ_CRC_ERRORS) {
2977 if (SCB_IS_SILENT(scb) == FALSE) { 2977 if (SCB_IS_SILENT(scb) == FALSE) {
2978 ahd_print_path(ahd, scb); 2978 ahd_print_path(ahd, scb);
2979 printf("Probable outgoing LQ CRC error. " 2979 printk("Probable outgoing LQ CRC error. "
2980 "Retrying command\n"); 2980 "Retrying command\n");
2981 } 2981 }
2982 scb->crc_retry_count++; 2982 scb->crc_retry_count++;
@@ -2998,7 +2998,7 @@ ahd_handle_pkt_busfree(struct ahd_softc *ahd, u_int busfreetime)
2998 ahd_outb(ahd, CLRSINT1, CLRSCSIPERR|CLRBUSFREE); 2998 ahd_outb(ahd, CLRSINT1, CLRSCSIPERR|CLRBUSFREE);
2999#ifdef AHD_DEBUG 2999#ifdef AHD_DEBUG
3000 if ((ahd_debug & AHD_SHOW_MASKED_ERRORS) != 0) 3000 if ((ahd_debug & AHD_SHOW_MASKED_ERRORS) != 0)
3001 printf("%s: Parity on last REQ detected " 3001 printk("%s: Parity on last REQ detected "
3002 "during busfree phase.\n", 3002 "during busfree phase.\n",
3003 ahd_name(ahd)); 3003 ahd_name(ahd));
3004#endif 3004#endif
@@ -3012,7 +3012,7 @@ ahd_handle_pkt_busfree(struct ahd_softc *ahd, u_int busfreetime)
3012 scbid = ahd_get_scbptr(ahd); 3012 scbid = ahd_get_scbptr(ahd);
3013 scb = ahd_lookup_scb(ahd, scbid); 3013 scb = ahd_lookup_scb(ahd, scbid);
3014 ahd_print_path(ahd, scb); 3014 ahd_print_path(ahd, scb);
3015 printf("Unexpected PKT busfree condition\n"); 3015 printk("Unexpected PKT busfree condition\n");
3016 ahd_dump_card_state(ahd); 3016 ahd_dump_card_state(ahd);
3017 ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), 'A', 3017 ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), 'A',
3018 SCB_GET_LUN(scb), SCB_GET_TAG(scb), 3018 SCB_GET_LUN(scb), SCB_GET_TAG(scb),
@@ -3021,7 +3021,7 @@ ahd_handle_pkt_busfree(struct ahd_softc *ahd, u_int busfreetime)
3021 /* Return restarting the sequencer. */ 3021 /* Return restarting the sequencer. */
3022 return (1); 3022 return (1);
3023 } 3023 }
3024 printf("%s: Unexpected PKT busfree condition\n", ahd_name(ahd)); 3024 printk("%s: Unexpected PKT busfree condition\n", ahd_name(ahd));
3025 ahd_dump_card_state(ahd); 3025 ahd_dump_card_state(ahd);
3026 /* Restart the sequencer. */ 3026 /* Restart the sequencer. */
3027 return (1); 3027 return (1);
@@ -3076,14 +3076,14 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
3076 3076
3077 if (scb == NULL) { 3077 if (scb == NULL) {
3078 ahd_print_devinfo(ahd, &devinfo); 3078 ahd_print_devinfo(ahd, &devinfo);
3079 printf("Abort for unidentified " 3079 printk("Abort for unidentified "
3080 "connection completed.\n"); 3080 "connection completed.\n");
3081 /* restart the sequencer. */ 3081 /* restart the sequencer. */
3082 return (1); 3082 return (1);
3083 } 3083 }
3084 sent_msg = ahd->msgout_buf[ahd->msgout_index - 1]; 3084 sent_msg = ahd->msgout_buf[ahd->msgout_index - 1];
3085 ahd_print_path(ahd, scb); 3085 ahd_print_path(ahd, scb);
3086 printf("SCB %d - Abort%s Completed.\n", 3086 printk("SCB %d - Abort%s Completed.\n",
3087 SCB_GET_TAG(scb), 3087 SCB_GET_TAG(scb),
3088 sent_msg == MSG_ABORT_TAG ? "" : " Tag"); 3088 sent_msg == MSG_ABORT_TAG ? "" : " Tag");
3089 3089
@@ -3109,7 +3109,7 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
3109 found = ahd_abort_scbs(ahd, target, 'A', saved_lun, 3109 found = ahd_abort_scbs(ahd, target, 'A', saved_lun,
3110 tag, ROLE_INITIATOR, 3110 tag, ROLE_INITIATOR,
3111 CAM_REQ_ABORTED); 3111 CAM_REQ_ABORTED);
3112 printf("found == 0x%x\n", found); 3112 printk("found == 0x%x\n", found);
3113 printerror = 0; 3113 printerror = 0;
3114 } else if (ahd_sent_msg(ahd, AHDMSG_1B, 3114 } else if (ahd_sent_msg(ahd, AHDMSG_1B,
3115 MSG_BUS_DEV_RESET, TRUE)) { 3115 MSG_BUS_DEV_RESET, TRUE)) {
@@ -3147,7 +3147,7 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
3147 */ 3147 */
3148#ifdef AHD_DEBUG 3148#ifdef AHD_DEBUG
3149 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 3149 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
3150 printf("PPR negotiation rejected busfree.\n"); 3150 printk("PPR negotiation rejected busfree.\n");
3151#endif 3151#endif
3152 tinfo = ahd_fetch_transinfo(ahd, devinfo.channel, 3152 tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
3153 devinfo.our_scsiid, 3153 devinfo.our_scsiid,
@@ -3191,7 +3191,7 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
3191 */ 3191 */
3192#ifdef AHD_DEBUG 3192#ifdef AHD_DEBUG
3193 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 3193 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
3194 printf("WDTR negotiation rejected busfree.\n"); 3194 printk("WDTR negotiation rejected busfree.\n");
3195#endif 3195#endif
3196 ahd_set_width(ahd, &devinfo, 3196 ahd_set_width(ahd, &devinfo,
3197 MSG_EXT_WDTR_BUS_8_BIT, 3197 MSG_EXT_WDTR_BUS_8_BIT,
@@ -3216,7 +3216,7 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
3216 */ 3216 */
3217#ifdef AHD_DEBUG 3217#ifdef AHD_DEBUG
3218 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 3218 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
3219 printf("SDTR negotiation rejected busfree.\n"); 3219 printk("SDTR negotiation rejected busfree.\n");
3220#endif 3220#endif
3221 ahd_set_syncrate(ahd, &devinfo, 3221 ahd_set_syncrate(ahd, &devinfo,
3222 /*period*/0, /*offset*/0, 3222 /*period*/0, /*offset*/0,
@@ -3240,7 +3240,7 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
3240 3240
3241#ifdef AHD_DEBUG 3241#ifdef AHD_DEBUG
3242 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 3242 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
3243 printf("Expected IDE Busfree\n"); 3243 printk("Expected IDE Busfree\n");
3244#endif 3244#endif
3245 printerror = 0; 3245 printerror = 0;
3246 } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_QASREJ_BUSFREE) 3246 } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_QASREJ_BUSFREE)
@@ -3249,7 +3249,7 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
3249 3249
3250#ifdef AHD_DEBUG 3250#ifdef AHD_DEBUG
3251 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 3251 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
3252 printf("Expected QAS Reject Busfree\n"); 3252 printk("Expected QAS Reject Busfree\n");
3253#endif 3253#endif
3254 printerror = 0; 3254 printerror = 0;
3255 } 3255 }
@@ -3275,7 +3275,7 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
3275 } else { 3275 } else {
3276#ifdef AHD_DEBUG 3276#ifdef AHD_DEBUG
3277 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 3277 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
3278 printf("PPR Negotiation Busfree.\n"); 3278 printk("PPR Negotiation Busfree.\n");
3279#endif 3279#endif
3280 ahd_done(ahd, scb); 3280 ahd_done(ahd, scb);
3281 } 3281 }
@@ -3302,9 +3302,9 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
3302 * We had not fully identified this connection, 3302 * We had not fully identified this connection,
3303 * so we cannot abort anything. 3303 * so we cannot abort anything.
3304 */ 3304 */
3305 printf("%s: ", ahd_name(ahd)); 3305 printk("%s: ", ahd_name(ahd));
3306 } 3306 }
3307 printf("Unexpected busfree %s, %d SCBs aborted, " 3307 printk("Unexpected busfree %s, %d SCBs aborted, "
3308 "PRGMCNT == 0x%x\n", 3308 "PRGMCNT == 0x%x\n",
3309 ahd_lookup_phase_entry(lastphase)->phasemsg, 3309 ahd_lookup_phase_entry(lastphase)->phasemsg,
3310 aborted, 3310 aborted,
@@ -3342,7 +3342,7 @@ ahd_handle_proto_violation(struct ahd_softc *ahd)
3342 * to match. 3342 * to match.
3343 */ 3343 */
3344 ahd_print_devinfo(ahd, &devinfo); 3344 ahd_print_devinfo(ahd, &devinfo);
3345 printf("Target did not send an IDENTIFY message. " 3345 printk("Target did not send an IDENTIFY message. "
3346 "LASTPHASE = 0x%x.\n", lastphase); 3346 "LASTPHASE = 0x%x.\n", lastphase);
3347 scb = NULL; 3347 scb = NULL;
3348 } else if (scb == NULL) { 3348 } else if (scb == NULL) {
@@ -3351,13 +3351,13 @@ ahd_handle_proto_violation(struct ahd_softc *ahd)
3351 * transaction. Print an error and reset the bus. 3351 * transaction. Print an error and reset the bus.
3352 */ 3352 */
3353 ahd_print_devinfo(ahd, &devinfo); 3353 ahd_print_devinfo(ahd, &devinfo);
3354 printf("No SCB found during protocol violation\n"); 3354 printk("No SCB found during protocol violation\n");
3355 goto proto_violation_reset; 3355 goto proto_violation_reset;
3356 } else { 3356 } else {
3357 ahd_set_transaction_status(scb, CAM_SEQUENCE_FAIL); 3357 ahd_set_transaction_status(scb, CAM_SEQUENCE_FAIL);
3358 if ((seq_flags & NO_CDB_SENT) != 0) { 3358 if ((seq_flags & NO_CDB_SENT) != 0) {
3359 ahd_print_path(ahd, scb); 3359 ahd_print_path(ahd, scb);
3360 printf("No or incomplete CDB sent to device.\n"); 3360 printk("No or incomplete CDB sent to device.\n");
3361 } else if ((ahd_inb_scbram(ahd, SCB_CONTROL) 3361 } else if ((ahd_inb_scbram(ahd, SCB_CONTROL)
3362 & STATUS_RCVD) == 0) { 3362 & STATUS_RCVD) == 0) {
3363 /* 3363 /*
@@ -3368,10 +3368,10 @@ ahd_handle_proto_violation(struct ahd_softc *ahd)
3368 * message. 3368 * message.
3369 */ 3369 */
3370 ahd_print_path(ahd, scb); 3370 ahd_print_path(ahd, scb);
3371 printf("Completed command without status.\n"); 3371 printk("Completed command without status.\n");
3372 } else { 3372 } else {
3373 ahd_print_path(ahd, scb); 3373 ahd_print_path(ahd, scb);
3374 printf("Unknown protocol violation.\n"); 3374 printk("Unknown protocol violation.\n");
3375 ahd_dump_card_state(ahd); 3375 ahd_dump_card_state(ahd);
3376 } 3376 }
3377 } 3377 }
@@ -3385,7 +3385,7 @@ proto_violation_reset:
3385 * it away with a bus reset. 3385 * it away with a bus reset.
3386 */ 3386 */
3387 found = ahd_reset_channel(ahd, 'A', TRUE); 3387 found = ahd_reset_channel(ahd, 'A', TRUE);
3388 printf("%s: Issued Channel %c Bus Reset. " 3388 printk("%s: Issued Channel %c Bus Reset. "
3389 "%d SCBs aborted\n", ahd_name(ahd), 'A', found); 3389 "%d SCBs aborted\n", ahd_name(ahd), 'A', found);
3390 } else { 3390 } else {
3391 /* 3391 /*
@@ -3407,7 +3407,7 @@ proto_violation_reset:
3407 ahd_print_path(ahd, scb); 3407 ahd_print_path(ahd, scb);
3408 scb->flags |= SCB_ABORT; 3408 scb->flags |= SCB_ABORT;
3409 } 3409 }
3410 printf("Protocol violation %s. Attempting to abort.\n", 3410 printk("Protocol violation %s. Attempting to abort.\n",
3411 ahd_lookup_phase_entry(curphase)->phasemsg); 3411 ahd_lookup_phase_entry(curphase)->phasemsg);
3412 } 3412 }
3413} 3413}
@@ -3425,7 +3425,7 @@ ahd_force_renegotiation(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
3425#ifdef AHD_DEBUG 3425#ifdef AHD_DEBUG
3426 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { 3426 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
3427 ahd_print_devinfo(ahd, devinfo); 3427 ahd_print_devinfo(ahd, devinfo);
3428 printf("Forcing renegotiation\n"); 3428 printk("Forcing renegotiation\n");
3429 } 3429 }
3430#endif 3430#endif
3431 targ_info = ahd_fetch_transinfo(ahd, 3431 targ_info = ahd_fetch_transinfo(ahd,
@@ -3486,7 +3486,7 @@ ahd_clear_critical_section(struct ahd_softc *ahd)
3486 break; 3486 break;
3487 3487
3488 if (steps > AHD_MAX_STEPS) { 3488 if (steps > AHD_MAX_STEPS) {
3489 printf("%s: Infinite loop in critical section\n" 3489 printk("%s: Infinite loop in critical section\n"
3490 "%s: First Instruction 0x%x now 0x%x\n", 3490 "%s: First Instruction 0x%x now 0x%x\n",
3491 ahd_name(ahd), ahd_name(ahd), first_instr, 3491 ahd_name(ahd), ahd_name(ahd), first_instr,
3492 seqaddr); 3492 seqaddr);
@@ -3497,7 +3497,7 @@ ahd_clear_critical_section(struct ahd_softc *ahd)
3497 steps++; 3497 steps++;
3498#ifdef AHD_DEBUG 3498#ifdef AHD_DEBUG
3499 if ((ahd_debug & AHD_SHOW_MISC) != 0) 3499 if ((ahd_debug & AHD_SHOW_MISC) != 0)
3500 printf("%s: Single stepping at 0x%x\n", ahd_name(ahd), 3500 printk("%s: Single stepping at 0x%x\n", ahd_name(ahd),
3501 seqaddr); 3501 seqaddr);
3502#endif 3502#endif
3503 if (stepping == FALSE) { 3503 if (stepping == FALSE) {
@@ -3601,16 +3601,16 @@ ahd_print_scb(struct scb *scb)
3601 int i; 3601 int i;
3602 3602
3603 hscb = scb->hscb; 3603 hscb = scb->hscb;
3604 printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n", 3604 printk("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n",
3605 (void *)scb, 3605 (void *)scb,
3606 hscb->control, 3606 hscb->control,
3607 hscb->scsiid, 3607 hscb->scsiid,
3608 hscb->lun, 3608 hscb->lun,
3609 hscb->cdb_len); 3609 hscb->cdb_len);
3610 printf("Shared Data: "); 3610 printk("Shared Data: ");
3611 for (i = 0; i < sizeof(hscb->shared_data.idata.cdb); i++) 3611 for (i = 0; i < sizeof(hscb->shared_data.idata.cdb); i++)
3612 printf("%#02x", hscb->shared_data.idata.cdb[i]); 3612 printk("%#02x", hscb->shared_data.idata.cdb[i]);
3613 printf(" dataptr:%#x%x datacnt:%#x sgptr:%#x tag:%#x\n", 3613 printk(" dataptr:%#x%x datacnt:%#x sgptr:%#x tag:%#x\n",
3614 (uint32_t)((ahd_le64toh(hscb->dataptr) >> 32) & 0xFFFFFFFF), 3614 (uint32_t)((ahd_le64toh(hscb->dataptr) >> 32) & 0xFFFFFFFF),
3615 (uint32_t)(ahd_le64toh(hscb->dataptr) & 0xFFFFFFFF), 3615 (uint32_t)(ahd_le64toh(hscb->dataptr) & 0xFFFFFFFF),
3616 ahd_le32toh(hscb->datacnt), 3616 ahd_le32toh(hscb->datacnt),
@@ -3637,7 +3637,7 @@ ahd_alloc_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel)
3637 && ahd->enabled_targets[scsi_id] != master_tstate) 3637 && ahd->enabled_targets[scsi_id] != master_tstate)
3638 panic("%s: ahd_alloc_tstate - Target already allocated", 3638 panic("%s: ahd_alloc_tstate - Target already allocated",
3639 ahd_name(ahd)); 3639 ahd_name(ahd));
3640 tstate = malloc(sizeof(*tstate), M_DEVBUF, M_NOWAIT); 3640 tstate = kmalloc(sizeof(*tstate), GFP_ATOMIC);
3641 if (tstate == NULL) 3641 if (tstate == NULL)
3642 return (NULL); 3642 return (NULL);
3643 3643
@@ -3682,7 +3682,7 @@ ahd_free_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel, int force)
3682 3682
3683 tstate = ahd->enabled_targets[scsi_id]; 3683 tstate = ahd->enabled_targets[scsi_id];
3684 if (tstate != NULL) 3684 if (tstate != NULL)
3685 free(tstate, M_DEVBUF); 3685 kfree(tstate);
3686 ahd->enabled_targets[scsi_id] = NULL; 3686 ahd->enabled_targets[scsi_id] = NULL;
3687} 3687}
3688#endif 3688#endif
@@ -3942,37 +3942,37 @@ ahd_set_syncrate(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
3942 if (offset != 0) { 3942 if (offset != 0) {
3943 int options; 3943 int options;
3944 3944
3945 printf("%s: target %d synchronous with " 3945 printk("%s: target %d synchronous with "
3946 "period = 0x%x, offset = 0x%x", 3946 "period = 0x%x, offset = 0x%x",
3947 ahd_name(ahd), devinfo->target, 3947 ahd_name(ahd), devinfo->target,
3948 period, offset); 3948 period, offset);
3949 options = 0; 3949 options = 0;
3950 if ((ppr_options & MSG_EXT_PPR_RD_STRM) != 0) { 3950 if ((ppr_options & MSG_EXT_PPR_RD_STRM) != 0) {
3951 printf("(RDSTRM"); 3951 printk("(RDSTRM");
3952 options++; 3952 options++;
3953 } 3953 }
3954 if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) { 3954 if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) {
3955 printf("%s", options ? "|DT" : "(DT"); 3955 printk("%s", options ? "|DT" : "(DT");
3956 options++; 3956 options++;
3957 } 3957 }
3958 if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) { 3958 if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
3959 printf("%s", options ? "|IU" : "(IU"); 3959 printk("%s", options ? "|IU" : "(IU");
3960 options++; 3960 options++;
3961 } 3961 }
3962 if ((ppr_options & MSG_EXT_PPR_RTI) != 0) { 3962 if ((ppr_options & MSG_EXT_PPR_RTI) != 0) {
3963 printf("%s", options ? "|RTI" : "(RTI"); 3963 printk("%s", options ? "|RTI" : "(RTI");
3964 options++; 3964 options++;
3965 } 3965 }
3966 if ((ppr_options & MSG_EXT_PPR_QAS_REQ) != 0) { 3966 if ((ppr_options & MSG_EXT_PPR_QAS_REQ) != 0) {
3967 printf("%s", options ? "|QAS" : "(QAS"); 3967 printk("%s", options ? "|QAS" : "(QAS");
3968 options++; 3968 options++;
3969 } 3969 }
3970 if (options != 0) 3970 if (options != 0)
3971 printf(")\n"); 3971 printk(")\n");
3972 else 3972 else
3973 printf("\n"); 3973 printk("\n");
3974 } else { 3974 } else {
3975 printf("%s: target %d using " 3975 printk("%s: target %d using "
3976 "asynchronous transfers%s\n", 3976 "asynchronous transfers%s\n",
3977 ahd_name(ahd), devinfo->target, 3977 ahd_name(ahd), devinfo->target,
3978 (ppr_options & MSG_EXT_PPR_QAS_REQ) != 0 3978 (ppr_options & MSG_EXT_PPR_QAS_REQ) != 0
@@ -4000,7 +4000,7 @@ ahd_set_syncrate(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
4000#ifdef AHD_DEBUG 4000#ifdef AHD_DEBUG
4001 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { 4001 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
4002 ahd_print_devinfo(ahd, devinfo); 4002 ahd_print_devinfo(ahd, devinfo);
4003 printf("Expecting IU Change busfree\n"); 4003 printk("Expecting IU Change busfree\n");
4004 } 4004 }
4005#endif 4005#endif
4006 ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE 4006 ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE
@@ -4009,7 +4009,7 @@ ahd_set_syncrate(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
4009 if ((old_ppr & MSG_EXT_PPR_IU_REQ) != 0) { 4009 if ((old_ppr & MSG_EXT_PPR_IU_REQ) != 0) {
4010#ifdef AHD_DEBUG 4010#ifdef AHD_DEBUG
4011 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 4011 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
4012 printf("PPR with IU_REQ outstanding\n"); 4012 printk("PPR with IU_REQ outstanding\n");
4013#endif 4013#endif
4014 ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE; 4014 ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE;
4015 } 4015 }
@@ -4061,7 +4061,7 @@ ahd_set_width(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
4061 ahd_send_async(ahd, devinfo->channel, devinfo->target, 4061 ahd_send_async(ahd, devinfo->channel, devinfo->target,
4062 CAM_LUN_WILDCARD, AC_TRANSFER_NEG); 4062 CAM_LUN_WILDCARD, AC_TRANSFER_NEG);
4063 if (bootverbose) { 4063 if (bootverbose) {
4064 printf("%s: target %d using %dbit transfers\n", 4064 printk("%s: target %d using %dbit transfers\n",
4065 ahd_name(ahd), devinfo->target, 4065 ahd_name(ahd), devinfo->target,
4066 8 * (0x01 << width)); 4066 8 * (0x01 << width));
4067 } 4067 }
@@ -4337,7 +4337,7 @@ ahd_fetch_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
4337void 4337void
4338ahd_print_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) 4338ahd_print_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
4339{ 4339{
4340 printf("%s:%c:%d:%d: ", ahd_name(ahd), 'A', 4340 printk("%s:%c:%d:%d: ", ahd_name(ahd), 'A',
4341 devinfo->target, devinfo->lun); 4341 devinfo->target, devinfo->lun);
4342} 4342}
4343 4343
@@ -4419,11 +4419,11 @@ ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
4419 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 4419 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
4420#ifdef AHD_DEBUG 4420#ifdef AHD_DEBUG
4421 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 4421 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
4422 printf("Setting up for Parity Error delivery\n"); 4422 printk("Setting up for Parity Error delivery\n");
4423#endif 4423#endif
4424 return; 4424 return;
4425 } else if (scb == NULL) { 4425 } else if (scb == NULL) {
4426 printf("%s: WARNING. No pending message for " 4426 printk("%s: WARNING. No pending message for "
4427 "I_T msgin. Issuing NO-OP\n", ahd_name(ahd)); 4427 "I_T msgin. Issuing NO-OP\n", ahd_name(ahd));
4428 ahd->msgout_buf[ahd->msgout_index++] = MSG_NOOP; 4428 ahd->msgout_buf[ahd->msgout_index++] = MSG_NOOP;
4429 ahd->msgout_len++; 4429 ahd->msgout_len++;
@@ -4454,7 +4454,7 @@ ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
4454 ahd->msgout_buf[ahd->msgout_index++] = MSG_BUS_DEV_RESET; 4454 ahd->msgout_buf[ahd->msgout_index++] = MSG_BUS_DEV_RESET;
4455 ahd->msgout_len++; 4455 ahd->msgout_len++;
4456 ahd_print_path(ahd, scb); 4456 ahd_print_path(ahd, scb);
4457 printf("Bus Device Reset Message Sent\n"); 4457 printk("Bus Device Reset Message Sent\n");
4458 /* 4458 /*
4459 * Clear our selection hardware in advance of 4459 * Clear our selection hardware in advance of
4460 * the busfree. We may have an entry in the waiting 4460 * the busfree. We may have an entry in the waiting
@@ -4472,7 +4472,7 @@ ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
4472 } 4472 }
4473 ahd->msgout_len++; 4473 ahd->msgout_len++;
4474 ahd_print_path(ahd, scb); 4474 ahd_print_path(ahd, scb);
4475 printf("Abort%s Message Sent\n", 4475 printk("Abort%s Message Sent\n",
4476 (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : ""); 4476 (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : "");
4477 /* 4477 /*
4478 * Clear our selection hardware in advance of 4478 * Clear our selection hardware in advance of
@@ -4493,9 +4493,9 @@ ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
4493 */ 4493 */
4494 ahd_outb(ahd, SCSISEQ0, 0); 4494 ahd_outb(ahd, SCSISEQ0, 0);
4495 } else { 4495 } else {
4496 printf("ahd_intr: AWAITING_MSG for an SCB that " 4496 printk("ahd_intr: AWAITING_MSG for an SCB that "
4497 "does not have a waiting message\n"); 4497 "does not have a waiting message\n");
4498 printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid, 4498 printk("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
4499 devinfo->target_mask); 4499 devinfo->target_mask);
4500 panic("SCB = %d, SCB Control = %x:%x, MSG_OUT = %x " 4500 panic("SCB = %d, SCB Control = %x:%x, MSG_OUT = %x "
4501 "SCB flags = %x", SCB_GET_TAG(scb), scb->hscb->control, 4501 "SCB flags = %x", SCB_GET_TAG(scb), scb->hscb->control,
@@ -4577,7 +4577,7 @@ ahd_build_transfer_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
4577 4577
4578 if (bootverbose) { 4578 if (bootverbose) {
4579 ahd_print_devinfo(ahd, devinfo); 4579 ahd_print_devinfo(ahd, devinfo);
4580 printf("Ensuring async\n"); 4580 printk("Ensuring async\n");
4581 } 4581 }
4582 } 4582 }
4583 /* Target initiated PPR is not allowed in the SCSI spec */ 4583 /* Target initiated PPR is not allowed in the SCSI spec */
@@ -4624,7 +4624,7 @@ ahd_construct_sdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
4624 ahd->msgout_buf + ahd->msgout_index, period, offset); 4624 ahd->msgout_buf + ahd->msgout_index, period, offset);
4625 ahd->msgout_len += 5; 4625 ahd->msgout_len += 5;
4626 if (bootverbose) { 4626 if (bootverbose) {
4627 printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n", 4627 printk("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n",
4628 ahd_name(ahd), devinfo->channel, devinfo->target, 4628 ahd_name(ahd), devinfo->channel, devinfo->target,
4629 devinfo->lun, period, offset); 4629 devinfo->lun, period, offset);
4630 } 4630 }
@@ -4642,7 +4642,7 @@ ahd_construct_wdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
4642 ahd->msgout_buf + ahd->msgout_index, bus_width); 4642 ahd->msgout_buf + ahd->msgout_index, bus_width);
4643 ahd->msgout_len += 4; 4643 ahd->msgout_len += 4;
4644 if (bootverbose) { 4644 if (bootverbose) {
4645 printf("(%s:%c:%d:%d): Sending WDTR %x\n", 4645 printk("(%s:%c:%d:%d): Sending WDTR %x\n",
4646 ahd_name(ahd), devinfo->channel, devinfo->target, 4646 ahd_name(ahd), devinfo->channel, devinfo->target,
4647 devinfo->lun, bus_width); 4647 devinfo->lun, bus_width);
4648 } 4648 }
@@ -4671,7 +4671,7 @@ ahd_construct_ppr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
4671 bus_width, ppr_options); 4671 bus_width, ppr_options);
4672 ahd->msgout_len += 8; 4672 ahd->msgout_len += 8;
4673 if (bootverbose) { 4673 if (bootverbose) {
4674 printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, " 4674 printk("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, "
4675 "offset %x, ppr_options %x\n", ahd_name(ahd), 4675 "offset %x, ppr_options %x\n", ahd_name(ahd),
4676 devinfo->channel, devinfo->target, devinfo->lun, 4676 devinfo->channel, devinfo->target, devinfo->lun,
4677 bus_width, period, offset, ppr_options); 4677 bus_width, period, offset, ppr_options);
@@ -4721,7 +4721,7 @@ ahd_handle_message_phase(struct ahd_softc *ahd)
4721 bus_phase = ahd_inb(ahd, LASTPHASE); 4721 bus_phase = ahd_inb(ahd, LASTPHASE);
4722 4722
4723 if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0) { 4723 if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0) {
4724 printf("LQIRETRY for LQIPHASE_OUTPKT\n"); 4724 printk("LQIRETRY for LQIPHASE_OUTPKT\n");
4725 ahd_outb(ahd, LQCTL2, LQIRETRY); 4725 ahd_outb(ahd, LQCTL2, LQIRETRY);
4726 } 4726 }
4727reswitch: 4727reswitch:
@@ -4738,14 +4738,14 @@ reswitch:
4738#ifdef AHD_DEBUG 4738#ifdef AHD_DEBUG
4739 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { 4739 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
4740 ahd_print_devinfo(ahd, &devinfo); 4740 ahd_print_devinfo(ahd, &devinfo);
4741 printf("INITIATOR_MSG_OUT"); 4741 printk("INITIATOR_MSG_OUT");
4742 } 4742 }
4743#endif 4743#endif
4744 phasemis = bus_phase != P_MESGOUT; 4744 phasemis = bus_phase != P_MESGOUT;
4745 if (phasemis) { 4745 if (phasemis) {
4746#ifdef AHD_DEBUG 4746#ifdef AHD_DEBUG
4747 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { 4747 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
4748 printf(" PHASEMIS %s\n", 4748 printk(" PHASEMIS %s\n",
4749 ahd_lookup_phase_entry(bus_phase) 4749 ahd_lookup_phase_entry(bus_phase)
4750 ->phasemsg); 4750 ->phasemsg);
4751 } 4751 }
@@ -4772,7 +4772,7 @@ reswitch:
4772 ahd_outb(ahd, CLRSINT1, CLRREQINIT); 4772 ahd_outb(ahd, CLRSINT1, CLRREQINIT);
4773#ifdef AHD_DEBUG 4773#ifdef AHD_DEBUG
4774 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 4774 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
4775 printf(" byte 0x%x\n", ahd->send_msg_perror); 4775 printk(" byte 0x%x\n", ahd->send_msg_perror);
4776#endif 4776#endif
4777 /* 4777 /*
4778 * If we are notifying the target of a CRC error 4778 * If we are notifying the target of a CRC error
@@ -4813,7 +4813,7 @@ reswitch:
4813 ahd_outb(ahd, CLRSINT1, CLRREQINIT); 4813 ahd_outb(ahd, CLRSINT1, CLRREQINIT);
4814#ifdef AHD_DEBUG 4814#ifdef AHD_DEBUG
4815 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 4815 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
4816 printf(" byte 0x%x\n", 4816 printk(" byte 0x%x\n",
4817 ahd->msgout_buf[ahd->msgout_index]); 4817 ahd->msgout_buf[ahd->msgout_index]);
4818#endif 4818#endif
4819 ahd_outb(ahd, RETURN_2, ahd->msgout_buf[ahd->msgout_index++]); 4819 ahd_outb(ahd, RETURN_2, ahd->msgout_buf[ahd->msgout_index++]);
@@ -4828,14 +4828,14 @@ reswitch:
4828#ifdef AHD_DEBUG 4828#ifdef AHD_DEBUG
4829 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { 4829 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
4830 ahd_print_devinfo(ahd, &devinfo); 4830 ahd_print_devinfo(ahd, &devinfo);
4831 printf("INITIATOR_MSG_IN"); 4831 printk("INITIATOR_MSG_IN");
4832 } 4832 }
4833#endif 4833#endif
4834 phasemis = bus_phase != P_MESGIN; 4834 phasemis = bus_phase != P_MESGIN;
4835 if (phasemis) { 4835 if (phasemis) {
4836#ifdef AHD_DEBUG 4836#ifdef AHD_DEBUG
4837 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { 4837 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
4838 printf(" PHASEMIS %s\n", 4838 printk(" PHASEMIS %s\n",
4839 ahd_lookup_phase_entry(bus_phase) 4839 ahd_lookup_phase_entry(bus_phase)
4840 ->phasemsg); 4840 ->phasemsg);
4841 } 4841 }
@@ -4856,7 +4856,7 @@ reswitch:
4856 ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIBUS); 4856 ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIBUS);
4857#ifdef AHD_DEBUG 4857#ifdef AHD_DEBUG
4858 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 4858 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
4859 printf(" byte 0x%x\n", 4859 printk(" byte 0x%x\n",
4860 ahd->msgin_buf[ahd->msgin_index]); 4860 ahd->msgin_buf[ahd->msgin_index]);
4861#endif 4861#endif
4862 4862
@@ -4878,7 +4878,7 @@ reswitch:
4878#ifdef AHD_DEBUG 4878#ifdef AHD_DEBUG
4879 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { 4879 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
4880 ahd_print_devinfo(ahd, &devinfo); 4880 ahd_print_devinfo(ahd, &devinfo);
4881 printf("Asserting ATN for response\n"); 4881 printk("Asserting ATN for response\n");
4882 } 4882 }
4883#endif 4883#endif
4884 ahd_assert_atn(ahd); 4884 ahd_assert_atn(ahd);
@@ -5026,7 +5026,7 @@ reswitch:
5026 5026
5027 if (end_session) { 5027 if (end_session) {
5028 if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0) { 5028 if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0) {
5029 printf("%s: Returning to Idle Loop\n", 5029 printk("%s: Returning to Idle Loop\n",
5030 ahd_name(ahd)); 5030 ahd_name(ahd));
5031 ahd_clear_msg_state(ahd); 5031 ahd_clear_msg_state(ahd);
5032 5032
@@ -5178,7 +5178,7 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
5178 ahd_validate_offset(ahd, tinfo, period, &offset, 5178 ahd_validate_offset(ahd, tinfo, period, &offset,
5179 tinfo->curr.width, devinfo->role); 5179 tinfo->curr.width, devinfo->role);
5180 if (bootverbose) { 5180 if (bootverbose) {
5181 printf("(%s:%c:%d:%d): Received " 5181 printk("(%s:%c:%d:%d): Received "
5182 "SDTR period %x, offset %x\n\t" 5182 "SDTR period %x, offset %x\n\t"
5183 "Filtered to period %x, offset %x\n", 5183 "Filtered to period %x, offset %x\n",
5184 ahd_name(ahd), devinfo->channel, 5184 ahd_name(ahd), devinfo->channel,
@@ -5208,7 +5208,7 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
5208 */ 5208 */
5209 if (bootverbose 5209 if (bootverbose
5210 && devinfo->role == ROLE_INITIATOR) { 5210 && devinfo->role == ROLE_INITIATOR) {
5211 printf("(%s:%c:%d:%d): Target " 5211 printk("(%s:%c:%d:%d): Target "
5212 "Initiated SDTR\n", 5212 "Initiated SDTR\n",
5213 ahd_name(ahd), devinfo->channel, 5213 ahd_name(ahd), devinfo->channel,
5214 devinfo->target, devinfo->lun); 5214 devinfo->target, devinfo->lun);
@@ -5250,7 +5250,7 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
5250 ahd_validate_width(ahd, tinfo, &bus_width, 5250 ahd_validate_width(ahd, tinfo, &bus_width,
5251 devinfo->role); 5251 devinfo->role);
5252 if (bootverbose) { 5252 if (bootverbose) {
5253 printf("(%s:%c:%d:%d): Received WDTR " 5253 printk("(%s:%c:%d:%d): Received WDTR "
5254 "%x filtered to %x\n", 5254 "%x filtered to %x\n",
5255 ahd_name(ahd), devinfo->channel, 5255 ahd_name(ahd), devinfo->channel,
5256 devinfo->target, devinfo->lun, 5256 devinfo->target, devinfo->lun,
@@ -5266,7 +5266,7 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
5266 */ 5266 */
5267 if (saved_width > bus_width) { 5267 if (saved_width > bus_width) {
5268 reject = TRUE; 5268 reject = TRUE;
5269 printf("(%s:%c:%d:%d): requested %dBit " 5269 printk("(%s:%c:%d:%d): requested %dBit "
5270 "transfers. Rejecting...\n", 5270 "transfers. Rejecting...\n",
5271 ahd_name(ahd), devinfo->channel, 5271 ahd_name(ahd), devinfo->channel,
5272 devinfo->target, devinfo->lun, 5272 devinfo->target, devinfo->lun,
@@ -5279,7 +5279,7 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
5279 */ 5279 */
5280 if (bootverbose 5280 if (bootverbose
5281 && devinfo->role == ROLE_INITIATOR) { 5281 && devinfo->role == ROLE_INITIATOR) {
5282 printf("(%s:%c:%d:%d): Target " 5282 printk("(%s:%c:%d:%d): Target "
5283 "Initiated WDTR\n", 5283 "Initiated WDTR\n",
5284 ahd_name(ahd), devinfo->channel, 5284 ahd_name(ahd), devinfo->channel,
5285 devinfo->target, devinfo->lun); 5285 devinfo->target, devinfo->lun);
@@ -5391,12 +5391,12 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
5391 } 5391 }
5392 } else { 5392 } else {
5393 if (devinfo->role != ROLE_TARGET) 5393 if (devinfo->role != ROLE_TARGET)
5394 printf("(%s:%c:%d:%d): Target " 5394 printk("(%s:%c:%d:%d): Target "
5395 "Initiated PPR\n", 5395 "Initiated PPR\n",
5396 ahd_name(ahd), devinfo->channel, 5396 ahd_name(ahd), devinfo->channel,
5397 devinfo->target, devinfo->lun); 5397 devinfo->target, devinfo->lun);
5398 else 5398 else
5399 printf("(%s:%c:%d:%d): Initiator " 5399 printk("(%s:%c:%d:%d): Initiator "
5400 "Initiated PPR\n", 5400 "Initiated PPR\n",
5401 ahd_name(ahd), devinfo->channel, 5401 ahd_name(ahd), devinfo->channel,
5402 devinfo->target, devinfo->lun); 5402 devinfo->target, devinfo->lun);
@@ -5408,7 +5408,7 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
5408 response = TRUE; 5408 response = TRUE;
5409 } 5409 }
5410 if (bootverbose) { 5410 if (bootverbose) {
5411 printf("(%s:%c:%d:%d): Received PPR width %x, " 5411 printk("(%s:%c:%d:%d): Received PPR width %x, "
5412 "period %x, offset %x,options %x\n" 5412 "period %x, offset %x,options %x\n"
5413 "\tFiltered to width %x, period %x, " 5413 "\tFiltered to width %x, period %x, "
5414 "offset %x, options %x\n", 5414 "offset %x, options %x\n",
@@ -5484,7 +5484,7 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
5484 case MSG_QAS_REQUEST: 5484 case MSG_QAS_REQUEST:
5485#ifdef AHD_DEBUG 5485#ifdef AHD_DEBUG
5486 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 5486 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
5487 printf("%s: QAS request. SCSISIGI == 0x%x\n", 5487 printk("%s: QAS request. SCSISIGI == 0x%x\n",
5488 ahd_name(ahd), ahd_inb(ahd, SCSISIGI)); 5488 ahd_name(ahd), ahd_inb(ahd, SCSISIGI));
5489#endif 5489#endif
5490 ahd->msg_flags |= MSG_FLAG_EXPECT_QASREJ_BUSFREE; 5490 ahd->msg_flags |= MSG_FLAG_EXPECT_QASREJ_BUSFREE;
@@ -5549,7 +5549,7 @@ ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
5549 * off these options. 5549 * off these options.
5550 */ 5550 */
5551 if (bootverbose) { 5551 if (bootverbose) {
5552 printf("(%s:%c:%d:%d): PPR Rejected. " 5552 printk("(%s:%c:%d:%d): PPR Rejected. "
5553 "Trying simple U160 PPR\n", 5553 "Trying simple U160 PPR\n",
5554 ahd_name(ahd), devinfo->channel, 5554 ahd_name(ahd), devinfo->channel,
5555 devinfo->target, devinfo->lun); 5555 devinfo->target, devinfo->lun);
@@ -5564,7 +5564,7 @@ ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
5564 * Attempt to negotiate SPI-2 style. 5564 * Attempt to negotiate SPI-2 style.
5565 */ 5565 */
5566 if (bootverbose) { 5566 if (bootverbose) {
5567 printf("(%s:%c:%d:%d): PPR Rejected. " 5567 printk("(%s:%c:%d:%d): PPR Rejected. "
5568 "Trying WDTR/SDTR\n", 5568 "Trying WDTR/SDTR\n",
5569 ahd_name(ahd), devinfo->channel, 5569 ahd_name(ahd), devinfo->channel,
5570 devinfo->target, devinfo->lun); 5570 devinfo->target, devinfo->lun);
@@ -5581,7 +5581,7 @@ ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
5581 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) { 5581 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) {
5582 5582
5583 /* note 8bit xfers */ 5583 /* note 8bit xfers */
5584 printf("(%s:%c:%d:%d): refuses WIDE negotiation. Using " 5584 printk("(%s:%c:%d:%d): refuses WIDE negotiation. Using "
5585 "8bit transfers\n", ahd_name(ahd), 5585 "8bit transfers\n", ahd_name(ahd),
5586 devinfo->channel, devinfo->target, devinfo->lun); 5586 devinfo->channel, devinfo->target, devinfo->lun);
5587 ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT, 5587 ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
@@ -5609,7 +5609,7 @@ ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
5609 /*offset*/0, /*ppr_options*/0, 5609 /*offset*/0, /*ppr_options*/0,
5610 AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, 5610 AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
5611 /*paused*/TRUE); 5611 /*paused*/TRUE);
5612 printf("(%s:%c:%d:%d): refuses synchronous negotiation. " 5612 printk("(%s:%c:%d:%d): refuses synchronous negotiation. "
5613 "Using asynchronous transfers\n", 5613 "Using asynchronous transfers\n",
5614 ahd_name(ahd), devinfo->channel, 5614 ahd_name(ahd), devinfo->channel,
5615 devinfo->target, devinfo->lun); 5615 devinfo->target, devinfo->lun);
@@ -5620,13 +5620,13 @@ ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
5620 tag_type = (scb->hscb->control & MSG_SIMPLE_TASK); 5620 tag_type = (scb->hscb->control & MSG_SIMPLE_TASK);
5621 5621
5622 if (tag_type == MSG_SIMPLE_TASK) { 5622 if (tag_type == MSG_SIMPLE_TASK) {
5623 printf("(%s:%c:%d:%d): refuses tagged commands. " 5623 printk("(%s:%c:%d:%d): refuses tagged commands. "
5624 "Performing non-tagged I/O\n", ahd_name(ahd), 5624 "Performing non-tagged I/O\n", ahd_name(ahd),
5625 devinfo->channel, devinfo->target, devinfo->lun); 5625 devinfo->channel, devinfo->target, devinfo->lun);
5626 ahd_set_tags(ahd, scb->io_ctx, devinfo, AHD_QUEUE_NONE); 5626 ahd_set_tags(ahd, scb->io_ctx, devinfo, AHD_QUEUE_NONE);
5627 mask = ~0x23; 5627 mask = ~0x23;
5628 } else { 5628 } else {
5629 printf("(%s:%c:%d:%d): refuses %s tagged commands. " 5629 printk("(%s:%c:%d:%d): refuses %s tagged commands. "
5630 "Performing simple queue tagged I/O only\n", 5630 "Performing simple queue tagged I/O only\n",
5631 ahd_name(ahd), devinfo->channel, devinfo->target, 5631 ahd_name(ahd), devinfo->channel, devinfo->target,
5632 devinfo->lun, tag_type == MSG_ORDERED_TASK 5632 devinfo->lun, tag_type == MSG_ORDERED_TASK
@@ -5677,7 +5677,7 @@ ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
5677 /* 5677 /*
5678 * Otherwise, we ignore it. 5678 * Otherwise, we ignore it.
5679 */ 5679 */
5680 printf("%s:%c:%d: Message reject for %x -- ignored\n", 5680 printk("%s:%c:%d: Message reject for %x -- ignored\n",
5681 ahd_name(ahd), devinfo->channel, devinfo->target, 5681 ahd_name(ahd), devinfo->channel, devinfo->target,
5682 last_msg); 5682 last_msg);
5683 } 5683 }
@@ -5864,7 +5864,7 @@ ahd_reinitialize_dataptrs(struct ahd_softc *ahd)
5864 ahd_delay(100); 5864 ahd_delay(100);
5865 if (wait == 0) { 5865 if (wait == 0) {
5866 ahd_print_path(ahd, scb); 5866 ahd_print_path(ahd, scb);
5867 printf("ahd_reinitialize_dataptrs: Forcing FIFO free.\n"); 5867 printk("ahd_reinitialize_dataptrs: Forcing FIFO free.\n");
5868 ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT); 5868 ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT);
5869 } 5869 }
5870 saved_modes = ahd_save_modes(ahd); 5870 saved_modes = ahd_save_modes(ahd);
@@ -5978,7 +5978,7 @@ ahd_handle_devreset(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
5978 CAM_LUN_WILDCARD, AC_SENT_BDR); 5978 CAM_LUN_WILDCARD, AC_SENT_BDR);
5979 5979
5980 if (message != NULL && bootverbose) 5980 if (message != NULL && bootverbose)
5981 printf("%s: %s on %c:%d. %d SCBs aborted\n", ahd_name(ahd), 5981 printk("%s: %s on %c:%d. %d SCBs aborted\n", ahd_name(ahd),
5982 message, devinfo->channel, devinfo->target, found); 5982 message, devinfo->channel, devinfo->target, found);
5983} 5983}
5984 5984
@@ -6074,23 +6074,22 @@ ahd_alloc(void *platform_arg, char *name)
6074 struct ahd_softc *ahd; 6074 struct ahd_softc *ahd;
6075 6075
6076#ifndef __FreeBSD__ 6076#ifndef __FreeBSD__
6077 ahd = malloc(sizeof(*ahd), M_DEVBUF, M_NOWAIT); 6077 ahd = kmalloc(sizeof(*ahd), GFP_ATOMIC);
6078 if (!ahd) { 6078 if (!ahd) {
6079 printf("aic7xxx: cannot malloc softc!\n"); 6079 printk("aic7xxx: cannot malloc softc!\n");
6080 free(name, M_DEVBUF); 6080 kfree(name);
6081 return NULL; 6081 return NULL;
6082 } 6082 }
6083#else 6083#else
6084 ahd = device_get_softc((device_t)platform_arg); 6084 ahd = device_get_softc((device_t)platform_arg);
6085#endif 6085#endif
6086 memset(ahd, 0, sizeof(*ahd)); 6086 memset(ahd, 0, sizeof(*ahd));
6087 ahd->seep_config = malloc(sizeof(*ahd->seep_config), 6087 ahd->seep_config = kmalloc(sizeof(*ahd->seep_config), GFP_ATOMIC);
6088 M_DEVBUF, M_NOWAIT);
6089 if (ahd->seep_config == NULL) { 6088 if (ahd->seep_config == NULL) {
6090#ifndef __FreeBSD__ 6089#ifndef __FreeBSD__
6091 free(ahd, M_DEVBUF); 6090 kfree(ahd);
6092#endif 6091#endif
6093 free(name, M_DEVBUF); 6092 kfree(name);
6094 return (NULL); 6093 return (NULL);
6095 } 6094 }
6096 LIST_INIT(&ahd->pending_scbs); 6095 LIST_INIT(&ahd->pending_scbs);
@@ -6120,7 +6119,7 @@ ahd_alloc(void *platform_arg, char *name)
6120 } 6119 }
6121#ifdef AHD_DEBUG 6120#ifdef AHD_DEBUG
6122 if ((ahd_debug & AHD_SHOW_MEMORY) != 0) { 6121 if ((ahd_debug & AHD_SHOW_MEMORY) != 0) {
6123 printf("%s: scb size = 0x%x, hscb size = 0x%x\n", 6122 printk("%s: scb size = 0x%x, hscb size = 0x%x\n",
6124 ahd_name(ahd), (u_int)sizeof(struct scb), 6123 ahd_name(ahd), (u_int)sizeof(struct scb),
6125 (u_int)sizeof(struct hardware_scb)); 6124 (u_int)sizeof(struct hardware_scb));
6126 } 6125 }
@@ -6147,7 +6146,7 @@ void
6147ahd_set_name(struct ahd_softc *ahd, char *name) 6146ahd_set_name(struct ahd_softc *ahd, char *name)
6148{ 6147{
6149 if (ahd->name != NULL) 6148 if (ahd->name != NULL)
6150 free(ahd->name, M_DEVBUF); 6149 kfree(ahd->name);
6151 ahd->name = name; 6150 ahd->name = name;
6152} 6151}
6153 6152
@@ -6201,27 +6200,27 @@ ahd_free(struct ahd_softc *ahd)
6201 lstate = tstate->enabled_luns[j]; 6200 lstate = tstate->enabled_luns[j];
6202 if (lstate != NULL) { 6201 if (lstate != NULL) {
6203 xpt_free_path(lstate->path); 6202 xpt_free_path(lstate->path);
6204 free(lstate, M_DEVBUF); 6203 kfree(lstate);
6205 } 6204 }
6206 } 6205 }
6207#endif 6206#endif
6208 free(tstate, M_DEVBUF); 6207 kfree(tstate);
6209 } 6208 }
6210 } 6209 }
6211#ifdef AHD_TARGET_MODE 6210#ifdef AHD_TARGET_MODE
6212 if (ahd->black_hole != NULL) { 6211 if (ahd->black_hole != NULL) {
6213 xpt_free_path(ahd->black_hole->path); 6212 xpt_free_path(ahd->black_hole->path);
6214 free(ahd->black_hole, M_DEVBUF); 6213 kfree(ahd->black_hole);
6215 } 6214 }
6216#endif 6215#endif
6217 if (ahd->name != NULL) 6216 if (ahd->name != NULL)
6218 free(ahd->name, M_DEVBUF); 6217 kfree(ahd->name);
6219 if (ahd->seep_config != NULL) 6218 if (ahd->seep_config != NULL)
6220 free(ahd->seep_config, M_DEVBUF); 6219 kfree(ahd->seep_config);
6221 if (ahd->saved_stack != NULL) 6220 if (ahd->saved_stack != NULL)
6222 free(ahd->saved_stack, M_DEVBUF); 6221 kfree(ahd->saved_stack);
6223#ifndef __FreeBSD__ 6222#ifndef __FreeBSD__
6224 free(ahd, M_DEVBUF); 6223 kfree(ahd);
6225#endif 6224#endif
6226 return; 6225 return;
6227} 6226}
@@ -6300,7 +6299,7 @@ ahd_reset(struct ahd_softc *ahd, int reinit)
6300 } while (--wait && !(ahd_inb(ahd, HCNTRL) & CHIPRSTACK)); 6299 } while (--wait && !(ahd_inb(ahd, HCNTRL) & CHIPRSTACK));
6301 6300
6302 if (wait == 0) { 6301 if (wait == 0) {
6303 printf("%s: WARNING - Failed chip reset! " 6302 printk("%s: WARNING - Failed chip reset! "
6304 "Trying to initialize anyway.\n", ahd_name(ahd)); 6303 "Trying to initialize anyway.\n", ahd_name(ahd));
6305 } 6304 }
6306 ahd_outb(ahd, HCNTRL, ahd->pause); 6305 ahd_outb(ahd, HCNTRL, ahd->pause);
@@ -6422,7 +6421,7 @@ ahd_init_scbdata(struct ahd_softc *ahd)
6422 /* Determine the number of hardware SCBs and initialize them */ 6421 /* Determine the number of hardware SCBs and initialize them */
6423 scb_data->maxhscbs = ahd_probe_scbs(ahd); 6422 scb_data->maxhscbs = ahd_probe_scbs(ahd);
6424 if (scb_data->maxhscbs == 0) { 6423 if (scb_data->maxhscbs == 0) {
6425 printf("%s: No SCB space found\n", ahd_name(ahd)); 6424 printk("%s: No SCB space found\n", ahd_name(ahd));
6426 return (ENXIO); 6425 return (ENXIO);
6427 } 6426 }
6428 6427
@@ -6465,7 +6464,7 @@ ahd_init_scbdata(struct ahd_softc *ahd)
6465 } 6464 }
6466#ifdef AHD_DEBUG 6465#ifdef AHD_DEBUG
6467 if ((ahd_debug & AHD_SHOW_MEMORY) != 0) 6466 if ((ahd_debug & AHD_SHOW_MEMORY) != 0)
6468 printf("%s: ahd_sglist_allocsize = 0x%x\n", ahd_name(ahd), 6467 printk("%s: ahd_sglist_allocsize = 0x%x\n", ahd_name(ahd),
6469 ahd_sglist_allocsize(ahd)); 6468 ahd_sglist_allocsize(ahd));
6470#endif 6469#endif
6471 6470
@@ -6489,7 +6488,7 @@ ahd_init_scbdata(struct ahd_softc *ahd)
6489 ahd_alloc_scbs(ahd); 6488 ahd_alloc_scbs(ahd);
6490 6489
6491 if (scb_data->numscbs == 0) { 6490 if (scb_data->numscbs == 0) {
6492 printf("%s: ahd_init_scbdata - " 6491 printk("%s: ahd_init_scbdata - "
6493 "Unable to allocate initial scbs\n", 6492 "Unable to allocate initial scbs\n",
6494 ahd_name(ahd)); 6493 ahd_name(ahd));
6495 goto error_exit; 6494 goto error_exit;
@@ -6564,7 +6563,7 @@ ahd_fini_scbdata(struct ahd_softc *ahd)
6564 sns_map->dmamap); 6563 sns_map->dmamap);
6565 ahd_dmamem_free(ahd, scb_data->sense_dmat, 6564 ahd_dmamem_free(ahd, scb_data->sense_dmat,
6566 sns_map->vaddr, sns_map->dmamap); 6565 sns_map->vaddr, sns_map->dmamap);
6567 free(sns_map, M_DEVBUF); 6566 kfree(sns_map);
6568 } 6567 }
6569 ahd_dma_tag_destroy(ahd, scb_data->sense_dmat); 6568 ahd_dma_tag_destroy(ahd, scb_data->sense_dmat);
6570 /* FALLTHROUGH */ 6569 /* FALLTHROUGH */
@@ -6579,7 +6578,7 @@ ahd_fini_scbdata(struct ahd_softc *ahd)
6579 sg_map->dmamap); 6578 sg_map->dmamap);
6580 ahd_dmamem_free(ahd, scb_data->sg_dmat, 6579 ahd_dmamem_free(ahd, scb_data->sg_dmat,
6581 sg_map->vaddr, sg_map->dmamap); 6580 sg_map->vaddr, sg_map->dmamap);
6582 free(sg_map, M_DEVBUF); 6581 kfree(sg_map);
6583 } 6582 }
6584 ahd_dma_tag_destroy(ahd, scb_data->sg_dmat); 6583 ahd_dma_tag_destroy(ahd, scb_data->sg_dmat);
6585 /* FALLTHROUGH */ 6584 /* FALLTHROUGH */
@@ -6594,7 +6593,7 @@ ahd_fini_scbdata(struct ahd_softc *ahd)
6594 hscb_map->dmamap); 6593 hscb_map->dmamap);
6595 ahd_dmamem_free(ahd, scb_data->hscb_dmat, 6594 ahd_dmamem_free(ahd, scb_data->hscb_dmat,
6596 hscb_map->vaddr, hscb_map->dmamap); 6595 hscb_map->vaddr, hscb_map->dmamap);
6597 free(hscb_map, M_DEVBUF); 6596 kfree(hscb_map);
6598 } 6597 }
6599 ahd_dma_tag_destroy(ahd, scb_data->hscb_dmat); 6598 ahd_dma_tag_destroy(ahd, scb_data->hscb_dmat);
6600 /* FALLTHROUGH */ 6599 /* FALLTHROUGH */
@@ -6624,7 +6623,7 @@ ahd_setup_iocell_workaround(struct ahd_softc *ahd)
6624 ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) | (ENSELDO|ENSELDI)); 6623 ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) | (ENSELDO|ENSELDI));
6625#ifdef AHD_DEBUG 6624#ifdef AHD_DEBUG
6626 if ((ahd_debug & AHD_SHOW_MISC) != 0) 6625 if ((ahd_debug & AHD_SHOW_MISC) != 0)
6627 printf("%s: Setting up iocell workaround\n", ahd_name(ahd)); 6626 printk("%s: Setting up iocell workaround\n", ahd_name(ahd));
6628#endif 6627#endif
6629 ahd_restore_modes(ahd, saved_modes); 6628 ahd_restore_modes(ahd, saved_modes);
6630 ahd->flags &= ~AHD_HAD_FIRST_SEL; 6629 ahd->flags &= ~AHD_HAD_FIRST_SEL;
@@ -6644,14 +6643,14 @@ ahd_iocell_first_selection(struct ahd_softc *ahd)
6644 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); 6643 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
6645#ifdef AHD_DEBUG 6644#ifdef AHD_DEBUG
6646 if ((ahd_debug & AHD_SHOW_MISC) != 0) 6645 if ((ahd_debug & AHD_SHOW_MISC) != 0)
6647 printf("%s: iocell first selection\n", ahd_name(ahd)); 6646 printk("%s: iocell first selection\n", ahd_name(ahd));
6648#endif 6647#endif
6649 if ((sblkctl & ENAB40) != 0) { 6648 if ((sblkctl & ENAB40) != 0) {
6650 ahd_outb(ahd, DSPDATACTL, 6649 ahd_outb(ahd, DSPDATACTL,
6651 ahd_inb(ahd, DSPDATACTL) & ~BYPASSENAB); 6650 ahd_inb(ahd, DSPDATACTL) & ~BYPASSENAB);
6652#ifdef AHD_DEBUG 6651#ifdef AHD_DEBUG
6653 if ((ahd_debug & AHD_SHOW_MISC) != 0) 6652 if ((ahd_debug & AHD_SHOW_MISC) != 0)
6654 printf("%s: BYPASS now disabled\n", ahd_name(ahd)); 6653 printk("%s: BYPASS now disabled\n", ahd_name(ahd));
6655#endif 6654#endif
6656 } 6655 }
6657 ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) & ~(ENSELDO|ENSELDI)); 6656 ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) & ~(ENSELDO|ENSELDI));
@@ -6833,7 +6832,7 @@ ahd_alloc_scbs(struct ahd_softc *ahd)
6833 hscb = &((struct hardware_scb *)hscb_map->vaddr)[offset]; 6832 hscb = &((struct hardware_scb *)hscb_map->vaddr)[offset];
6834 hscb_busaddr = hscb_map->physaddr + (offset * sizeof(*hscb)); 6833 hscb_busaddr = hscb_map->physaddr + (offset * sizeof(*hscb));
6835 } else { 6834 } else {
6836 hscb_map = malloc(sizeof(*hscb_map), M_DEVBUF, M_NOWAIT); 6835 hscb_map = kmalloc(sizeof(*hscb_map), GFP_ATOMIC);
6837 6836
6838 if (hscb_map == NULL) 6837 if (hscb_map == NULL)
6839 return; 6838 return;
@@ -6842,7 +6841,7 @@ ahd_alloc_scbs(struct ahd_softc *ahd)
6842 if (ahd_dmamem_alloc(ahd, scb_data->hscb_dmat, 6841 if (ahd_dmamem_alloc(ahd, scb_data->hscb_dmat,
6843 (void **)&hscb_map->vaddr, 6842 (void **)&hscb_map->vaddr,
6844 BUS_DMA_NOWAIT, &hscb_map->dmamap) != 0) { 6843 BUS_DMA_NOWAIT, &hscb_map->dmamap) != 0) {
6845 free(hscb_map, M_DEVBUF); 6844 kfree(hscb_map);
6846 return; 6845 return;
6847 } 6846 }
6848 6847
@@ -6866,7 +6865,7 @@ ahd_alloc_scbs(struct ahd_softc *ahd)
6866 segs = sg_map->vaddr + offset; 6865 segs = sg_map->vaddr + offset;
6867 sg_busaddr = sg_map->physaddr + offset; 6866 sg_busaddr = sg_map->physaddr + offset;
6868 } else { 6867 } else {
6869 sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT); 6868 sg_map = kmalloc(sizeof(*sg_map), GFP_ATOMIC);
6870 6869
6871 if (sg_map == NULL) 6870 if (sg_map == NULL)
6872 return; 6871 return;
@@ -6875,7 +6874,7 @@ ahd_alloc_scbs(struct ahd_softc *ahd)
6875 if (ahd_dmamem_alloc(ahd, scb_data->sg_dmat, 6874 if (ahd_dmamem_alloc(ahd, scb_data->sg_dmat,
6876 (void **)&sg_map->vaddr, 6875 (void **)&sg_map->vaddr,
6877 BUS_DMA_NOWAIT, &sg_map->dmamap) != 0) { 6876 BUS_DMA_NOWAIT, &sg_map->dmamap) != 0) {
6878 free(sg_map, M_DEVBUF); 6877 kfree(sg_map);
6879 return; 6878 return;
6880 } 6879 }
6881 6880
@@ -6891,7 +6890,7 @@ ahd_alloc_scbs(struct ahd_softc *ahd)
6891 ahd_sglist_allocsize(ahd) / ahd_sglist_size(ahd); 6890 ahd_sglist_allocsize(ahd) / ahd_sglist_size(ahd);
6892#ifdef AHD_DEBUG 6891#ifdef AHD_DEBUG
6893 if (ahd_debug & AHD_SHOW_MEMORY) 6892 if (ahd_debug & AHD_SHOW_MEMORY)
6894 printf("Mapped SG data\n"); 6893 printk("Mapped SG data\n");
6895#endif 6894#endif
6896 } 6895 }
6897 6896
@@ -6903,7 +6902,7 @@ ahd_alloc_scbs(struct ahd_softc *ahd)
6903 sense_data = sense_map->vaddr + offset; 6902 sense_data = sense_map->vaddr + offset;
6904 sense_busaddr = sense_map->physaddr + offset; 6903 sense_busaddr = sense_map->physaddr + offset;
6905 } else { 6904 } else {
6906 sense_map = malloc(sizeof(*sense_map), M_DEVBUF, M_NOWAIT); 6905 sense_map = kmalloc(sizeof(*sense_map), GFP_ATOMIC);
6907 6906
6908 if (sense_map == NULL) 6907 if (sense_map == NULL)
6909 return; 6908 return;
@@ -6912,7 +6911,7 @@ ahd_alloc_scbs(struct ahd_softc *ahd)
6912 if (ahd_dmamem_alloc(ahd, scb_data->sense_dmat, 6911 if (ahd_dmamem_alloc(ahd, scb_data->sense_dmat,
6913 (void **)&sense_map->vaddr, 6912 (void **)&sense_map->vaddr,
6914 BUS_DMA_NOWAIT, &sense_map->dmamap) != 0) { 6913 BUS_DMA_NOWAIT, &sense_map->dmamap) != 0) {
6915 free(sense_map, M_DEVBUF); 6914 kfree(sense_map);
6916 return; 6915 return;
6917 } 6916 }
6918 6917
@@ -6927,7 +6926,7 @@ ahd_alloc_scbs(struct ahd_softc *ahd)
6927 scb_data->sense_left = PAGE_SIZE / AHD_SENSE_BUFSIZE; 6926 scb_data->sense_left = PAGE_SIZE / AHD_SENSE_BUFSIZE;
6928#ifdef AHD_DEBUG 6927#ifdef AHD_DEBUG
6929 if (ahd_debug & AHD_SHOW_MEMORY) 6928 if (ahd_debug & AHD_SHOW_MEMORY)
6930 printf("Mapped sense data\n"); 6929 printk("Mapped sense data\n");
6931#endif 6930#endif
6932 } 6931 }
6933 6932
@@ -6941,15 +6940,13 @@ ahd_alloc_scbs(struct ahd_softc *ahd)
6941 int error; 6940 int error;
6942#endif 6941#endif
6943 6942
6944 next_scb = (struct scb *)malloc(sizeof(*next_scb), 6943 next_scb = kmalloc(sizeof(*next_scb), GFP_ATOMIC);
6945 M_DEVBUF, M_NOWAIT);
6946 if (next_scb == NULL) 6944 if (next_scb == NULL)
6947 break; 6945 break;
6948 6946
6949 pdata = (struct scb_platform_data *)malloc(sizeof(*pdata), 6947 pdata = kmalloc(sizeof(*pdata), GFP_ATOMIC);
6950 M_DEVBUF, M_NOWAIT);
6951 if (pdata == NULL) { 6948 if (pdata == NULL) {
6952 free(next_scb, M_DEVBUF); 6949 kfree(next_scb);
6953 break; 6950 break;
6954 } 6951 }
6955 next_scb->platform_data = pdata; 6952 next_scb->platform_data = pdata;
@@ -6979,8 +6976,8 @@ ahd_alloc_scbs(struct ahd_softc *ahd)
6979 error = ahd_dmamap_create(ahd, ahd->buffer_dmat, /*flags*/0, 6976 error = ahd_dmamap_create(ahd, ahd->buffer_dmat, /*flags*/0,
6980 &next_scb->dmamap); 6977 &next_scb->dmamap);
6981 if (error != 0) { 6978 if (error != 0) {
6982 free(next_scb, M_DEVBUF); 6979 kfree(next_scb);
6983 free(pdata, M_DEVBUF); 6980 kfree(pdata);
6984 break; 6981 break;
6985 } 6982 }
6986#endif 6983#endif
@@ -7077,8 +7074,7 @@ ahd_init(struct ahd_softc *ahd)
7077 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); 7074 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
7078 7075
7079 ahd->stack_size = ahd_probe_stack_size(ahd); 7076 ahd->stack_size = ahd_probe_stack_size(ahd);
7080 ahd->saved_stack = malloc(ahd->stack_size * sizeof(uint16_t), 7077 ahd->saved_stack = kmalloc(ahd->stack_size * sizeof(uint16_t), GFP_ATOMIC);
7081 M_DEVBUF, M_NOWAIT);
7082 if (ahd->saved_stack == NULL) 7078 if (ahd->saved_stack == NULL)
7083 return (ENOMEM); 7079 return (ENOMEM);
7084 7080
@@ -7224,20 +7220,20 @@ ahd_init(struct ahd_softc *ahd)
7224 error = ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, 7220 error = ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL,
7225 CURSENSE_ENB); 7221 CURSENSE_ENB);
7226 if (error != 0) { 7222 if (error != 0) {
7227 printf("%s: current sensing timeout 1\n", ahd_name(ahd)); 7223 printk("%s: current sensing timeout 1\n", ahd_name(ahd));
7228 goto init_done; 7224 goto init_done;
7229 } 7225 }
7230 for (i = 20, fstat = FLX_FSTAT_BUSY; 7226 for (i = 20, fstat = FLX_FSTAT_BUSY;
7231 (fstat & FLX_FSTAT_BUSY) != 0 && i; i--) { 7227 (fstat & FLX_FSTAT_BUSY) != 0 && i; i--) {
7232 error = ahd_read_flexport(ahd, FLXADDR_FLEXSTAT, &fstat); 7228 error = ahd_read_flexport(ahd, FLXADDR_FLEXSTAT, &fstat);
7233 if (error != 0) { 7229 if (error != 0) {
7234 printf("%s: current sensing timeout 2\n", 7230 printk("%s: current sensing timeout 2\n",
7235 ahd_name(ahd)); 7231 ahd_name(ahd));
7236 goto init_done; 7232 goto init_done;
7237 } 7233 }
7238 } 7234 }
7239 if (i == 0) { 7235 if (i == 0) {
7240 printf("%s: Timedout during current-sensing test\n", 7236 printk("%s: Timedout during current-sensing test\n",
7241 ahd_name(ahd)); 7237 ahd_name(ahd));
7242 goto init_done; 7238 goto init_done;
7243 } 7239 }
@@ -7245,7 +7241,7 @@ ahd_init(struct ahd_softc *ahd)
7245 /* Latch Current Sensing status. */ 7241 /* Latch Current Sensing status. */
7246 error = ahd_read_flexport(ahd, FLXADDR_CURRENT_STAT, &current_sensing); 7242 error = ahd_read_flexport(ahd, FLXADDR_CURRENT_STAT, &current_sensing);
7247 if (error != 0) { 7243 if (error != 0) {
7248 printf("%s: current sensing timeout 3\n", ahd_name(ahd)); 7244 printk("%s: current sensing timeout 3\n", ahd_name(ahd));
7249 goto init_done; 7245 goto init_done;
7250 } 7246 }
7251 7247
@@ -7254,7 +7250,7 @@ ahd_init(struct ahd_softc *ahd)
7254 7250
7255#ifdef AHD_DEBUG 7251#ifdef AHD_DEBUG
7256 if ((ahd_debug & AHD_SHOW_TERMCTL) != 0) { 7252 if ((ahd_debug & AHD_SHOW_TERMCTL) != 0) {
7257 printf("%s: current_sensing == 0x%x\n", 7253 printk("%s: current_sensing == 0x%x\n",
7258 ahd_name(ahd), current_sensing); 7254 ahd_name(ahd), current_sensing);
7259 } 7255 }
7260#endif 7256#endif
@@ -7271,13 +7267,13 @@ ahd_init(struct ahd_softc *ahd)
7271 case FLX_CSTAT_OKAY: 7267 case FLX_CSTAT_OKAY:
7272 if (warn_user == 0 && bootverbose == 0) 7268 if (warn_user == 0 && bootverbose == 0)
7273 break; 7269 break;
7274 printf("%s: %s Channel %s\n", ahd_name(ahd), 7270 printk("%s: %s Channel %s\n", ahd_name(ahd),
7275 channel_strings[i], termstat_strings[term_stat]); 7271 channel_strings[i], termstat_strings[term_stat]);
7276 break; 7272 break;
7277 } 7273 }
7278 } 7274 }
7279 if (warn_user) { 7275 if (warn_user) {
7280 printf("%s: WARNING. Termination is not configured correctly.\n" 7276 printk("%s: WARNING. Termination is not configured correctly.\n"
7281 "%s: WARNING. SCSI bus operations may FAIL.\n", 7277 "%s: WARNING. SCSI bus operations may FAIL.\n",
7282 ahd_name(ahd), ahd_name(ahd)); 7278 ahd_name(ahd), ahd_name(ahd));
7283 } 7279 }
@@ -7393,7 +7389,7 @@ ahd_chip_init(struct ahd_softc *ahd)
7393 } 7389 }
7394#ifdef AHD_DEBUG 7390#ifdef AHD_DEBUG
7395 if ((ahd_debug & AHD_SHOW_MISC) != 0) 7391 if ((ahd_debug & AHD_SHOW_MISC) != 0)
7396 printf("%s: WRTBIASCTL now 0x%x\n", ahd_name(ahd), 7392 printk("%s: WRTBIASCTL now 0x%x\n", ahd_name(ahd),
7397 WRTBIASCTL_HP_DEFAULT); 7393 WRTBIASCTL_HP_DEFAULT);
7398#endif 7394#endif
7399 } 7395 }
@@ -7622,9 +7618,9 @@ ahd_chip_init(struct ahd_softc *ahd)
7622 ahd_outb(ahd, NEGCONOPTS, negodat3); 7618 ahd_outb(ahd, NEGCONOPTS, negodat3);
7623 negodat3 = ahd_inb(ahd, NEGCONOPTS); 7619 negodat3 = ahd_inb(ahd, NEGCONOPTS);
7624 if (!(negodat3 & ENSLOWCRC)) 7620 if (!(negodat3 & ENSLOWCRC))
7625 printf("aic79xx: failed to set the SLOWCRC bit\n"); 7621 printk("aic79xx: failed to set the SLOWCRC bit\n");
7626 else 7622 else
7627 printf("aic79xx: SLOWCRC bit set\n"); 7623 printk("aic79xx: SLOWCRC bit set\n");
7628 } 7624 }
7629} 7625}
7630 7626
@@ -7646,7 +7642,7 @@ ahd_default_config(struct ahd_softc *ahd)
7646 * data for any target mode initiator. 7642 * data for any target mode initiator.
7647 */ 7643 */
7648 if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) { 7644 if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) {
7649 printf("%s: unable to allocate ahd_tmode_tstate. " 7645 printk("%s: unable to allocate ahd_tmode_tstate. "
7650 "Failing attach\n", ahd_name(ahd)); 7646 "Failing attach\n", ahd_name(ahd));
7651 return (ENOMEM); 7647 return (ENOMEM);
7652 } 7648 }
@@ -7725,7 +7721,7 @@ ahd_parse_cfgdata(struct ahd_softc *ahd, struct seeprom_config *sc)
7725 * data for any target mode initiator. 7721 * data for any target mode initiator.
7726 */ 7722 */
7727 if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) { 7723 if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) {
7728 printf("%s: unable to allocate ahd_tmode_tstate. " 7724 printk("%s: unable to allocate ahd_tmode_tstate. "
7729 "Failing attach\n", ahd_name(ahd)); 7725 "Failing attach\n", ahd_name(ahd));
7730 return (ENOMEM); 7726 return (ENOMEM);
7731 } 7727 }
@@ -7795,7 +7791,7 @@ ahd_parse_cfgdata(struct ahd_softc *ahd, struct seeprom_config *sc)
7795 user_tinfo->width = MSG_EXT_WDTR_BUS_8_BIT; 7791 user_tinfo->width = MSG_EXT_WDTR_BUS_8_BIT;
7796#ifdef AHD_DEBUG 7792#ifdef AHD_DEBUG
7797 if ((ahd_debug & AHD_SHOW_MISC) != 0) 7793 if ((ahd_debug & AHD_SHOW_MISC) != 0)
7798 printf("(%d): %x:%x:%x:%x\n", targ, user_tinfo->width, 7794 printk("(%d): %x:%x:%x:%x\n", targ, user_tinfo->width,
7799 user_tinfo->period, user_tinfo->offset, 7795 user_tinfo->period, user_tinfo->offset,
7800 user_tinfo->ppr_options); 7796 user_tinfo->ppr_options);
7801#endif 7797#endif
@@ -7951,7 +7947,7 @@ ahd_pause_and_flushwork(struct ahd_softc *ahd)
7951 || (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) != 0)); 7947 || (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) != 0));
7952 7948
7953 if (maxloops == 0) { 7949 if (maxloops == 0) {
7954 printf("Infinite interrupt loop, INTSTAT = %x", 7950 printk("Infinite interrupt loop, INTSTAT = %x",
7955 ahd_inb(ahd, INTSTAT)); 7951 ahd_inb(ahd, INTSTAT));
7956 } 7952 }
7957 ahd->qfreeze_cnt++; 7953 ahd->qfreeze_cnt++;
@@ -8241,7 +8237,7 @@ ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
8241 prev_scb = NULL; 8237 prev_scb = NULL;
8242 8238
8243 if (action == SEARCH_PRINT) { 8239 if (action == SEARCH_PRINT) {
8244 printf("qinstart = %d qinfifonext = %d\nQINFIFO:", 8240 printk("qinstart = %d qinfifonext = %d\nQINFIFO:",
8245 qinstart, ahd->qinfifonext); 8241 qinstart, ahd->qinfifonext);
8246 } 8242 }
8247 8243
@@ -8256,7 +8252,7 @@ ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
8256 while (qinpos != qintail) { 8252 while (qinpos != qintail) {
8257 scb = ahd_lookup_scb(ahd, ahd->qinfifo[qinpos]); 8253 scb = ahd_lookup_scb(ahd, ahd->qinfifo[qinpos]);
8258 if (scb == NULL) { 8254 if (scb == NULL) {
8259 printf("qinpos = %d, SCB index = %d\n", 8255 printk("qinpos = %d, SCB index = %d\n",
8260 qinpos, ahd->qinfifo[qinpos]); 8256 qinpos, ahd->qinfifo[qinpos]);
8261 panic("Loop 1\n"); 8257 panic("Loop 1\n");
8262 } 8258 }
@@ -8269,13 +8265,13 @@ ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
8269 switch (action) { 8265 switch (action) {
8270 case SEARCH_COMPLETE: 8266 case SEARCH_COMPLETE:
8271 if ((scb->flags & SCB_ACTIVE) == 0) 8267 if ((scb->flags & SCB_ACTIVE) == 0)
8272 printf("Inactive SCB in qinfifo\n"); 8268 printk("Inactive SCB in qinfifo\n");
8273 ahd_done_with_status(ahd, scb, status); 8269 ahd_done_with_status(ahd, scb, status);
8274 /* FALLTHROUGH */ 8270 /* FALLTHROUGH */
8275 case SEARCH_REMOVE: 8271 case SEARCH_REMOVE:
8276 break; 8272 break;
8277 case SEARCH_PRINT: 8273 case SEARCH_PRINT:
8278 printf(" 0x%x", ahd->qinfifo[qinpos]); 8274 printk(" 0x%x", ahd->qinfifo[qinpos]);
8279 /* FALLTHROUGH */ 8275 /* FALLTHROUGH */
8280 case SEARCH_COUNT: 8276 case SEARCH_COUNT:
8281 ahd_qinfifo_requeue(ahd, prev_scb, scb); 8277 ahd_qinfifo_requeue(ahd, prev_scb, scb);
@@ -8292,7 +8288,7 @@ ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
8292 ahd_set_hnscb_qoff(ahd, ahd->qinfifonext); 8288 ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
8293 8289
8294 if (action == SEARCH_PRINT) 8290 if (action == SEARCH_PRINT)
8295 printf("\nWAITING_TID_QUEUES:\n"); 8291 printk("\nWAITING_TID_QUEUES:\n");
8296 8292
8297 /* 8293 /*
8298 * Search waiting for selection lists. We traverse the 8294 * Search waiting for selection lists. We traverse the
@@ -8320,7 +8316,7 @@ ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
8320 panic("TID LIST LOOP"); 8316 panic("TID LIST LOOP");
8321 8317
8322 if (scbid >= ahd->scb_data.numscbs) { 8318 if (scbid >= ahd->scb_data.numscbs) {
8323 printf("%s: Waiting TID List inconsistency. " 8319 printk("%s: Waiting TID List inconsistency. "
8324 "SCB index == 0x%x, yet numscbs == 0x%x.", 8320 "SCB index == 0x%x, yet numscbs == 0x%x.",
8325 ahd_name(ahd), scbid, ahd->scb_data.numscbs); 8321 ahd_name(ahd), scbid, ahd->scb_data.numscbs);
8326 ahd_dump_card_state(ahd); 8322 ahd_dump_card_state(ahd);
@@ -8328,7 +8324,7 @@ ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
8328 } 8324 }
8329 scb = ahd_lookup_scb(ahd, scbid); 8325 scb = ahd_lookup_scb(ahd, scbid);
8330 if (scb == NULL) { 8326 if (scb == NULL) {
8331 printf("%s: SCB = 0x%x Not Active!\n", 8327 printk("%s: SCB = 0x%x Not Active!\n",
8332 ahd_name(ahd), scbid); 8328 ahd_name(ahd), scbid);
8333 panic("Waiting TID List traversal\n"); 8329 panic("Waiting TID List traversal\n");
8334 } 8330 }
@@ -8344,7 +8340,7 @@ ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
8344 * We found a list of scbs that needs to be searched. 8340 * We found a list of scbs that needs to be searched.
8345 */ 8341 */
8346 if (action == SEARCH_PRINT) 8342 if (action == SEARCH_PRINT)
8347 printf(" %d ( ", SCB_GET_TARGET(ahd, scb)); 8343 printk(" %d ( ", SCB_GET_TARGET(ahd, scb));
8348 tid_head = scbid; 8344 tid_head = scbid;
8349 found += ahd_search_scb_list(ahd, target, channel, 8345 found += ahd_search_scb_list(ahd, target, channel,
8350 lun, tag, role, status, 8346 lun, tag, role, status,
@@ -8365,14 +8361,14 @@ ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
8365 switch (action) { 8361 switch (action) {
8366 case SEARCH_COMPLETE: 8362 case SEARCH_COMPLETE:
8367 if ((mk_msg_scb->flags & SCB_ACTIVE) == 0) 8363 if ((mk_msg_scb->flags & SCB_ACTIVE) == 0)
8368 printf("Inactive SCB pending MK_MSG\n"); 8364 printk("Inactive SCB pending MK_MSG\n");
8369 ahd_done_with_status(ahd, mk_msg_scb, status); 8365 ahd_done_with_status(ahd, mk_msg_scb, status);
8370 /* FALLTHROUGH */ 8366 /* FALLTHROUGH */
8371 case SEARCH_REMOVE: 8367 case SEARCH_REMOVE:
8372 { 8368 {
8373 u_int tail_offset; 8369 u_int tail_offset;
8374 8370
8375 printf("Removing MK_MSG scb\n"); 8371 printk("Removing MK_MSG scb\n");
8376 8372
8377 /* 8373 /*
8378 * Reset our tail to the tail of the 8374 * Reset our tail to the tail of the
@@ -8390,7 +8386,7 @@ ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
8390 break; 8386 break;
8391 } 8387 }
8392 case SEARCH_PRINT: 8388 case SEARCH_PRINT:
8393 printf(" 0x%x", SCB_GET_TAG(scb)); 8389 printk(" 0x%x", SCB_GET_TAG(scb));
8394 /* FALLTHROUGH */ 8390 /* FALLTHROUGH */
8395 case SEARCH_COUNT: 8391 case SEARCH_COUNT:
8396 break; 8392 break;
@@ -8407,7 +8403,7 @@ ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
8407 * queue with a pending MK_MESSAGE scb, we 8403 * queue with a pending MK_MESSAGE scb, we
8408 * must queue the MK_MESSAGE scb. 8404 * must queue the MK_MESSAGE scb.
8409 */ 8405 */
8410 printf("Queueing mk_msg_scb\n"); 8406 printk("Queueing mk_msg_scb\n");
8411 tid_head = ahd_inw(ahd, MK_MESSAGE_SCB); 8407 tid_head = ahd_inw(ahd, MK_MESSAGE_SCB);
8412 seq_flags2 &= ~PENDING_MK_MESSAGE; 8408 seq_flags2 &= ~PENDING_MK_MESSAGE;
8413 ahd_outb(ahd, SEQ_FLAGS2, seq_flags2); 8409 ahd_outb(ahd, SEQ_FLAGS2, seq_flags2);
@@ -8418,7 +8414,7 @@ ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
8418 if (!SCBID_IS_NULL(tid_head)) 8414 if (!SCBID_IS_NULL(tid_head))
8419 tid_prev = tid_head; 8415 tid_prev = tid_head;
8420 if (action == SEARCH_PRINT) 8416 if (action == SEARCH_PRINT)
8421 printf(")\n"); 8417 printk(")\n");
8422 } 8418 }
8423 8419
8424 /* Restore saved state. */ 8420 /* Restore saved state. */
@@ -8446,7 +8442,7 @@ ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel,
8446 *list_tail = SCB_LIST_NULL; 8442 *list_tail = SCB_LIST_NULL;
8447 for (scbid = next; !SCBID_IS_NULL(scbid); scbid = next) { 8443 for (scbid = next; !SCBID_IS_NULL(scbid); scbid = next) {
8448 if (scbid >= ahd->scb_data.numscbs) { 8444 if (scbid >= ahd->scb_data.numscbs) {
8449 printf("%s:SCB List inconsistency. " 8445 printk("%s:SCB List inconsistency. "
8450 "SCB == 0x%x, yet numscbs == 0x%x.", 8446 "SCB == 0x%x, yet numscbs == 0x%x.",
8451 ahd_name(ahd), scbid, ahd->scb_data.numscbs); 8447 ahd_name(ahd), scbid, ahd->scb_data.numscbs);
8452 ahd_dump_card_state(ahd); 8448 ahd_dump_card_state(ahd);
@@ -8454,7 +8450,7 @@ ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel,
8454 } 8450 }
8455 scb = ahd_lookup_scb(ahd, scbid); 8451 scb = ahd_lookup_scb(ahd, scbid);
8456 if (scb == NULL) { 8452 if (scb == NULL) {
8457 printf("%s: SCB = %d Not Active!\n", 8453 printk("%s: SCB = %d Not Active!\n",
8458 ahd_name(ahd), scbid); 8454 ahd_name(ahd), scbid);
8459 panic("Waiting List traversal\n"); 8455 panic("Waiting List traversal\n");
8460 } 8456 }
@@ -8470,7 +8466,7 @@ ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel,
8470 switch (action) { 8466 switch (action) {
8471 case SEARCH_COMPLETE: 8467 case SEARCH_COMPLETE:
8472 if ((scb->flags & SCB_ACTIVE) == 0) 8468 if ((scb->flags & SCB_ACTIVE) == 0)
8473 printf("Inactive SCB in Waiting List\n"); 8469 printk("Inactive SCB in Waiting List\n");
8474 ahd_done_with_status(ahd, scb, status); 8470 ahd_done_with_status(ahd, scb, status);
8475 /* FALLTHROUGH */ 8471 /* FALLTHROUGH */
8476 case SEARCH_REMOVE: 8472 case SEARCH_REMOVE:
@@ -8480,7 +8476,7 @@ ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel,
8480 *list_head = next; 8476 *list_head = next;
8481 break; 8477 break;
8482 case SEARCH_PRINT: 8478 case SEARCH_PRINT:
8483 printf("0x%x ", scbid); 8479 printk("0x%x ", scbid);
8484 case SEARCH_COUNT: 8480 case SEARCH_COUNT:
8485 prev = scbid; 8481 prev = scbid;
8486 break; 8482 break;
@@ -8668,7 +8664,7 @@ ahd_abort_scbs(struct ahd_softc *ahd, int target, char channel,
8668 if (ahd_get_transaction_status(scbp) != CAM_REQ_CMP) 8664 if (ahd_get_transaction_status(scbp) != CAM_REQ_CMP)
8669 ahd_freeze_scb(scbp); 8665 ahd_freeze_scb(scbp);
8670 if ((scbp->flags & SCB_ACTIVE) == 0) 8666 if ((scbp->flags & SCB_ACTIVE) == 0)
8671 printf("Inactive SCB on pending list\n"); 8667 printk("Inactive SCB on pending list\n");
8672 ahd_done(ahd, scbp); 8668 ahd_done(ahd, scbp);
8673 found++; 8669 found++;
8674 } 8670 }
@@ -8725,7 +8721,7 @@ ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
8725 * Check if the last bus reset is cleared 8721 * Check if the last bus reset is cleared
8726 */ 8722 */
8727 if (ahd->flags & AHD_BUS_RESET_ACTIVE) { 8723 if (ahd->flags & AHD_BUS_RESET_ACTIVE) {
8728 printf("%s: bus reset still active\n", 8724 printk("%s: bus reset still active\n",
8729 ahd_name(ahd)); 8725 ahd_name(ahd));
8730 return 0; 8726 return 0;
8731 } 8727 }
@@ -8900,7 +8896,7 @@ ahd_stat_timer(void *arg)
8900 ahd_enable_coalescing(ahd, enint_coal); 8896 ahd_enable_coalescing(ahd, enint_coal);
8901#ifdef AHD_DEBUG 8897#ifdef AHD_DEBUG
8902 if ((ahd_debug & AHD_SHOW_INT_COALESCING) != 0) 8898 if ((ahd_debug & AHD_SHOW_INT_COALESCING) != 0)
8903 printf("%s: Interrupt coalescing " 8899 printk("%s: Interrupt coalescing "
8904 "now %sabled. Cmds %d\n", 8900 "now %sabled. Cmds %d\n",
8905 ahd_name(ahd), 8901 ahd_name(ahd),
8906 (enint_coal & ENINT_COALESCE) ? "en" : "dis", 8902 (enint_coal & ENINT_COALESCE) ? "en" : "dis",
@@ -8975,9 +8971,9 @@ ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb)
8975#ifdef AHD_DEBUG 8971#ifdef AHD_DEBUG
8976 if ((ahd_debug & AHD_SHOW_SENSE) != 0) { 8972 if ((ahd_debug & AHD_SHOW_SENSE) != 0) {
8977 ahd_print_path(ahd, scb); 8973 ahd_print_path(ahd, scb);
8978 printf("SCB 0x%x Received PKT Status of 0x%x\n", 8974 printk("SCB 0x%x Received PKT Status of 0x%x\n",
8979 SCB_GET_TAG(scb), siu->status); 8975 SCB_GET_TAG(scb), siu->status);
8980 printf("\tflags = 0x%x, sense len = 0x%x, " 8976 printk("\tflags = 0x%x, sense len = 0x%x, "
8981 "pktfail = 0x%x\n", 8977 "pktfail = 0x%x\n",
8982 siu->flags, scsi_4btoul(siu->sense_length), 8978 siu->flags, scsi_4btoul(siu->sense_length),
8983 scsi_4btoul(siu->pkt_failures_length)); 8979 scsi_4btoul(siu->pkt_failures_length));
@@ -8986,27 +8982,27 @@ ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb)
8986 if ((siu->flags & SIU_RSPVALID) != 0) { 8982 if ((siu->flags & SIU_RSPVALID) != 0) {
8987 ahd_print_path(ahd, scb); 8983 ahd_print_path(ahd, scb);
8988 if (scsi_4btoul(siu->pkt_failures_length) < 4) { 8984 if (scsi_4btoul(siu->pkt_failures_length) < 4) {
8989 printf("Unable to parse pkt_failures\n"); 8985 printk("Unable to parse pkt_failures\n");
8990 } else { 8986 } else {
8991 8987
8992 switch (SIU_PKTFAIL_CODE(siu)) { 8988 switch (SIU_PKTFAIL_CODE(siu)) {
8993 case SIU_PFC_NONE: 8989 case SIU_PFC_NONE:
8994 printf("No packet failure found\n"); 8990 printk("No packet failure found\n");
8995 break; 8991 break;
8996 case SIU_PFC_CIU_FIELDS_INVALID: 8992 case SIU_PFC_CIU_FIELDS_INVALID:
8997 printf("Invalid Command IU Field\n"); 8993 printk("Invalid Command IU Field\n");
8998 break; 8994 break;
8999 case SIU_PFC_TMF_NOT_SUPPORTED: 8995 case SIU_PFC_TMF_NOT_SUPPORTED:
9000 printf("TMF not supportd\n"); 8996 printk("TMF not supportd\n");
9001 break; 8997 break;
9002 case SIU_PFC_TMF_FAILED: 8998 case SIU_PFC_TMF_FAILED:
9003 printf("TMF failed\n"); 8999 printk("TMF failed\n");
9004 break; 9000 break;
9005 case SIU_PFC_INVALID_TYPE_CODE: 9001 case SIU_PFC_INVALID_TYPE_CODE:
9006 printf("Invalid L_Q Type code\n"); 9002 printk("Invalid L_Q Type code\n");
9007 break; 9003 break;
9008 case SIU_PFC_ILLEGAL_REQUEST: 9004 case SIU_PFC_ILLEGAL_REQUEST:
9009 printf("Illegal request\n"); 9005 printk("Illegal request\n");
9010 default: 9006 default:
9011 break; 9007 break;
9012 } 9008 }
@@ -9019,7 +9015,7 @@ ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb)
9019 scb->flags |= SCB_PKT_SENSE; 9015 scb->flags |= SCB_PKT_SENSE;
9020#ifdef AHD_DEBUG 9016#ifdef AHD_DEBUG
9021 if ((ahd_debug & AHD_SHOW_SENSE) != 0) 9017 if ((ahd_debug & AHD_SHOW_SENSE) != 0)
9022 printf("Sense data available\n"); 9018 printk("Sense data available\n");
9023#endif 9019#endif
9024 } 9020 }
9025 ahd_done(ahd, scb); 9021 ahd_done(ahd, scb);
@@ -9037,7 +9033,7 @@ ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb)
9037#ifdef AHD_DEBUG 9033#ifdef AHD_DEBUG
9038 if (ahd_debug & AHD_SHOW_SENSE) { 9034 if (ahd_debug & AHD_SHOW_SENSE) {
9039 ahd_print_path(ahd, scb); 9035 ahd_print_path(ahd, scb);
9040 printf("SCB %d: requests Check Status\n", 9036 printk("SCB %d: requests Check Status\n",
9041 SCB_GET_TAG(scb)); 9037 SCB_GET_TAG(scb));
9042 } 9038 }
9043#endif 9039#endif
@@ -9065,7 +9061,7 @@ ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb)
9065#ifdef AHD_DEBUG 9061#ifdef AHD_DEBUG
9066 if (ahd_debug & AHD_SHOW_SENSE) { 9062 if (ahd_debug & AHD_SHOW_SENSE) {
9067 ahd_print_path(ahd, scb); 9063 ahd_print_path(ahd, scb);
9068 printf("Sending Sense\n"); 9064 printk("Sending Sense\n");
9069 } 9065 }
9070#endif 9066#endif
9071 scb->sg_count = 0; 9067 scb->sg_count = 0;
@@ -9117,7 +9113,7 @@ ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb)
9117 break; 9113 break;
9118 } 9114 }
9119 case SCSI_STATUS_OK: 9115 case SCSI_STATUS_OK:
9120 printf("%s: Interrupted for staus of 0???\n", 9116 printk("%s: Interrupted for staus of 0???\n",
9121 ahd_name(ahd)); 9117 ahd_name(ahd));
9122 /* FALLTHROUGH */ 9118 /* FALLTHROUGH */
9123 default: 9119 default:
@@ -9192,7 +9188,7 @@ ahd_calc_residual(struct ahd_softc *ahd, struct scb *scb)
9192 return; 9188 return;
9193 } else if ((resid_sgptr & SG_OVERRUN_RESID) != 0) { 9189 } else if ((resid_sgptr & SG_OVERRUN_RESID) != 0) {
9194 ahd_print_path(ahd, scb); 9190 ahd_print_path(ahd, scb);
9195 printf("data overrun detected Tag == 0x%x.\n", 9191 printk("data overrun detected Tag == 0x%x.\n",
9196 SCB_GET_TAG(scb)); 9192 SCB_GET_TAG(scb));
9197 ahd_freeze_devq(ahd, scb); 9193 ahd_freeze_devq(ahd, scb);
9198 ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR); 9194 ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR);
@@ -9232,7 +9228,7 @@ ahd_calc_residual(struct ahd_softc *ahd, struct scb *scb)
9232#ifdef AHD_DEBUG 9228#ifdef AHD_DEBUG
9233 if ((ahd_debug & AHD_SHOW_MISC) != 0) { 9229 if ((ahd_debug & AHD_SHOW_MISC) != 0) {
9234 ahd_print_path(ahd, scb); 9230 ahd_print_path(ahd, scb);
9235 printf("Handled %sResidual of %d bytes\n", 9231 printk("Handled %sResidual of %d bytes\n",
9236 (scb->flags & SCB_SENSE) ? "Sense " : "", resid); 9232 (scb->flags & SCB_SENSE) ? "Sense " : "", resid);
9237 } 9233 }
9238#endif 9234#endif
@@ -9272,7 +9268,7 @@ ahd_queue_lstate_event(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate,
9272 9268
9273 if (pending == AHD_TMODE_EVENT_BUFFER_SIZE) { 9269 if (pending == AHD_TMODE_EVENT_BUFFER_SIZE) {
9274 xpt_print_path(lstate->path); 9270 xpt_print_path(lstate->path);
9275 printf("immediate event %x:%x lost\n", 9271 printk("immediate event %x:%x lost\n",
9276 lstate->event_buffer[lstate->event_r_idx].event_type, 9272 lstate->event_buffer[lstate->event_r_idx].event_type,
9277 lstate->event_buffer[lstate->event_r_idx].event_arg); 9273 lstate->event_buffer[lstate->event_r_idx].event_arg);
9278 lstate->event_r_idx++; 9274 lstate->event_r_idx++;
@@ -9344,7 +9340,7 @@ ahd_dumpseq(struct ahd_softc* ahd)
9344 uint8_t ins_bytes[4]; 9340 uint8_t ins_bytes[4];
9345 9341
9346 ahd_insb(ahd, SEQRAM, ins_bytes, 4); 9342 ahd_insb(ahd, SEQRAM, ins_bytes, 4);
9347 printf("0x%08x\n", ins_bytes[0] << 24 9343 printk("0x%08x\n", ins_bytes[0] << 24
9348 | ins_bytes[1] << 16 9344 | ins_bytes[1] << 16
9349 | ins_bytes[2] << 8 9345 | ins_bytes[2] << 8
9350 | ins_bytes[3]); 9346 | ins_bytes[3]);
@@ -9372,7 +9368,7 @@ ahd_loadseq(struct ahd_softc *ahd)
9372 uint8_t download_consts[DOWNLOAD_CONST_COUNT]; 9368 uint8_t download_consts[DOWNLOAD_CONST_COUNT];
9373 9369
9374 if (bootverbose) 9370 if (bootverbose)
9375 printf("%s: Downloading Sequencer Program...", 9371 printk("%s: Downloading Sequencer Program...",
9376 ahd_name(ahd)); 9372 ahd_name(ahd));
9377 9373
9378#if DOWNLOAD_CONST_COUNT != 8 9374#if DOWNLOAD_CONST_COUNT != 8
@@ -9498,7 +9494,7 @@ ahd_loadseq(struct ahd_softc *ahd)
9498 if (cs_count != 0) { 9494 if (cs_count != 0) {
9499 9495
9500 cs_count *= sizeof(struct cs); 9496 cs_count *= sizeof(struct cs);
9501 ahd->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT); 9497 ahd->critical_sections = kmalloc(cs_count, GFP_ATOMIC);
9502 if (ahd->critical_sections == NULL) 9498 if (ahd->critical_sections == NULL)
9503 panic("ahd_loadseq: Could not malloc"); 9499 panic("ahd_loadseq: Could not malloc");
9504 memcpy(ahd->critical_sections, cs_table, cs_count); 9500 memcpy(ahd->critical_sections, cs_table, cs_count);
@@ -9506,8 +9502,8 @@ ahd_loadseq(struct ahd_softc *ahd)
9506 ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE); 9502 ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE);
9507 9503
9508 if (bootverbose) { 9504 if (bootverbose) {
9509 printf(" %d instructions downloaded\n", downloaded); 9505 printk(" %d instructions downloaded\n", downloaded);
9510 printf("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n", 9506 printk("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n",
9511 ahd_name(ahd), ahd->features, ahd->bugs, ahd->flags); 9507 ahd_name(ahd), ahd->features, ahd->bugs, ahd->flags);
9512 } 9508 }
9513} 9509}
@@ -9690,12 +9686,12 @@ ahd_print_register(const ahd_reg_parse_entry_t *table, u_int num_entries,
9690 u_int printed_mask; 9686 u_int printed_mask;
9691 9687
9692 if (cur_column != NULL && *cur_column >= wrap_point) { 9688 if (cur_column != NULL && *cur_column >= wrap_point) {
9693 printf("\n"); 9689 printk("\n");
9694 *cur_column = 0; 9690 *cur_column = 0;
9695 } 9691 }
9696 printed = printf("%s[0x%x]", name, value); 9692 printed = printk("%s[0x%x]", name, value);
9697 if (table == NULL) { 9693 if (table == NULL) {
9698 printed += printf(" "); 9694 printed += printk(" ");
9699 *cur_column += printed; 9695 *cur_column += printed;
9700 return (printed); 9696 return (printed);
9701 } 9697 }
@@ -9710,7 +9706,7 @@ ahd_print_register(const ahd_reg_parse_entry_t *table, u_int num_entries,
9710 == table[entry].mask)) 9706 == table[entry].mask))
9711 continue; 9707 continue;
9712 9708
9713 printed += printf("%s%s", 9709 printed += printk("%s%s",
9714 printed_mask == 0 ? ":(" : "|", 9710 printed_mask == 0 ? ":(" : "|",
9715 table[entry].name); 9711 table[entry].name);
9716 printed_mask |= table[entry].mask; 9712 printed_mask |= table[entry].mask;
@@ -9721,9 +9717,9 @@ ahd_print_register(const ahd_reg_parse_entry_t *table, u_int num_entries,
9721 break; 9717 break;
9722 } 9718 }
9723 if (printed_mask != 0) 9719 if (printed_mask != 0)
9724 printed += printf(") "); 9720 printed += printk(") ");
9725 else 9721 else
9726 printed += printf(" "); 9722 printed += printk(" ");
9727 if (cur_column != NULL) 9723 if (cur_column != NULL)
9728 *cur_column += printed; 9724 *cur_column += printed;
9729 return (printed); 9725 return (printed);
@@ -9749,17 +9745,17 @@ ahd_dump_card_state(struct ahd_softc *ahd)
9749 } 9745 }
9750 saved_modes = ahd_save_modes(ahd); 9746 saved_modes = ahd_save_modes(ahd);
9751 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 9747 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
9752 printf(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n" 9748 printk(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n"
9753 "%s: Dumping Card State at program address 0x%x Mode 0x%x\n", 9749 "%s: Dumping Card State at program address 0x%x Mode 0x%x\n",
9754 ahd_name(ahd), 9750 ahd_name(ahd),
9755 ahd_inw(ahd, CURADDR), 9751 ahd_inw(ahd, CURADDR),
9756 ahd_build_mode_state(ahd, ahd->saved_src_mode, 9752 ahd_build_mode_state(ahd, ahd->saved_src_mode,
9757 ahd->saved_dst_mode)); 9753 ahd->saved_dst_mode));
9758 if (paused) 9754 if (paused)
9759 printf("Card was paused\n"); 9755 printk("Card was paused\n");
9760 9756
9761 if (ahd_check_cmdcmpltqueues(ahd)) 9757 if (ahd_check_cmdcmpltqueues(ahd))
9762 printf("Completions are pending\n"); 9758 printk("Completions are pending\n");
9763 9759
9764 /* 9760 /*
9765 * Mode independent registers. 9761 * Mode independent registers.
@@ -9801,8 +9797,8 @@ ahd_dump_card_state(struct ahd_softc *ahd)
9801 ahd_lqostat0_print(ahd_inb(ahd, LQOSTAT0), &cur_col, 50); 9797 ahd_lqostat0_print(ahd_inb(ahd, LQOSTAT0), &cur_col, 50);
9802 ahd_lqostat1_print(ahd_inb(ahd, LQOSTAT1), &cur_col, 50); 9798 ahd_lqostat1_print(ahd_inb(ahd, LQOSTAT1), &cur_col, 50);
9803 ahd_lqostat2_print(ahd_inb(ahd, LQOSTAT2), &cur_col, 50); 9799 ahd_lqostat2_print(ahd_inb(ahd, LQOSTAT2), &cur_col, 50);
9804 printf("\n"); 9800 printk("\n");
9805 printf("\nSCB Count = %d CMDS_PENDING = %d LASTSCB 0x%x " 9801 printk("\nSCB Count = %d CMDS_PENDING = %d LASTSCB 0x%x "
9806 "CURRSCB 0x%x NEXTSCB 0x%x\n", 9802 "CURRSCB 0x%x NEXTSCB 0x%x\n",
9807 ahd->scb_data.numscbs, ahd_inw(ahd, CMDS_PENDING), 9803 ahd->scb_data.numscbs, ahd_inw(ahd, CMDS_PENDING),
9808 ahd_inw(ahd, LASTSCB), ahd_inw(ahd, CURRSCB), 9804 ahd_inw(ahd, LASTSCB), ahd_inw(ahd, CURRSCB),
@@ -9813,12 +9809,12 @@ ahd_dump_card_state(struct ahd_softc *ahd)
9813 CAM_LUN_WILDCARD, SCB_LIST_NULL, 9809 CAM_LUN_WILDCARD, SCB_LIST_NULL,
9814 ROLE_UNKNOWN, /*status*/0, SEARCH_PRINT); 9810 ROLE_UNKNOWN, /*status*/0, SEARCH_PRINT);
9815 saved_scb_index = ahd_get_scbptr(ahd); 9811 saved_scb_index = ahd_get_scbptr(ahd);
9816 printf("Pending list:"); 9812 printk("Pending list:");
9817 i = 0; 9813 i = 0;
9818 LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) { 9814 LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
9819 if (i++ > AHD_SCB_MAX) 9815 if (i++ > AHD_SCB_MAX)
9820 break; 9816 break;
9821 cur_col = printf("\n%3d FIFO_USE[0x%x] ", SCB_GET_TAG(scb), 9817 cur_col = printk("\n%3d FIFO_USE[0x%x] ", SCB_GET_TAG(scb),
9822 ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT)); 9818 ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT));
9823 ahd_set_scbptr(ahd, SCB_GET_TAG(scb)); 9819 ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
9824 ahd_scb_control_print(ahd_inb_scbram(ahd, SCB_CONTROL), 9820 ahd_scb_control_print(ahd_inb_scbram(ahd, SCB_CONTROL),
@@ -9826,16 +9822,16 @@ ahd_dump_card_state(struct ahd_softc *ahd)
9826 ahd_scb_scsiid_print(ahd_inb_scbram(ahd, SCB_SCSIID), 9822 ahd_scb_scsiid_print(ahd_inb_scbram(ahd, SCB_SCSIID),
9827 &cur_col, 60); 9823 &cur_col, 60);
9828 } 9824 }
9829 printf("\nTotal %d\n", i); 9825 printk("\nTotal %d\n", i);
9830 9826
9831 printf("Kernel Free SCB list: "); 9827 printk("Kernel Free SCB list: ");
9832 i = 0; 9828 i = 0;
9833 TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) { 9829 TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) {
9834 struct scb *list_scb; 9830 struct scb *list_scb;
9835 9831
9836 list_scb = scb; 9832 list_scb = scb;
9837 do { 9833 do {
9838 printf("%d ", SCB_GET_TAG(list_scb)); 9834 printk("%d ", SCB_GET_TAG(list_scb));
9839 list_scb = LIST_NEXT(list_scb, collision_links); 9835 list_scb = LIST_NEXT(list_scb, collision_links);
9840 } while (list_scb && i++ < AHD_SCB_MAX); 9836 } while (list_scb && i++ < AHD_SCB_MAX);
9841 } 9837 }
@@ -9843,49 +9839,49 @@ ahd_dump_card_state(struct ahd_softc *ahd)
9843 LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) { 9839 LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) {
9844 if (i++ > AHD_SCB_MAX) 9840 if (i++ > AHD_SCB_MAX)
9845 break; 9841 break;
9846 printf("%d ", SCB_GET_TAG(scb)); 9842 printk("%d ", SCB_GET_TAG(scb));
9847 } 9843 }
9848 printf("\n"); 9844 printk("\n");
9849 9845
9850 printf("Sequencer Complete DMA-inprog list: "); 9846 printk("Sequencer Complete DMA-inprog list: ");
9851 scb_index = ahd_inw(ahd, COMPLETE_SCB_DMAINPROG_HEAD); 9847 scb_index = ahd_inw(ahd, COMPLETE_SCB_DMAINPROG_HEAD);
9852 i = 0; 9848 i = 0;
9853 while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { 9849 while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
9854 ahd_set_scbptr(ahd, scb_index); 9850 ahd_set_scbptr(ahd, scb_index);
9855 printf("%d ", scb_index); 9851 printk("%d ", scb_index);
9856 scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); 9852 scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
9857 } 9853 }
9858 printf("\n"); 9854 printk("\n");
9859 9855
9860 printf("Sequencer Complete list: "); 9856 printk("Sequencer Complete list: ");
9861 scb_index = ahd_inw(ahd, COMPLETE_SCB_HEAD); 9857 scb_index = ahd_inw(ahd, COMPLETE_SCB_HEAD);
9862 i = 0; 9858 i = 0;
9863 while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { 9859 while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
9864 ahd_set_scbptr(ahd, scb_index); 9860 ahd_set_scbptr(ahd, scb_index);
9865 printf("%d ", scb_index); 9861 printk("%d ", scb_index);
9866 scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); 9862 scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
9867 } 9863 }
9868 printf("\n"); 9864 printk("\n");
9869 9865
9870 9866
9871 printf("Sequencer DMA-Up and Complete list: "); 9867 printk("Sequencer DMA-Up and Complete list: ");
9872 scb_index = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD); 9868 scb_index = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD);
9873 i = 0; 9869 i = 0;
9874 while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { 9870 while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
9875 ahd_set_scbptr(ahd, scb_index); 9871 ahd_set_scbptr(ahd, scb_index);
9876 printf("%d ", scb_index); 9872 printk("%d ", scb_index);
9877 scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); 9873 scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
9878 } 9874 }
9879 printf("\n"); 9875 printk("\n");
9880 printf("Sequencer On QFreeze and Complete list: "); 9876 printk("Sequencer On QFreeze and Complete list: ");
9881 scb_index = ahd_inw(ahd, COMPLETE_ON_QFREEZE_HEAD); 9877 scb_index = ahd_inw(ahd, COMPLETE_ON_QFREEZE_HEAD);
9882 i = 0; 9878 i = 0;
9883 while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { 9879 while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
9884 ahd_set_scbptr(ahd, scb_index); 9880 ahd_set_scbptr(ahd, scb_index);
9885 printf("%d ", scb_index); 9881 printk("%d ", scb_index);
9886 scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); 9882 scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
9887 } 9883 }
9888 printf("\n"); 9884 printk("\n");
9889 ahd_set_scbptr(ahd, saved_scb_index); 9885 ahd_set_scbptr(ahd, saved_scb_index);
9890 dffstat = ahd_inb(ahd, DFFSTAT); 9886 dffstat = ahd_inb(ahd, DFFSTAT);
9891 for (i = 0; i < 2; i++) { 9887 for (i = 0; i < 2; i++) {
@@ -9896,7 +9892,7 @@ ahd_dump_card_state(struct ahd_softc *ahd)
9896 9892
9897 ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i); 9893 ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i);
9898 fifo_scbptr = ahd_get_scbptr(ahd); 9894 fifo_scbptr = ahd_get_scbptr(ahd);
9899 printf("\n\n%s: FIFO%d %s, LONGJMP == 0x%x, SCB 0x%x\n", 9895 printk("\n\n%s: FIFO%d %s, LONGJMP == 0x%x, SCB 0x%x\n",
9900 ahd_name(ahd), i, 9896 ahd_name(ahd), i,
9901 (dffstat & (FIFO0FREE << i)) ? "Free" : "Active", 9897 (dffstat & (FIFO0FREE << i)) ? "Free" : "Active",
9902 ahd_inw(ahd, LONGJMP_ADDR), fifo_scbptr); 9898 ahd_inw(ahd, LONGJMP_ADDR), fifo_scbptr);
@@ -9912,20 +9908,20 @@ ahd_dump_card_state(struct ahd_softc *ahd)
9912 ahd_soffcnt_print(ahd_inb(ahd, SOFFCNT), &cur_col, 50); 9908 ahd_soffcnt_print(ahd_inb(ahd, SOFFCNT), &cur_col, 50);
9913 ahd_mdffstat_print(ahd_inb(ahd, MDFFSTAT), &cur_col, 50); 9909 ahd_mdffstat_print(ahd_inb(ahd, MDFFSTAT), &cur_col, 50);
9914 if (cur_col > 50) { 9910 if (cur_col > 50) {
9915 printf("\n"); 9911 printk("\n");
9916 cur_col = 0; 9912 cur_col = 0;
9917 } 9913 }
9918 cur_col += printf("SHADDR = 0x%x%x, SHCNT = 0x%x ", 9914 cur_col += printk("SHADDR = 0x%x%x, SHCNT = 0x%x ",
9919 ahd_inl(ahd, SHADDR+4), 9915 ahd_inl(ahd, SHADDR+4),
9920 ahd_inl(ahd, SHADDR), 9916 ahd_inl(ahd, SHADDR),
9921 (ahd_inb(ahd, SHCNT) 9917 (ahd_inb(ahd, SHCNT)
9922 | (ahd_inb(ahd, SHCNT + 1) << 8) 9918 | (ahd_inb(ahd, SHCNT + 1) << 8)
9923 | (ahd_inb(ahd, SHCNT + 2) << 16))); 9919 | (ahd_inb(ahd, SHCNT + 2) << 16)));
9924 if (cur_col > 50) { 9920 if (cur_col > 50) {
9925 printf("\n"); 9921 printk("\n");
9926 cur_col = 0; 9922 cur_col = 0;
9927 } 9923 }
9928 cur_col += printf("HADDR = 0x%x%x, HCNT = 0x%x ", 9924 cur_col += printk("HADDR = 0x%x%x, HCNT = 0x%x ",
9929 ahd_inl(ahd, HADDR+4), 9925 ahd_inl(ahd, HADDR+4),
9930 ahd_inl(ahd, HADDR), 9926 ahd_inl(ahd, HADDR),
9931 (ahd_inb(ahd, HCNT) 9927 (ahd_inb(ahd, HCNT)
@@ -9940,52 +9936,52 @@ ahd_dump_card_state(struct ahd_softc *ahd)
9940 } 9936 }
9941#endif 9937#endif
9942 } 9938 }
9943 printf("\nLQIN: "); 9939 printk("\nLQIN: ");
9944 for (i = 0; i < 20; i++) 9940 for (i = 0; i < 20; i++)
9945 printf("0x%x ", ahd_inb(ahd, LQIN + i)); 9941 printk("0x%x ", ahd_inb(ahd, LQIN + i));
9946 printf("\n"); 9942 printk("\n");
9947 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); 9943 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
9948 printf("%s: LQISTATE = 0x%x, LQOSTATE = 0x%x, OPTIONMODE = 0x%x\n", 9944 printk("%s: LQISTATE = 0x%x, LQOSTATE = 0x%x, OPTIONMODE = 0x%x\n",
9949 ahd_name(ahd), ahd_inb(ahd, LQISTATE), ahd_inb(ahd, LQOSTATE), 9945 ahd_name(ahd), ahd_inb(ahd, LQISTATE), ahd_inb(ahd, LQOSTATE),
9950 ahd_inb(ahd, OPTIONMODE)); 9946 ahd_inb(ahd, OPTIONMODE));
9951 printf("%s: OS_SPACE_CNT = 0x%x MAXCMDCNT = 0x%x\n", 9947 printk("%s: OS_SPACE_CNT = 0x%x MAXCMDCNT = 0x%x\n",
9952 ahd_name(ahd), ahd_inb(ahd, OS_SPACE_CNT), 9948 ahd_name(ahd), ahd_inb(ahd, OS_SPACE_CNT),
9953 ahd_inb(ahd, MAXCMDCNT)); 9949 ahd_inb(ahd, MAXCMDCNT));
9954 printf("%s: SAVED_SCSIID = 0x%x SAVED_LUN = 0x%x\n", 9950 printk("%s: SAVED_SCSIID = 0x%x SAVED_LUN = 0x%x\n",
9955 ahd_name(ahd), ahd_inb(ahd, SAVED_SCSIID), 9951 ahd_name(ahd), ahd_inb(ahd, SAVED_SCSIID),
9956 ahd_inb(ahd, SAVED_LUN)); 9952 ahd_inb(ahd, SAVED_LUN));
9957 ahd_simode0_print(ahd_inb(ahd, SIMODE0), &cur_col, 50); 9953 ahd_simode0_print(ahd_inb(ahd, SIMODE0), &cur_col, 50);
9958 printf("\n"); 9954 printk("\n");
9959 ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); 9955 ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
9960 cur_col = 0; 9956 cur_col = 0;
9961 ahd_ccscbctl_print(ahd_inb(ahd, CCSCBCTL), &cur_col, 50); 9957 ahd_ccscbctl_print(ahd_inb(ahd, CCSCBCTL), &cur_col, 50);
9962 printf("\n"); 9958 printk("\n");
9963 ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode); 9959 ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
9964 printf("%s: REG0 == 0x%x, SINDEX = 0x%x, DINDEX = 0x%x\n", 9960 printk("%s: REG0 == 0x%x, SINDEX = 0x%x, DINDEX = 0x%x\n",
9965 ahd_name(ahd), ahd_inw(ahd, REG0), ahd_inw(ahd, SINDEX), 9961 ahd_name(ahd), ahd_inw(ahd, REG0), ahd_inw(ahd, SINDEX),
9966 ahd_inw(ahd, DINDEX)); 9962 ahd_inw(ahd, DINDEX));
9967 printf("%s: SCBPTR == 0x%x, SCB_NEXT == 0x%x, SCB_NEXT2 == 0x%x\n", 9963 printk("%s: SCBPTR == 0x%x, SCB_NEXT == 0x%x, SCB_NEXT2 == 0x%x\n",
9968 ahd_name(ahd), ahd_get_scbptr(ahd), 9964 ahd_name(ahd), ahd_get_scbptr(ahd),
9969 ahd_inw_scbram(ahd, SCB_NEXT), 9965 ahd_inw_scbram(ahd, SCB_NEXT),
9970 ahd_inw_scbram(ahd, SCB_NEXT2)); 9966 ahd_inw_scbram(ahd, SCB_NEXT2));
9971 printf("CDB %x %x %x %x %x %x\n", 9967 printk("CDB %x %x %x %x %x %x\n",
9972 ahd_inb_scbram(ahd, SCB_CDB_STORE), 9968 ahd_inb_scbram(ahd, SCB_CDB_STORE),
9973 ahd_inb_scbram(ahd, SCB_CDB_STORE+1), 9969 ahd_inb_scbram(ahd, SCB_CDB_STORE+1),
9974 ahd_inb_scbram(ahd, SCB_CDB_STORE+2), 9970 ahd_inb_scbram(ahd, SCB_CDB_STORE+2),
9975 ahd_inb_scbram(ahd, SCB_CDB_STORE+3), 9971 ahd_inb_scbram(ahd, SCB_CDB_STORE+3),
9976 ahd_inb_scbram(ahd, SCB_CDB_STORE+4), 9972 ahd_inb_scbram(ahd, SCB_CDB_STORE+4),
9977 ahd_inb_scbram(ahd, SCB_CDB_STORE+5)); 9973 ahd_inb_scbram(ahd, SCB_CDB_STORE+5));
9978 printf("STACK:"); 9974 printk("STACK:");
9979 for (i = 0; i < ahd->stack_size; i++) { 9975 for (i = 0; i < ahd->stack_size; i++) {
9980 ahd->saved_stack[i] = 9976 ahd->saved_stack[i] =
9981 ahd_inb(ahd, STACK)|(ahd_inb(ahd, STACK) << 8); 9977 ahd_inb(ahd, STACK)|(ahd_inb(ahd, STACK) << 8);
9982 printf(" 0x%x", ahd->saved_stack[i]); 9978 printk(" 0x%x", ahd->saved_stack[i]);
9983 } 9979 }
9984 for (i = ahd->stack_size-1; i >= 0; i--) { 9980 for (i = ahd->stack_size-1; i >= 0; i--) {
9985 ahd_outb(ahd, STACK, ahd->saved_stack[i] & 0xFF); 9981 ahd_outb(ahd, STACK, ahd->saved_stack[i] & 0xFF);
9986 ahd_outb(ahd, STACK, (ahd->saved_stack[i] >> 8) & 0xFF); 9982 ahd_outb(ahd, STACK, (ahd->saved_stack[i] >> 8) & 0xFF);
9987 } 9983 }
9988 printf("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n"); 9984 printk("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n");
9989 ahd_restore_modes(ahd, saved_modes); 9985 ahd_restore_modes(ahd, saved_modes);
9990 if (paused == 0) 9986 if (paused == 0)
9991 ahd_unpause(ahd); 9987 ahd_unpause(ahd);
@@ -10004,8 +10000,8 @@ ahd_dump_scbs(struct ahd_softc *ahd)
10004 saved_scb_index = ahd_get_scbptr(ahd); 10000 saved_scb_index = ahd_get_scbptr(ahd);
10005 for (i = 0; i < AHD_SCB_MAX; i++) { 10001 for (i = 0; i < AHD_SCB_MAX; i++) {
10006 ahd_set_scbptr(ahd, i); 10002 ahd_set_scbptr(ahd, i);
10007 printf("%3d", i); 10003 printk("%3d", i);
10008 printf("(CTRL 0x%x ID 0x%x N 0x%x N2 0x%x SG 0x%x, RSG 0x%x)\n", 10004 printk("(CTRL 0x%x ID 0x%x N 0x%x N2 0x%x SG 0x%x, RSG 0x%x)\n",
10009 ahd_inb_scbram(ahd, SCB_CONTROL), 10005 ahd_inb_scbram(ahd, SCB_CONTROL),
10010 ahd_inb_scbram(ahd, SCB_SCSIID), 10006 ahd_inb_scbram(ahd, SCB_SCSIID),
10011 ahd_inw_scbram(ahd, SCB_NEXT), 10007 ahd_inw_scbram(ahd, SCB_NEXT),
@@ -10013,7 +10009,7 @@ ahd_dump_scbs(struct ahd_softc *ahd)
10013 ahd_inl_scbram(ahd, SCB_SGPTR), 10009 ahd_inl_scbram(ahd, SCB_SGPTR),
10014 ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR)); 10010 ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR));
10015 } 10011 }
10016 printf("\n"); 10012 printk("\n");
10017 ahd_set_scbptr(ahd, saved_scb_index); 10013 ahd_set_scbptr(ahd, saved_scb_index);
10018 ahd_restore_modes(ahd, saved_modes); 10014 ahd_restore_modes(ahd, saved_modes);
10019} 10015}
@@ -10383,7 +10379,7 @@ ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
10383 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 10379 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
10384 u_long s; 10380 u_long s;
10385 10381
10386 printf("Configuring Target Mode\n"); 10382 printk("Configuring Target Mode\n");
10387 ahd_lock(ahd, &s); 10383 ahd_lock(ahd, &s);
10388 if (LIST_FIRST(&ahd->pending_scbs) != NULL) { 10384 if (LIST_FIRST(&ahd->pending_scbs) != NULL) {
10389 ccb->ccb_h.status = CAM_BUSY; 10385 ccb->ccb_h.status = CAM_BUSY;
@@ -10412,7 +10408,7 @@ ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
10412 /* Are we already enabled?? */ 10408 /* Are we already enabled?? */
10413 if (lstate != NULL) { 10409 if (lstate != NULL) {
10414 xpt_print_path(ccb->ccb_h.path); 10410 xpt_print_path(ccb->ccb_h.path);
10415 printf("Lun already enabled\n"); 10411 printk("Lun already enabled\n");
10416 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; 10412 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
10417 return; 10413 return;
10418 } 10414 }
@@ -10424,7 +10420,7 @@ ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
10424 * specific commands. 10420 * specific commands.
10425 */ 10421 */
10426 ccb->ccb_h.status = CAM_REQ_INVALID; 10422 ccb->ccb_h.status = CAM_REQ_INVALID;
10427 printf("Non-zero Group Codes\n"); 10423 printk("Non-zero Group Codes\n");
10428 return; 10424 return;
10429 } 10425 }
10430 10426
@@ -10436,15 +10432,15 @@ ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
10436 tstate = ahd_alloc_tstate(ahd, target, channel); 10432 tstate = ahd_alloc_tstate(ahd, target, channel);
10437 if (tstate == NULL) { 10433 if (tstate == NULL) {
10438 xpt_print_path(ccb->ccb_h.path); 10434 xpt_print_path(ccb->ccb_h.path);
10439 printf("Couldn't allocate tstate\n"); 10435 printk("Couldn't allocate tstate\n");
10440 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 10436 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
10441 return; 10437 return;
10442 } 10438 }
10443 } 10439 }
10444 lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT); 10440 lstate = kmalloc(sizeof(*lstate), GFP_ATOMIC);
10445 if (lstate == NULL) { 10441 if (lstate == NULL) {
10446 xpt_print_path(ccb->ccb_h.path); 10442 xpt_print_path(ccb->ccb_h.path);
10447 printf("Couldn't allocate lstate\n"); 10443 printk("Couldn't allocate lstate\n");
10448 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 10444 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
10449 return; 10445 return;
10450 } 10446 }
@@ -10454,9 +10450,9 @@ ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
10454 xpt_path_target_id(ccb->ccb_h.path), 10450 xpt_path_target_id(ccb->ccb_h.path),
10455 xpt_path_lun_id(ccb->ccb_h.path)); 10451 xpt_path_lun_id(ccb->ccb_h.path));
10456 if (status != CAM_REQ_CMP) { 10452 if (status != CAM_REQ_CMP) {
10457 free(lstate, M_DEVBUF); 10453 kfree(lstate);
10458 xpt_print_path(ccb->ccb_h.path); 10454 xpt_print_path(ccb->ccb_h.path);
10459 printf("Couldn't allocate path\n"); 10455 printk("Couldn't allocate path\n");
10460 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 10456 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
10461 return; 10457 return;
10462 } 10458 }
@@ -10524,7 +10520,7 @@ ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
10524 ahd_unlock(ahd, &s); 10520 ahd_unlock(ahd, &s);
10525 ccb->ccb_h.status = CAM_REQ_CMP; 10521 ccb->ccb_h.status = CAM_REQ_CMP;
10526 xpt_print_path(ccb->ccb_h.path); 10522 xpt_print_path(ccb->ccb_h.path);
10527 printf("Lun now enabled for target mode\n"); 10523 printk("Lun now enabled for target mode\n");
10528 } else { 10524 } else {
10529 struct scb *scb; 10525 struct scb *scb;
10530 int i, empty; 10526 int i, empty;
@@ -10543,7 +10539,7 @@ ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
10543 ccbh = &scb->io_ctx->ccb_h; 10539 ccbh = &scb->io_ctx->ccb_h;
10544 if (ccbh->func_code == XPT_CONT_TARGET_IO 10540 if (ccbh->func_code == XPT_CONT_TARGET_IO
10545 && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){ 10541 && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){
10546 printf("CTIO pending\n"); 10542 printk("CTIO pending\n");
10547 ccb->ccb_h.status = CAM_REQ_INVALID; 10543 ccb->ccb_h.status = CAM_REQ_INVALID;
10548 ahd_unlock(ahd, &s); 10544 ahd_unlock(ahd, &s);
10549 return; 10545 return;
@@ -10551,12 +10547,12 @@ ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
10551 } 10547 }
10552 10548
10553 if (SLIST_FIRST(&lstate->accept_tios) != NULL) { 10549 if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
10554 printf("ATIOs pending\n"); 10550 printk("ATIOs pending\n");
10555 ccb->ccb_h.status = CAM_REQ_INVALID; 10551 ccb->ccb_h.status = CAM_REQ_INVALID;
10556 } 10552 }
10557 10553
10558 if (SLIST_FIRST(&lstate->immed_notifies) != NULL) { 10554 if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
10559 printf("INOTs pending\n"); 10555 printk("INOTs pending\n");
10560 ccb->ccb_h.status = CAM_REQ_INVALID; 10556 ccb->ccb_h.status = CAM_REQ_INVALID;
10561 } 10557 }
10562 10558
@@ -10566,9 +10562,9 @@ ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
10566 } 10562 }
10567 10563
10568 xpt_print_path(ccb->ccb_h.path); 10564 xpt_print_path(ccb->ccb_h.path);
10569 printf("Target mode disabled\n"); 10565 printk("Target mode disabled\n");
10570 xpt_free_path(lstate->path); 10566 xpt_free_path(lstate->path);
10571 free(lstate, M_DEVBUF); 10567 kfree(lstate);
10572 10568
10573 ahd_pause(ahd); 10569 ahd_pause(ahd);
10574 /* Can we clean up the target too? */ 10570 /* Can we clean up the target too? */
@@ -10615,7 +10611,7 @@ ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
10615 ahd_outb(ahd, SCSISEQ1, scsiseq1); 10611 ahd_outb(ahd, SCSISEQ1, scsiseq1);
10616 10612
10617 if ((ahd->features & AHD_MULTIROLE) == 0) { 10613 if ((ahd->features & AHD_MULTIROLE) == 0) {
10618 printf("Configuring Initiator Mode\n"); 10614 printk("Configuring Initiator Mode\n");
10619 ahd->flags &= ~AHD_TARGETROLE; 10615 ahd->flags &= ~AHD_TARGETROLE;
10620 ahd->flags |= AHD_INITIATORROLE; 10616 ahd->flags |= AHD_INITIATORROLE;
10621 ahd_pause(ahd); 10617 ahd_pause(ahd);
@@ -10749,7 +10745,7 @@ ahd_handle_target_cmd(struct ahd_softc *ahd, struct target_cmd *cmd)
10749 ahd->flags &= ~AHD_TQINFIFO_BLOCKED; 10745 ahd->flags &= ~AHD_TQINFIFO_BLOCKED;
10750#ifdef AHD_DEBUG 10746#ifdef AHD_DEBUG
10751 if ((ahd_debug & AHD_SHOW_TQIN) != 0) 10747 if ((ahd_debug & AHD_SHOW_TQIN) != 0)
10752 printf("Incoming command from %d for %d:%d%s\n", 10748 printk("Incoming command from %d for %d:%d%s\n",
10753 initiator, target, lun, 10749 initiator, target, lun,
10754 lstate == ahd->black_hole ? "(Black Holed)" : ""); 10750 lstate == ahd->black_hole ? "(Black Holed)" : "");
10755#endif 10751#endif
@@ -10796,7 +10792,7 @@ ahd_handle_target_cmd(struct ahd_softc *ahd, struct target_cmd *cmd)
10796 default: 10792 default:
10797 /* Only copy the opcode. */ 10793 /* Only copy the opcode. */
10798 atio->cdb_len = 1; 10794 atio->cdb_len = 1;
10799 printf("Reserved or VU command code type encountered\n"); 10795 printk("Reserved or VU command code type encountered\n");
10800 break; 10796 break;
10801 } 10797 }
10802 10798
@@ -10813,7 +10809,7 @@ ahd_handle_target_cmd(struct ahd_softc *ahd, struct target_cmd *cmd)
10813 */ 10809 */
10814#ifdef AHD_DEBUG 10810#ifdef AHD_DEBUG
10815 if ((ahd_debug & AHD_SHOW_TQIN) != 0) 10811 if ((ahd_debug & AHD_SHOW_TQIN) != 0)
10816 printf("Received Immediate Command %d:%d:%d - %p\n", 10812 printk("Received Immediate Command %d:%d:%d - %p\n",
10817 initiator, target, lun, ahd->pending_device); 10813 initiator, target, lun, ahd->pending_device);
10818#endif 10814#endif
10819 ahd->pending_device = lstate; 10815 ahd->pending_device = lstate;
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 4c41332a354b..88ad8482ef59 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -674,7 +674,7 @@ ahd_linux_slave_alloc(struct scsi_device *sdev)
674 struct ahd_linux_device *dev; 674 struct ahd_linux_device *dev;
675 675
676 if (bootverbose) 676 if (bootverbose)
677 printf("%s: Slave Alloc %d\n", ahd_name(ahd), sdev->id); 677 printk("%s: Slave Alloc %d\n", ahd_name(ahd), sdev->id);
678 678
679 dev = scsi_transport_device_data(sdev); 679 dev = scsi_transport_device_data(sdev);
680 memset(dev, 0, sizeof(*dev)); 680 memset(dev, 0, sizeof(*dev));
@@ -798,10 +798,10 @@ ahd_linux_dev_reset(struct scsi_cmnd *cmd)
798 scmd_printk(KERN_INFO, cmd, 798 scmd_printk(KERN_INFO, cmd,
799 "Attempting to queue a TARGET RESET message:"); 799 "Attempting to queue a TARGET RESET message:");
800 800
801 printf("CDB:"); 801 printk("CDB:");
802 for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++) 802 for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++)
803 printf(" 0x%x", cmd->cmnd[cdb_byte]); 803 printk(" 0x%x", cmd->cmnd[cdb_byte]);
804 printf("\n"); 804 printk("\n");
805 805
806 /* 806 /*
807 * Determine if we currently own this command. 807 * Determine if we currently own this command.
@@ -857,16 +857,16 @@ ahd_linux_dev_reset(struct scsi_cmnd *cmd)
857 ahd->platform_data->eh_done = &done; 857 ahd->platform_data->eh_done = &done;
858 ahd_unlock(ahd, &flags); 858 ahd_unlock(ahd, &flags);
859 859
860 printf("%s: Device reset code sleeping\n", ahd_name(ahd)); 860 printk("%s: Device reset code sleeping\n", ahd_name(ahd));
861 if (!wait_for_completion_timeout(&done, 5 * HZ)) { 861 if (!wait_for_completion_timeout(&done, 5 * HZ)) {
862 ahd_lock(ahd, &flags); 862 ahd_lock(ahd, &flags);
863 ahd->platform_data->eh_done = NULL; 863 ahd->platform_data->eh_done = NULL;
864 ahd_unlock(ahd, &flags); 864 ahd_unlock(ahd, &flags);
865 printf("%s: Device reset timer expired (active %d)\n", 865 printk("%s: Device reset timer expired (active %d)\n",
866 ahd_name(ahd), dev->active); 866 ahd_name(ahd), dev->active);
867 retval = FAILED; 867 retval = FAILED;
868 } 868 }
869 printf("%s: Device reset returning 0x%x\n", ahd_name(ahd), retval); 869 printk("%s: Device reset returning 0x%x\n", ahd_name(ahd), retval);
870 870
871 return (retval); 871 return (retval);
872} 872}
@@ -884,7 +884,7 @@ ahd_linux_bus_reset(struct scsi_cmnd *cmd)
884 ahd = *(struct ahd_softc **)cmd->device->host->hostdata; 884 ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
885#ifdef AHD_DEBUG 885#ifdef AHD_DEBUG
886 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) 886 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
887 printf("%s: Bus reset called for cmd %p\n", 887 printk("%s: Bus reset called for cmd %p\n",
888 ahd_name(ahd), cmd); 888 ahd_name(ahd), cmd);
889#endif 889#endif
890 ahd_lock(ahd, &flags); 890 ahd_lock(ahd, &flags);
@@ -894,7 +894,7 @@ ahd_linux_bus_reset(struct scsi_cmnd *cmd)
894 ahd_unlock(ahd, &flags); 894 ahd_unlock(ahd, &flags);
895 895
896 if (bootverbose) 896 if (bootverbose)
897 printf("%s: SCSI bus reset delivered. " 897 printk("%s: SCSI bus reset delivered. "
898 "%d SCBs aborted.\n", ahd_name(ahd), found); 898 "%d SCBs aborted.\n", ahd_name(ahd), found);
899 899
900 return (SUCCESS); 900 return (SUCCESS);
@@ -935,7 +935,7 @@ ahd_dma_tag_create(struct ahd_softc *ahd, bus_dma_tag_t parent,
935{ 935{
936 bus_dma_tag_t dmat; 936 bus_dma_tag_t dmat;
937 937
938 dmat = malloc(sizeof(*dmat), M_DEVBUF, M_NOWAIT); 938 dmat = kmalloc(sizeof(*dmat), GFP_ATOMIC);
939 if (dmat == NULL) 939 if (dmat == NULL)
940 return (ENOMEM); 940 return (ENOMEM);
941 941
@@ -956,7 +956,7 @@ ahd_dma_tag_create(struct ahd_softc *ahd, bus_dma_tag_t parent,
956void 956void
957ahd_dma_tag_destroy(struct ahd_softc *ahd, bus_dma_tag_t dmat) 957ahd_dma_tag_destroy(struct ahd_softc *ahd, bus_dma_tag_t dmat)
958{ 958{
959 free(dmat, M_DEVBUF); 959 kfree(dmat);
960} 960}
961 961
962int 962int
@@ -1019,7 +1019,7 @@ ahd_linux_setup_iocell_info(u_long index, int instance, int targ, int32_t value)
1019 iocell_info = (uint8_t*)&aic79xx_iocell_info[instance]; 1019 iocell_info = (uint8_t*)&aic79xx_iocell_info[instance];
1020 iocell_info[index] = value & 0xFFFF; 1020 iocell_info[index] = value & 0xFFFF;
1021 if (bootverbose) 1021 if (bootverbose)
1022 printf("iocell[%d:%ld] = %d\n", instance, index, value); 1022 printk("iocell[%d:%ld] = %d\n", instance, index, value);
1023 } 1023 }
1024} 1024}
1025 1025
@@ -1029,7 +1029,7 @@ ahd_linux_setup_tag_info_global(char *p)
1029 int tags, i, j; 1029 int tags, i, j;
1030 1030
1031 tags = simple_strtoul(p + 1, NULL, 0) & 0xff; 1031 tags = simple_strtoul(p + 1, NULL, 0) & 0xff;
1032 printf("Setting Global Tags= %d\n", tags); 1032 printk("Setting Global Tags= %d\n", tags);
1033 1033
1034 for (i = 0; i < ARRAY_SIZE(aic79xx_tag_info); i++) { 1034 for (i = 0; i < ARRAY_SIZE(aic79xx_tag_info); i++) {
1035 for (j = 0; j < AHD_NUM_TARGETS; j++) { 1035 for (j = 0; j < AHD_NUM_TARGETS; j++) {
@@ -1047,7 +1047,7 @@ ahd_linux_setup_tag_info(u_long arg, int instance, int targ, int32_t value)
1047 && (targ < AHD_NUM_TARGETS)) { 1047 && (targ < AHD_NUM_TARGETS)) {
1048 aic79xx_tag_info[instance].tag_commands[targ] = value & 0x1FF; 1048 aic79xx_tag_info[instance].tag_commands[targ] = value & 0x1FF;
1049 if (bootverbose) 1049 if (bootverbose)
1050 printf("tag_info[%d:%d] = %d\n", instance, targ, value); 1050 printk("tag_info[%d:%d] = %d\n", instance, targ, value);
1051 } 1051 }
1052} 1052}
1053 1053
@@ -1088,7 +1088,7 @@ ahd_parse_brace_option(char *opt_name, char *opt_arg, char *end, int depth,
1088 if (targ == -1) 1088 if (targ == -1)
1089 targ = 0; 1089 targ = 0;
1090 } else { 1090 } else {
1091 printf("Malformed Option %s\n", 1091 printk("Malformed Option %s\n",
1092 opt_name); 1092 opt_name);
1093 done = TRUE; 1093 done = TRUE;
1094 } 1094 }
@@ -1246,7 +1246,7 @@ ahd_linux_register_host(struct ahd_softc *ahd, struct scsi_host_template *templa
1246 ahd_set_unit(ahd, ahd_linux_unit++); 1246 ahd_set_unit(ahd, ahd_linux_unit++);
1247 ahd_unlock(ahd, &s); 1247 ahd_unlock(ahd, &s);
1248 sprintf(buf, "scsi%d", host->host_no); 1248 sprintf(buf, "scsi%d", host->host_no);
1249 new_name = malloc(strlen(buf) + 1, M_DEVBUF, M_NOWAIT); 1249 new_name = kmalloc(strlen(buf) + 1, GFP_ATOMIC);
1250 if (new_name != NULL) { 1250 if (new_name != NULL) {
1251 strcpy(new_name, buf); 1251 strcpy(new_name, buf);
1252 ahd_set_name(ahd, new_name); 1252 ahd_set_name(ahd, new_name);
@@ -1322,7 +1322,7 @@ int
1322ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg) 1322ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
1323{ 1323{
1324 ahd->platform_data = 1324 ahd->platform_data =
1325 malloc(sizeof(struct ahd_platform_data), M_DEVBUF, M_NOWAIT); 1325 kmalloc(sizeof(struct ahd_platform_data), GFP_ATOMIC);
1326 if (ahd->platform_data == NULL) 1326 if (ahd->platform_data == NULL)
1327 return (ENOMEM); 1327 return (ENOMEM);
1328 memset(ahd->platform_data, 0, sizeof(struct ahd_platform_data)); 1328 memset(ahd->platform_data, 0, sizeof(struct ahd_platform_data));
@@ -1364,7 +1364,7 @@ ahd_platform_free(struct ahd_softc *ahd)
1364 if (ahd->platform_data->host) 1364 if (ahd->platform_data->host)
1365 scsi_host_put(ahd->platform_data->host); 1365 scsi_host_put(ahd->platform_data->host);
1366 1366
1367 free(ahd->platform_data, M_DEVBUF); 1367 kfree(ahd->platform_data);
1368 } 1368 }
1369} 1369}
1370 1370
@@ -1502,7 +1502,7 @@ ahd_linux_user_tagdepth(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
1502 if (ahd->unit >= ARRAY_SIZE(aic79xx_tag_info)) { 1502 if (ahd->unit >= ARRAY_SIZE(aic79xx_tag_info)) {
1503 1503
1504 if (warned_user == 0) { 1504 if (warned_user == 0) {
1505 printf(KERN_WARNING 1505 printk(KERN_WARNING
1506"aic79xx: WARNING: Insufficient tag_info instances\n" 1506"aic79xx: WARNING: Insufficient tag_info instances\n"
1507"aic79xx: for installed controllers. Using defaults\n" 1507"aic79xx: for installed controllers. Using defaults\n"
1508"aic79xx: Please update the aic79xx_tag_info array in\n" 1508"aic79xx: Please update the aic79xx_tag_info array in\n"
@@ -1544,7 +1544,7 @@ ahd_linux_device_queue_depth(struct scsi_device *sdev)
1544 ahd_send_async(ahd, devinfo.channel, devinfo.target, 1544 ahd_send_async(ahd, devinfo.channel, devinfo.target,
1545 devinfo.lun, AC_TRANSFER_NEG); 1545 devinfo.lun, AC_TRANSFER_NEG);
1546 ahd_print_devinfo(ahd, &devinfo); 1546 ahd_print_devinfo(ahd, &devinfo);
1547 printf("Tagged Queuing enabled. Depth %d\n", tags); 1547 printk("Tagged Queuing enabled. Depth %d\n", tags);
1548 } else { 1548 } else {
1549 ahd_platform_set_tags(ahd, sdev, &devinfo, AHD_QUEUE_NONE); 1549 ahd_platform_set_tags(ahd, sdev, &devinfo, AHD_QUEUE_NONE);
1550 ahd_send_async(ahd, devinfo.channel, devinfo.target, 1550 ahd_send_async(ahd, devinfo.channel, devinfo.target,
@@ -1794,7 +1794,7 @@ ahd_done(struct ahd_softc *ahd, struct scb *scb)
1794 struct ahd_linux_device *dev; 1794 struct ahd_linux_device *dev;
1795 1795
1796 if ((scb->flags & SCB_ACTIVE) == 0) { 1796 if ((scb->flags & SCB_ACTIVE) == 0) {
1797 printf("SCB %d done'd twice\n", SCB_GET_TAG(scb)); 1797 printk("SCB %d done'd twice\n", SCB_GET_TAG(scb));
1798 ahd_dump_card_state(ahd); 1798 ahd_dump_card_state(ahd);
1799 panic("Stopping for safety"); 1799 panic("Stopping for safety");
1800 } 1800 }
@@ -1825,7 +1825,7 @@ ahd_done(struct ahd_softc *ahd, struct scb *scb)
1825#ifdef AHD_DEBUG 1825#ifdef AHD_DEBUG
1826 if ((ahd_debug & AHD_SHOW_MISC) != 0) { 1826 if ((ahd_debug & AHD_SHOW_MISC) != 0) {
1827 ahd_print_path(ahd, scb); 1827 ahd_print_path(ahd, scb);
1828 printf("Set CAM_UNCOR_PARITY\n"); 1828 printk("Set CAM_UNCOR_PARITY\n");
1829 } 1829 }
1830#endif 1830#endif
1831 ahd_set_transaction_status(scb, CAM_UNCOR_PARITY); 1831 ahd_set_transaction_status(scb, CAM_UNCOR_PARITY);
@@ -1843,12 +1843,12 @@ ahd_done(struct ahd_softc *ahd, struct scb *scb)
1843 u_int i; 1843 u_int i;
1844 1844
1845 ahd_print_path(ahd, scb); 1845 ahd_print_path(ahd, scb);
1846 printf("CDB:"); 1846 printk("CDB:");
1847 for (i = 0; i < scb->io_ctx->cmd_len; i++) 1847 for (i = 0; i < scb->io_ctx->cmd_len; i++)
1848 printf(" 0x%x", scb->io_ctx->cmnd[i]); 1848 printk(" 0x%x", scb->io_ctx->cmnd[i]);
1849 printf("\n"); 1849 printk("\n");
1850 ahd_print_path(ahd, scb); 1850 ahd_print_path(ahd, scb);
1851 printf("Saw underflow (%ld of %ld bytes). " 1851 printk("Saw underflow (%ld of %ld bytes). "
1852 "Treated as error\n", 1852 "Treated as error\n",
1853 ahd_get_residual(scb), 1853 ahd_get_residual(scb),
1854 ahd_get_transfer_length(scb)); 1854 ahd_get_transfer_length(scb));
@@ -1881,7 +1881,7 @@ ahd_done(struct ahd_softc *ahd, struct scb *scb)
1881 dev->commands_since_idle_or_otag = 0; 1881 dev->commands_since_idle_or_otag = 0;
1882 1882
1883 if ((scb->flags & SCB_RECOVERY_SCB) != 0) { 1883 if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
1884 printf("Recovery SCB completes\n"); 1884 printk("Recovery SCB completes\n");
1885 if (ahd_get_transaction_status(scb) == CAM_BDR_SENT 1885 if (ahd_get_transaction_status(scb) == CAM_BDR_SENT
1886 || ahd_get_transaction_status(scb) == CAM_REQ_ABORTED) 1886 || ahd_get_transaction_status(scb) == CAM_REQ_ABORTED)
1887 ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT); 1887 ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT);
@@ -1963,14 +1963,14 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
1963 if (ahd_debug & AHD_SHOW_SENSE) { 1963 if (ahd_debug & AHD_SHOW_SENSE) {
1964 int i; 1964 int i;
1965 1965
1966 printf("Copied %d bytes of sense data at %d:", 1966 printk("Copied %d bytes of sense data at %d:",
1967 sense_size, sense_offset); 1967 sense_size, sense_offset);
1968 for (i = 0; i < sense_size; i++) { 1968 for (i = 0; i < sense_size; i++) {
1969 if ((i & 0xF) == 0) 1969 if ((i & 0xF) == 0)
1970 printf("\n"); 1970 printk("\n");
1971 printf("0x%x ", cmd->sense_buffer[i]); 1971 printk("0x%x ", cmd->sense_buffer[i]);
1972 } 1972 }
1973 printf("\n"); 1973 printk("\n");
1974 } 1974 }
1975#endif 1975#endif
1976 } 1976 }
@@ -1995,7 +1995,7 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
1995#ifdef AHD_DEBUG 1995#ifdef AHD_DEBUG
1996 if ((ahd_debug & AHD_SHOW_QFULL) != 0) { 1996 if ((ahd_debug & AHD_SHOW_QFULL) != 0) {
1997 ahd_print_path(ahd, scb); 1997 ahd_print_path(ahd, scb);
1998 printf("Dropping tag count to %d\n", 1998 printk("Dropping tag count to %d\n",
1999 dev->active); 1999 dev->active);
2000 } 2000 }
2001#endif 2001#endif
@@ -2014,7 +2014,7 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
2014 == AHD_LOCK_TAGS_COUNT) { 2014 == AHD_LOCK_TAGS_COUNT) {
2015 dev->maxtags = dev->active; 2015 dev->maxtags = dev->active;
2016 ahd_print_path(ahd, scb); 2016 ahd_print_path(ahd, scb);
2017 printf("Locking max tag count at %d\n", 2017 printk("Locking max tag count at %d\n",
2018 dev->active); 2018 dev->active);
2019 } 2019 }
2020 } else { 2020 } else {
@@ -2138,7 +2138,7 @@ ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, struct scsi_cmnd *cmd)
2138 } 2138 }
2139 2139
2140 if (do_fallback) { 2140 if (do_fallback) {
2141 printf("%s: device overrun (status %x) on %d:%d:%d\n", 2141 printk("%s: device overrun (status %x) on %d:%d:%d\n",
2142 ahd_name(ahd), status, cmd->device->channel, 2142 ahd_name(ahd), status, cmd->device->channel,
2143 cmd->device->id, cmd->device->lun); 2143 cmd->device->id, cmd->device->lun);
2144 } 2144 }
@@ -2187,10 +2187,10 @@ ahd_linux_queue_abort_cmd(struct scsi_cmnd *cmd)
2187 scmd_printk(KERN_INFO, cmd, 2187 scmd_printk(KERN_INFO, cmd,
2188 "Attempting to queue an ABORT message:"); 2188 "Attempting to queue an ABORT message:");
2189 2189
2190 printf("CDB:"); 2190 printk("CDB:");
2191 for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++) 2191 for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++)
2192 printf(" 0x%x", cmd->cmnd[cdb_byte]); 2192 printk(" 0x%x", cmd->cmnd[cdb_byte]);
2193 printf("\n"); 2193 printk("\n");
2194 2194
2195 ahd_lock(ahd, &flags); 2195 ahd_lock(ahd, &flags);
2196 2196
@@ -2249,7 +2249,7 @@ ahd_linux_queue_abort_cmd(struct scsi_cmnd *cmd)
2249 goto no_cmd; 2249 goto no_cmd;
2250 } 2250 }
2251 2251
2252 printf("%s: At time of recovery, card was %spaused\n", 2252 printk("%s: At time of recovery, card was %spaused\n",
2253 ahd_name(ahd), was_paused ? "" : "not "); 2253 ahd_name(ahd), was_paused ? "" : "not ");
2254 ahd_dump_card_state(ahd); 2254 ahd_dump_card_state(ahd);
2255 2255
@@ -2260,7 +2260,7 @@ ahd_linux_queue_abort_cmd(struct scsi_cmnd *cmd)
2260 pending_scb->hscb->tag, 2260 pending_scb->hscb->tag,
2261 ROLE_INITIATOR, CAM_REQ_ABORTED, 2261 ROLE_INITIATOR, CAM_REQ_ABORTED,
2262 SEARCH_COMPLETE) > 0) { 2262 SEARCH_COMPLETE) > 0) {
2263 printf("%s:%d:%d:%d: Cmd aborted from QINFIFO\n", 2263 printk("%s:%d:%d:%d: Cmd aborted from QINFIFO\n",
2264 ahd_name(ahd), cmd->device->channel, 2264 ahd_name(ahd), cmd->device->channel,
2265 cmd->device->id, cmd->device->lun); 2265 cmd->device->id, cmd->device->lun);
2266 retval = SUCCESS; 2266 retval = SUCCESS;
@@ -2355,7 +2355,7 @@ ahd_linux_queue_abort_cmd(struct scsi_cmnd *cmd)
2355 ahd_qinfifo_requeue_tail(ahd, pending_scb); 2355 ahd_qinfifo_requeue_tail(ahd, pending_scb);
2356 ahd_set_scbptr(ahd, saved_scbptr); 2356 ahd_set_scbptr(ahd, saved_scbptr);
2357 ahd_print_path(ahd, pending_scb); 2357 ahd_print_path(ahd, pending_scb);
2358 printf("Device is disconnected, re-queuing SCB\n"); 2358 printk("Device is disconnected, re-queuing SCB\n");
2359 wait = TRUE; 2359 wait = TRUE;
2360 } else { 2360 } else {
2361 scmd_printk(KERN_INFO, cmd, "Unable to deliver message\n"); 2361 scmd_printk(KERN_INFO, cmd, "Unable to deliver message\n");
@@ -2380,21 +2380,21 @@ done:
2380 ahd->platform_data->eh_done = &done; 2380 ahd->platform_data->eh_done = &done;
2381 ahd_unlock(ahd, &flags); 2381 ahd_unlock(ahd, &flags);
2382 2382
2383 printf("%s: Recovery code sleeping\n", ahd_name(ahd)); 2383 printk("%s: Recovery code sleeping\n", ahd_name(ahd));
2384 if (!wait_for_completion_timeout(&done, 5 * HZ)) { 2384 if (!wait_for_completion_timeout(&done, 5 * HZ)) {
2385 ahd_lock(ahd, &flags); 2385 ahd_lock(ahd, &flags);
2386 ahd->platform_data->eh_done = NULL; 2386 ahd->platform_data->eh_done = NULL;
2387 ahd_unlock(ahd, &flags); 2387 ahd_unlock(ahd, &flags);
2388 printf("%s: Timer Expired (active %d)\n", 2388 printk("%s: Timer Expired (active %d)\n",
2389 ahd_name(ahd), dev->active); 2389 ahd_name(ahd), dev->active);
2390 retval = FAILED; 2390 retval = FAILED;
2391 } 2391 }
2392 printf("Recovery code awake\n"); 2392 printk("Recovery code awake\n");
2393 } else 2393 } else
2394 ahd_unlock(ahd, &flags); 2394 ahd_unlock(ahd, &flags);
2395 2395
2396 if (retval != SUCCESS) 2396 if (retval != SUCCESS)
2397 printf("%s: Command abort returning 0x%x\n", 2397 printk("%s: Command abort returning 0x%x\n",
2398 ahd_name(ahd), retval); 2398 ahd_name(ahd), retval);
2399 2399
2400 return retval; 2400 return retval;
@@ -2431,7 +2431,7 @@ static void ahd_linux_set_period(struct scsi_target *starget, int period)
2431 2431
2432#ifdef AHD_DEBUG 2432#ifdef AHD_DEBUG
2433 if ((ahd_debug & AHD_SHOW_DV) != 0) 2433 if ((ahd_debug & AHD_SHOW_DV) != 0)
2434 printf("%s: set period to %d\n", ahd_name(ahd), period); 2434 printk("%s: set period to %d\n", ahd_name(ahd), period);
2435#endif 2435#endif
2436 if (offset == 0) 2436 if (offset == 0)
2437 offset = MAX_OFFSET; 2437 offset = MAX_OFFSET;
@@ -2484,7 +2484,7 @@ static void ahd_linux_set_offset(struct scsi_target *starget, int offset)
2484 2484
2485#ifdef AHD_DEBUG 2485#ifdef AHD_DEBUG
2486 if ((ahd_debug & AHD_SHOW_DV) != 0) 2486 if ((ahd_debug & AHD_SHOW_DV) != 0)
2487 printf("%s: set offset to %d\n", ahd_name(ahd), offset); 2487 printk("%s: set offset to %d\n", ahd_name(ahd), offset);
2488#endif 2488#endif
2489 2489
2490 ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, 2490 ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
@@ -2520,7 +2520,7 @@ static void ahd_linux_set_dt(struct scsi_target *starget, int dt)
2520 2520
2521#ifdef AHD_DEBUG 2521#ifdef AHD_DEBUG
2522 if ((ahd_debug & AHD_SHOW_DV) != 0) 2522 if ((ahd_debug & AHD_SHOW_DV) != 0)
2523 printf("%s: %s DT\n", ahd_name(ahd), 2523 printk("%s: %s DT\n", ahd_name(ahd),
2524 dt ? "enabling" : "disabling"); 2524 dt ? "enabling" : "disabling");
2525#endif 2525#endif
2526 if (dt && spi_max_width(starget)) { 2526 if (dt && spi_max_width(starget)) {
@@ -2562,7 +2562,7 @@ static void ahd_linux_set_qas(struct scsi_target *starget, int qas)
2562 2562
2563#ifdef AHD_DEBUG 2563#ifdef AHD_DEBUG
2564 if ((ahd_debug & AHD_SHOW_DV) != 0) 2564 if ((ahd_debug & AHD_SHOW_DV) != 0)
2565 printf("%s: %s QAS\n", ahd_name(ahd), 2565 printk("%s: %s QAS\n", ahd_name(ahd),
2566 qas ? "enabling" : "disabling"); 2566 qas ? "enabling" : "disabling");
2567#endif 2567#endif
2568 2568
@@ -2601,7 +2601,7 @@ static void ahd_linux_set_iu(struct scsi_target *starget, int iu)
2601 2601
2602#ifdef AHD_DEBUG 2602#ifdef AHD_DEBUG
2603 if ((ahd_debug & AHD_SHOW_DV) != 0) 2603 if ((ahd_debug & AHD_SHOW_DV) != 0)
2604 printf("%s: %s IU\n", ahd_name(ahd), 2604 printk("%s: %s IU\n", ahd_name(ahd),
2605 iu ? "enabling" : "disabling"); 2605 iu ? "enabling" : "disabling");
2606#endif 2606#endif
2607 2607
@@ -2641,7 +2641,7 @@ static void ahd_linux_set_rd_strm(struct scsi_target *starget, int rdstrm)
2641 2641
2642#ifdef AHD_DEBUG 2642#ifdef AHD_DEBUG
2643 if ((ahd_debug & AHD_SHOW_DV) != 0) 2643 if ((ahd_debug & AHD_SHOW_DV) != 0)
2644 printf("%s: %s Read Streaming\n", ahd_name(ahd), 2644 printk("%s: %s Read Streaming\n", ahd_name(ahd),
2645 rdstrm ? "enabling" : "disabling"); 2645 rdstrm ? "enabling" : "disabling");
2646#endif 2646#endif
2647 2647
@@ -2677,7 +2677,7 @@ static void ahd_linux_set_wr_flow(struct scsi_target *starget, int wrflow)
2677 2677
2678#ifdef AHD_DEBUG 2678#ifdef AHD_DEBUG
2679 if ((ahd_debug & AHD_SHOW_DV) != 0) 2679 if ((ahd_debug & AHD_SHOW_DV) != 0)
2680 printf("%s: %s Write Flow Control\n", ahd_name(ahd), 2680 printk("%s: %s Write Flow Control\n", ahd_name(ahd),
2681 wrflow ? "enabling" : "disabling"); 2681 wrflow ? "enabling" : "disabling");
2682#endif 2682#endif
2683 2683
@@ -2714,14 +2714,14 @@ static void ahd_linux_set_rti(struct scsi_target *starget, int rti)
2714 if ((ahd->features & AHD_RTI) == 0) { 2714 if ((ahd->features & AHD_RTI) == 0) {
2715#ifdef AHD_DEBUG 2715#ifdef AHD_DEBUG
2716 if ((ahd_debug & AHD_SHOW_DV) != 0) 2716 if ((ahd_debug & AHD_SHOW_DV) != 0)
2717 printf("%s: RTI not available\n", ahd_name(ahd)); 2717 printk("%s: RTI not available\n", ahd_name(ahd));
2718#endif 2718#endif
2719 return; 2719 return;
2720 } 2720 }
2721 2721
2722#ifdef AHD_DEBUG 2722#ifdef AHD_DEBUG
2723 if ((ahd_debug & AHD_SHOW_DV) != 0) 2723 if ((ahd_debug & AHD_SHOW_DV) != 0)
2724 printf("%s: %s RTI\n", ahd_name(ahd), 2724 printk("%s: %s RTI\n", ahd_name(ahd),
2725 rti ? "enabling" : "disabling"); 2725 rti ? "enabling" : "disabling");
2726#endif 2726#endif
2727 2727
@@ -2757,7 +2757,7 @@ static void ahd_linux_set_pcomp_en(struct scsi_target *starget, int pcomp)
2757 2757
2758#ifdef AHD_DEBUG 2758#ifdef AHD_DEBUG
2759 if ((ahd_debug & AHD_SHOW_DV) != 0) 2759 if ((ahd_debug & AHD_SHOW_DV) != 0)
2760 printf("%s: %s Precompensation\n", ahd_name(ahd), 2760 printk("%s: %s Precompensation\n", ahd_name(ahd),
2761 pcomp ? "Enable" : "Disable"); 2761 pcomp ? "Enable" : "Disable");
2762#endif 2762#endif
2763 2763
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h
index 55c1fe07969f..28e43498cdff 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.h
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.h
@@ -363,13 +363,6 @@ struct ahd_platform_data {
363 resource_size_t mem_busaddr; /* Mem Base Addr */ 363 resource_size_t mem_busaddr; /* Mem Base Addr */
364}; 364};
365 365
366/************************** OS Utility Wrappers *******************************/
367#define printf printk
368#define M_NOWAIT GFP_ATOMIC
369#define M_WAITOK 0
370#define malloc(size, type, flags) kmalloc(size, flags)
371#define free(ptr, type) kfree(ptr)
372
373void ahd_delay(long); 366void ahd_delay(long);
374 367
375/***************************** Low Level I/O **********************************/ 368/***************************** Low Level I/O **********************************/
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
index 8f686122d54e..3c85873b14b9 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
@@ -178,7 +178,7 @@ ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
178 ahd_get_pci_bus(pci), 178 ahd_get_pci_bus(pci),
179 ahd_get_pci_slot(pci), 179 ahd_get_pci_slot(pci),
180 ahd_get_pci_function(pci)); 180 ahd_get_pci_function(pci));
181 name = malloc(strlen(buf) + 1, M_DEVBUF, M_NOWAIT); 181 name = kmalloc(strlen(buf) + 1, GFP_ATOMIC);
182 if (name == NULL) 182 if (name == NULL)
183 return (-ENOMEM); 183 return (-ENOMEM);
184 strcpy(name, buf); 184 strcpy(name, buf);
@@ -333,7 +333,7 @@ ahd_pci_map_registers(struct ahd_softc *ahd)
333 333
334 if (ahd_pci_test_register_access(ahd) != 0) { 334 if (ahd_pci_test_register_access(ahd) != 0) {
335 335
336 printf("aic79xx: PCI Device %d:%d:%d " 336 printk("aic79xx: PCI Device %d:%d:%d "
337 "failed memory mapped test. Using PIO.\n", 337 "failed memory mapped test. Using PIO.\n",
338 ahd_get_pci_bus(ahd->dev_softc), 338 ahd_get_pci_bus(ahd->dev_softc),
339 ahd_get_pci_slot(ahd->dev_softc), 339 ahd_get_pci_slot(ahd->dev_softc),
@@ -346,7 +346,7 @@ ahd_pci_map_registers(struct ahd_softc *ahd)
346 } else 346 } else
347 command |= PCIM_CMD_MEMEN; 347 command |= PCIM_CMD_MEMEN;
348 } else if (bootverbose) { 348 } else if (bootverbose) {
349 printf("aic79xx: PCI%d:%d:%d MEM region 0x%llx " 349 printk("aic79xx: PCI%d:%d:%d MEM region 0x%llx "
350 "unavailable. Cannot memory map device.\n", 350 "unavailable. Cannot memory map device.\n",
351 ahd_get_pci_bus(ahd->dev_softc), 351 ahd_get_pci_bus(ahd->dev_softc),
352 ahd_get_pci_slot(ahd->dev_softc), 352 ahd_get_pci_slot(ahd->dev_softc),
@@ -365,7 +365,7 @@ ahd_pci_map_registers(struct ahd_softc *ahd)
365 ahd->bshs[1].ioport = (u_long)base2; 365 ahd->bshs[1].ioport = (u_long)base2;
366 command |= PCIM_CMD_PORTEN; 366 command |= PCIM_CMD_PORTEN;
367 } else { 367 } else {
368 printf("aic79xx: PCI%d:%d:%d IO regions 0x%llx and " 368 printk("aic79xx: PCI%d:%d:%d IO regions 0x%llx and "
369 "0x%llx unavailable. Cannot map device.\n", 369 "0x%llx unavailable. Cannot map device.\n",
370 ahd_get_pci_bus(ahd->dev_softc), 370 ahd_get_pci_bus(ahd->dev_softc),
371 ahd_get_pci_slot(ahd->dev_softc), 371 ahd_get_pci_slot(ahd->dev_softc),
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
index 90a04a37b4f7..14b5f8d0e7f4 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -338,7 +338,7 @@ ahd_pci_config(struct ahd_softc *ahd, const struct ahd_pci_identity *entry)
338 */ 338 */
339 if ((ahd->flags & (AHD_39BIT_ADDRESSING|AHD_64BIT_ADDRESSING)) != 0) { 339 if ((ahd->flags & (AHD_39BIT_ADDRESSING|AHD_64BIT_ADDRESSING)) != 0) {
340 if (bootverbose) 340 if (bootverbose)
341 printf("%s: Enabling 39Bit Addressing\n", 341 printk("%s: Enabling 39Bit Addressing\n",
342 ahd_name(ahd)); 342 ahd_name(ahd));
343 devconfig = ahd_pci_read_config(ahd->dev_softc, 343 devconfig = ahd_pci_read_config(ahd->dev_softc,
344 DEVCONFIG, /*bytes*/4); 344 DEVCONFIG, /*bytes*/4);
@@ -528,7 +528,7 @@ ahd_check_extport(struct ahd_softc *ahd)
528 * Fetch VPD for this function and parse it. 528 * Fetch VPD for this function and parse it.
529 */ 529 */
530 if (bootverbose) 530 if (bootverbose)
531 printf("%s: Reading VPD from SEEPROM...", 531 printk("%s: Reading VPD from SEEPROM...",
532 ahd_name(ahd)); 532 ahd_name(ahd));
533 533
534 /* Address is always in units of 16bit words */ 534 /* Address is always in units of 16bit words */
@@ -541,12 +541,12 @@ ahd_check_extport(struct ahd_softc *ahd)
541 if (error == 0) 541 if (error == 0)
542 error = ahd_parse_vpddata(ahd, &vpd); 542 error = ahd_parse_vpddata(ahd, &vpd);
543 if (bootverbose) 543 if (bootverbose)
544 printf("%s: VPD parsing %s\n", 544 printk("%s: VPD parsing %s\n",
545 ahd_name(ahd), 545 ahd_name(ahd),
546 error == 0 ? "successful" : "failed"); 546 error == 0 ? "successful" : "failed");
547 547
548 if (bootverbose) 548 if (bootverbose)
549 printf("%s: Reading SEEPROM...", ahd_name(ahd)); 549 printk("%s: Reading SEEPROM...", ahd_name(ahd));
550 550
551 /* Address is always in units of 16bit words */ 551 /* Address is always in units of 16bit words */
552 start_addr = (sizeof(*sc) / 2) * (ahd->channel - 'A'); 552 start_addr = (sizeof(*sc) / 2) * (ahd->channel - 'A');
@@ -556,16 +556,16 @@ ahd_check_extport(struct ahd_softc *ahd)
556 /*bytestream*/FALSE); 556 /*bytestream*/FALSE);
557 557
558 if (error != 0) { 558 if (error != 0) {
559 printf("Unable to read SEEPROM\n"); 559 printk("Unable to read SEEPROM\n");
560 have_seeprom = 0; 560 have_seeprom = 0;
561 } else { 561 } else {
562 have_seeprom = ahd_verify_cksum(sc); 562 have_seeprom = ahd_verify_cksum(sc);
563 563
564 if (bootverbose) { 564 if (bootverbose) {
565 if (have_seeprom == 0) 565 if (have_seeprom == 0)
566 printf ("checksum error\n"); 566 printk ("checksum error\n");
567 else 567 else
568 printf ("done.\n"); 568 printk ("done.\n");
569 } 569 }
570 } 570 }
571 ahd_release_seeprom(ahd); 571 ahd_release_seeprom(ahd);
@@ -615,21 +615,21 @@ ahd_check_extport(struct ahd_softc *ahd)
615 uint16_t *sc_data; 615 uint16_t *sc_data;
616 int i; 616 int i;
617 617
618 printf("%s: Seeprom Contents:", ahd_name(ahd)); 618 printk("%s: Seeprom Contents:", ahd_name(ahd));
619 sc_data = (uint16_t *)sc; 619 sc_data = (uint16_t *)sc;
620 for (i = 0; i < (sizeof(*sc)); i += 2) 620 for (i = 0; i < (sizeof(*sc)); i += 2)
621 printf("\n\t0x%.4x", sc_data[i]); 621 printk("\n\t0x%.4x", sc_data[i]);
622 printf("\n"); 622 printk("\n");
623 } 623 }
624#endif 624#endif
625 625
626 if (!have_seeprom) { 626 if (!have_seeprom) {
627 if (bootverbose) 627 if (bootverbose)
628 printf("%s: No SEEPROM available.\n", ahd_name(ahd)); 628 printk("%s: No SEEPROM available.\n", ahd_name(ahd));
629 ahd->flags |= AHD_USEDEFAULTS; 629 ahd->flags |= AHD_USEDEFAULTS;
630 error = ahd_default_config(ahd); 630 error = ahd_default_config(ahd);
631 adapter_control = CFAUTOTERM|CFSEAUTOTERM; 631 adapter_control = CFAUTOTERM|CFSEAUTOTERM;
632 free(ahd->seep_config, M_DEVBUF); 632 kfree(ahd->seep_config);
633 ahd->seep_config = NULL; 633 ahd->seep_config = NULL;
634 } else { 634 } else {
635 error = ahd_parse_cfgdata(ahd, sc); 635 error = ahd_parse_cfgdata(ahd, sc);
@@ -656,7 +656,7 @@ ahd_configure_termination(struct ahd_softc *ahd, u_int adapter_control)
656 if ((ahd->flags & AHD_STPWLEVEL_A) != 0) 656 if ((ahd->flags & AHD_STPWLEVEL_A) != 0)
657 devconfig |= STPWLEVEL; 657 devconfig |= STPWLEVEL;
658 if (bootverbose) 658 if (bootverbose)
659 printf("%s: STPWLEVEL is %s\n", 659 printk("%s: STPWLEVEL is %s\n",
660 ahd_name(ahd), (devconfig & STPWLEVEL) ? "on" : "off"); 660 ahd_name(ahd), (devconfig & STPWLEVEL) ? "on" : "off");
661 ahd_pci_write_config(ahd->dev_softc, DEVCONFIG, devconfig, /*bytes*/4); 661 ahd_pci_write_config(ahd->dev_softc, DEVCONFIG, devconfig, /*bytes*/4);
662 662
@@ -671,7 +671,7 @@ ahd_configure_termination(struct ahd_softc *ahd, u_int adapter_control)
671 error = ahd_read_flexport(ahd, FLXADDR_TERMCTL, &termctl); 671 error = ahd_read_flexport(ahd, FLXADDR_TERMCTL, &termctl);
672 if ((adapter_control & CFAUTOTERM) == 0) { 672 if ((adapter_control & CFAUTOTERM) == 0) {
673 if (bootverbose) 673 if (bootverbose)
674 printf("%s: Manual Primary Termination\n", 674 printk("%s: Manual Primary Termination\n",
675 ahd_name(ahd)); 675 ahd_name(ahd));
676 termctl &= ~(FLX_TERMCTL_ENPRILOW|FLX_TERMCTL_ENPRIHIGH); 676 termctl &= ~(FLX_TERMCTL_ENPRILOW|FLX_TERMCTL_ENPRIHIGH);
677 if ((adapter_control & CFSTERM) != 0) 677 if ((adapter_control & CFSTERM) != 0)
@@ -679,14 +679,14 @@ ahd_configure_termination(struct ahd_softc *ahd, u_int adapter_control)
679 if ((adapter_control & CFWSTERM) != 0) 679 if ((adapter_control & CFWSTERM) != 0)
680 termctl |= FLX_TERMCTL_ENPRIHIGH; 680 termctl |= FLX_TERMCTL_ENPRIHIGH;
681 } else if (error != 0) { 681 } else if (error != 0) {
682 printf("%s: Primary Auto-Term Sensing failed! " 682 printk("%s: Primary Auto-Term Sensing failed! "
683 "Using Defaults.\n", ahd_name(ahd)); 683 "Using Defaults.\n", ahd_name(ahd));
684 termctl = FLX_TERMCTL_ENPRILOW|FLX_TERMCTL_ENPRIHIGH; 684 termctl = FLX_TERMCTL_ENPRILOW|FLX_TERMCTL_ENPRIHIGH;
685 } 685 }
686 686
687 if ((adapter_control & CFSEAUTOTERM) == 0) { 687 if ((adapter_control & CFSEAUTOTERM) == 0) {
688 if (bootverbose) 688 if (bootverbose)
689 printf("%s: Manual Secondary Termination\n", 689 printk("%s: Manual Secondary Termination\n",
690 ahd_name(ahd)); 690 ahd_name(ahd));
691 termctl &= ~(FLX_TERMCTL_ENSECLOW|FLX_TERMCTL_ENSECHIGH); 691 termctl &= ~(FLX_TERMCTL_ENSECLOW|FLX_TERMCTL_ENSECHIGH);
692 if ((adapter_control & CFSELOWTERM) != 0) 692 if ((adapter_control & CFSELOWTERM) != 0)
@@ -694,7 +694,7 @@ ahd_configure_termination(struct ahd_softc *ahd, u_int adapter_control)
694 if ((adapter_control & CFSEHIGHTERM) != 0) 694 if ((adapter_control & CFSEHIGHTERM) != 0)
695 termctl |= FLX_TERMCTL_ENSECHIGH; 695 termctl |= FLX_TERMCTL_ENSECHIGH;
696 } else if (error != 0) { 696 } else if (error != 0) {
697 printf("%s: Secondary Auto-Term Sensing failed! " 697 printk("%s: Secondary Auto-Term Sensing failed! "
698 "Using Defaults.\n", ahd_name(ahd)); 698 "Using Defaults.\n", ahd_name(ahd));
699 termctl |= FLX_TERMCTL_ENSECLOW|FLX_TERMCTL_ENSECHIGH; 699 termctl |= FLX_TERMCTL_ENSECLOW|FLX_TERMCTL_ENSECHIGH;
700 } 700 }
@@ -714,22 +714,22 @@ ahd_configure_termination(struct ahd_softc *ahd, u_int adapter_control)
714 714
715 error = ahd_write_flexport(ahd, FLXADDR_TERMCTL, termctl); 715 error = ahd_write_flexport(ahd, FLXADDR_TERMCTL, termctl);
716 if (error != 0) { 716 if (error != 0) {
717 printf("%s: Unable to set termination settings!\n", 717 printk("%s: Unable to set termination settings!\n",
718 ahd_name(ahd)); 718 ahd_name(ahd));
719 } else if (bootverbose) { 719 } else if (bootverbose) {
720 printf("%s: Primary High byte termination %sabled\n", 720 printk("%s: Primary High byte termination %sabled\n",
721 ahd_name(ahd), 721 ahd_name(ahd),
722 (termctl & FLX_TERMCTL_ENPRIHIGH) ? "En" : "Dis"); 722 (termctl & FLX_TERMCTL_ENPRIHIGH) ? "En" : "Dis");
723 723
724 printf("%s: Primary Low byte termination %sabled\n", 724 printk("%s: Primary Low byte termination %sabled\n",
725 ahd_name(ahd), 725 ahd_name(ahd),
726 (termctl & FLX_TERMCTL_ENPRILOW) ? "En" : "Dis"); 726 (termctl & FLX_TERMCTL_ENPRILOW) ? "En" : "Dis");
727 727
728 printf("%s: Secondary High byte termination %sabled\n", 728 printk("%s: Secondary High byte termination %sabled\n",
729 ahd_name(ahd), 729 ahd_name(ahd),
730 (termctl & FLX_TERMCTL_ENSECHIGH) ? "En" : "Dis"); 730 (termctl & FLX_TERMCTL_ENSECHIGH) ? "En" : "Dis");
731 731
732 printf("%s: Secondary Low byte termination %sabled\n", 732 printk("%s: Secondary Low byte termination %sabled\n",
733 ahd_name(ahd), 733 ahd_name(ahd),
734 (termctl & FLX_TERMCTL_ENSECLOW) ? "En" : "Dis"); 734 (termctl & FLX_TERMCTL_ENSECLOW) ? "En" : "Dis");
735 } 735 }
@@ -805,7 +805,7 @@ ahd_pci_intr(struct ahd_softc *ahd)
805 if ((intstat & PCIINT) == 0) 805 if ((intstat & PCIINT) == 0)
806 return; 806 return;
807 807
808 printf("%s: PCI error Interrupt\n", ahd_name(ahd)); 808 printk("%s: PCI error Interrupt\n", ahd_name(ahd));
809 saved_modes = ahd_save_modes(ahd); 809 saved_modes = ahd_save_modes(ahd);
810 ahd_dump_card_state(ahd); 810 ahd_dump_card_state(ahd);
811 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); 811 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
@@ -832,7 +832,7 @@ ahd_pci_intr(struct ahd_softc *ahd)
832 s = pci_status_strings[bit]; 832 s = pci_status_strings[bit];
833 if (i == 7/*TARG*/ && bit == 3) 833 if (i == 7/*TARG*/ && bit == 3)
834 s = "%s: Signaled Target Abort\n"; 834 s = "%s: Signaled Target Abort\n";
835 printf(s, ahd_name(ahd), pci_status_source[i]); 835 printk(s, ahd_name(ahd), pci_status_source[i]);
836 } 836 }
837 } 837 }
838 } 838 }
@@ -862,7 +862,7 @@ ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat)
862 */ 862 */
863 pcix_status = ahd_pci_read_config(ahd->dev_softc, PCIXR_STATUS, 863 pcix_status = ahd_pci_read_config(ahd->dev_softc, PCIXR_STATUS,
864 /*bytes*/2); 864 /*bytes*/2);
865 printf("%s: PCI Split Interrupt - PCI-X status = 0x%x\n", 865 printk("%s: PCI Split Interrupt - PCI-X status = 0x%x\n",
866 ahd_name(ahd), pcix_status); 866 ahd_name(ahd), pcix_status);
867 saved_modes = ahd_save_modes(ahd); 867 saved_modes = ahd_save_modes(ahd);
868 for (i = 0; i < 4; i++) { 868 for (i = 0; i < 4; i++) {
@@ -891,7 +891,7 @@ ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat)
891 static const char *s; 891 static const char *s;
892 892
893 s = split_status_strings[bit]; 893 s = split_status_strings[bit];
894 printf(s, ahd_name(ahd), 894 printk(s, ahd_name(ahd),
895 split_status_source[i]); 895 split_status_source[i]);
896 } 896 }
897 897
@@ -902,7 +902,7 @@ ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat)
902 static const char *s; 902 static const char *s;
903 903
904 s = split_status_strings[bit]; 904 s = split_status_strings[bit];
905 printf(s, ahd_name(ahd), "SG"); 905 printk(s, ahd_name(ahd), "SG");
906 } 906 }
907 } 907 }
908 } 908 }
@@ -950,7 +950,7 @@ ahd_aic790X_setup(struct ahd_softc *ahd)
950 pci = ahd->dev_softc; 950 pci = ahd->dev_softc;
951 rev = ahd_pci_read_config(pci, PCIR_REVID, /*bytes*/1); 951 rev = ahd_pci_read_config(pci, PCIR_REVID, /*bytes*/1);
952 if (rev < ID_AIC7902_PCI_REV_A4) { 952 if (rev < ID_AIC7902_PCI_REV_A4) {
953 printf("%s: Unable to attach to unsupported chip revision %d\n", 953 printk("%s: Unable to attach to unsupported chip revision %d\n",
954 ahd_name(ahd), rev); 954 ahd_name(ahd), rev);
955 ahd_pci_write_config(pci, PCIR_COMMAND, 0, /*bytes*/2); 955 ahd_pci_write_config(pci, PCIR_COMMAND, 0, /*bytes*/2);
956 return (ENXIO); 956 return (ENXIO);
diff --git a/drivers/scsi/aic7xxx/aic79xx_proc.c b/drivers/scsi/aic7xxx/aic79xx_proc.c
index 014bed716e7c..59c85d5a153a 100644
--- a/drivers/scsi/aic7xxx/aic79xx_proc.c
+++ b/drivers/scsi/aic7xxx/aic79xx_proc.c
@@ -272,33 +272,32 @@ ahd_proc_write_seeprom(struct ahd_softc *ahd, char *buffer, int length)
272 saved_modes = ahd_save_modes(ahd); 272 saved_modes = ahd_save_modes(ahd);
273 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 273 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
274 if (length != sizeof(struct seeprom_config)) { 274 if (length != sizeof(struct seeprom_config)) {
275 printf("ahd_proc_write_seeprom: incorrect buffer size\n"); 275 printk("ahd_proc_write_seeprom: incorrect buffer size\n");
276 goto done; 276 goto done;
277 } 277 }
278 278
279 have_seeprom = ahd_verify_cksum((struct seeprom_config*)buffer); 279 have_seeprom = ahd_verify_cksum((struct seeprom_config*)buffer);
280 if (have_seeprom == 0) { 280 if (have_seeprom == 0) {
281 printf("ahd_proc_write_seeprom: cksum verification failed\n"); 281 printk("ahd_proc_write_seeprom: cksum verification failed\n");
282 goto done; 282 goto done;
283 } 283 }
284 284
285 have_seeprom = ahd_acquire_seeprom(ahd); 285 have_seeprom = ahd_acquire_seeprom(ahd);
286 if (!have_seeprom) { 286 if (!have_seeprom) {
287 printf("ahd_proc_write_seeprom: No Serial EEPROM\n"); 287 printk("ahd_proc_write_seeprom: No Serial EEPROM\n");
288 goto done; 288 goto done;
289 } else { 289 } else {
290 u_int start_addr; 290 u_int start_addr;
291 291
292 if (ahd->seep_config == NULL) { 292 if (ahd->seep_config == NULL) {
293 ahd->seep_config = malloc(sizeof(*ahd->seep_config), 293 ahd->seep_config = kmalloc(sizeof(*ahd->seep_config), GFP_ATOMIC);
294 M_DEVBUF, M_NOWAIT);
295 if (ahd->seep_config == NULL) { 294 if (ahd->seep_config == NULL) {
296 printf("aic79xx: Unable to allocate serial " 295 printk("aic79xx: Unable to allocate serial "
297 "eeprom buffer. Write failing\n"); 296 "eeprom buffer. Write failing\n");
298 goto done; 297 goto done;
299 } 298 }
300 } 299 }
301 printf("aic79xx: Writing Serial EEPROM\n"); 300 printk("aic79xx: Writing Serial EEPROM\n");
302 start_addr = 32 * (ahd->channel - 'A'); 301 start_addr = 32 * (ahd->channel - 'A');
303 ahd_write_seeprom(ahd, (u_int16_t *)buffer, start_addr, 302 ahd_write_seeprom(ahd, (u_int16_t *)buffer, start_addr,
304 sizeof(struct seeprom_config)/2); 303 sizeof(struct seeprom_config)/2);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_93cx6.c b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
index dd11999b77b6..9e85a7ef9c8e 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
@@ -207,14 +207,14 @@ ahc_read_seeprom(struct seeprom_descriptor *sd, uint16_t *buf,
207 reset_seeprom(sd); 207 reset_seeprom(sd);
208 } 208 }
209#ifdef AHC_DUMP_EEPROM 209#ifdef AHC_DUMP_EEPROM
210 printf("\nSerial EEPROM:\n\t"); 210 printk("\nSerial EEPROM:\n\t");
211 for (k = 0; k < count; k = k + 1) { 211 for (k = 0; k < count; k = k + 1) {
212 if (((k % 8) == 0) && (k != 0)) { 212 if (((k % 8) == 0) && (k != 0)) {
213 printf ("\n\t"); 213 printk(KERN_CONT "\n\t");
214 } 214 }
215 printf (" 0x%x", buf[k]); 215 printk(KERN_CONT " 0x%x", buf[k]);
216 } 216 }
217 printf ("\n"); 217 printk(KERN_CONT "\n");
218#endif 218#endif
219 return (1); 219 return (1);
220} 220}
@@ -240,7 +240,7 @@ ahc_write_seeprom(struct seeprom_descriptor *sd, uint16_t *buf,
240 ewen = &seeprom_long_ewen; 240 ewen = &seeprom_long_ewen;
241 ewds = &seeprom_long_ewds; 241 ewds = &seeprom_long_ewds;
242 } else { 242 } else {
243 printf("ahc_write_seeprom: unsupported seeprom type %d\n", 243 printk("ahc_write_seeprom: unsupported seeprom type %d\n",
244 sd->sd_chip); 244 sd->sd_chip);
245 return (0); 245 return (0);
246 } 246 }
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 45aa728a76b2..3f5a542a7793 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -910,7 +910,7 @@ ahc_run_qoutfifo(struct ahc_softc *ahc)
910 910
911 scb = ahc_lookup_scb(ahc, scb_index); 911 scb = ahc_lookup_scb(ahc, scb_index);
912 if (scb == NULL) { 912 if (scb == NULL) {
913 printf("%s: WARNING no command for scb %d " 913 printk("%s: WARNING no command for scb %d "
914 "(cmdcmplt)\nQOUTPOS = %d\n", 914 "(cmdcmplt)\nQOUTPOS = %d\n",
915 ahc_name(ahc), scb_index, 915 ahc_name(ahc), scb_index,
916 (ahc->qoutfifonext - 1) & 0xFF); 916 (ahc->qoutfifonext - 1) & 0xFF);
@@ -964,7 +964,7 @@ ahc_handle_brkadrint(struct ahc_softc *ahc)
964 error = ahc_inb(ahc, ERROR); 964 error = ahc_inb(ahc, ERROR);
965 for (i = 0; error != 1 && i < num_errors; i++) 965 for (i = 0; error != 1 && i < num_errors; i++)
966 error >>= 1; 966 error >>= 1;
967 printf("%s: brkadrint, %s at seqaddr = 0x%x\n", 967 printk("%s: brkadrint, %s at seqaddr = 0x%x\n",
968 ahc_name(ahc), ahc_hard_errors[i].errmesg, 968 ahc_name(ahc), ahc_hard_errors[i].errmesg,
969 ahc_inb(ahc, SEQADDR0) | 969 ahc_inb(ahc, SEQADDR0) |
970 (ahc_inb(ahc, SEQADDR1) << 8)); 970 (ahc_inb(ahc, SEQADDR1) << 8));
@@ -1021,7 +1021,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
1021 scb = ahc_lookup_scb(ahc, scb_index); 1021 scb = ahc_lookup_scb(ahc, scb_index);
1022 if (scb == NULL) { 1022 if (scb == NULL) {
1023 ahc_print_devinfo(ahc, &devinfo); 1023 ahc_print_devinfo(ahc, &devinfo);
1024 printf("ahc_intr - referenced scb " 1024 printk("ahc_intr - referenced scb "
1025 "not valid during seqint 0x%x scb(%d)\n", 1025 "not valid during seqint 0x%x scb(%d)\n",
1026 intstat, scb_index); 1026 intstat, scb_index);
1027 ahc_dump_card_state(ahc); 1027 ahc_dump_card_state(ahc);
@@ -1049,7 +1049,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
1049 ahc_set_scsi_status(scb, hscb->shared_data.status.scsi_status); 1049 ahc_set_scsi_status(scb, hscb->shared_data.status.scsi_status);
1050 switch (hscb->shared_data.status.scsi_status) { 1050 switch (hscb->shared_data.status.scsi_status) {
1051 case SCSI_STATUS_OK: 1051 case SCSI_STATUS_OK:
1052 printf("%s: Interrupted for staus of 0???\n", 1052 printk("%s: Interrupted for staus of 0???\n",
1053 ahc_name(ahc)); 1053 ahc_name(ahc));
1054 break; 1054 break;
1055 case SCSI_STATUS_CMD_TERMINATED: 1055 case SCSI_STATUS_CMD_TERMINATED:
@@ -1063,7 +1063,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
1063#ifdef AHC_DEBUG 1063#ifdef AHC_DEBUG
1064 if (ahc_debug & AHC_SHOW_SENSE) { 1064 if (ahc_debug & AHC_SHOW_SENSE) {
1065 ahc_print_path(ahc, scb); 1065 ahc_print_path(ahc, scb);
1066 printf("SCB %d: requests Check Status\n", 1066 printk("SCB %d: requests Check Status\n",
1067 scb->hscb->tag); 1067 scb->hscb->tag);
1068 } 1068 }
1069#endif 1069#endif
@@ -1086,7 +1086,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
1086#ifdef AHC_DEBUG 1086#ifdef AHC_DEBUG
1087 if (ahc_debug & AHC_SHOW_SENSE) { 1087 if (ahc_debug & AHC_SHOW_SENSE) {
1088 ahc_print_path(ahc, scb); 1088 ahc_print_path(ahc, scb);
1089 printf("Sending Sense\n"); 1089 printk("Sending Sense\n");
1090 } 1090 }
1091#endif 1091#endif
1092 sg->addr = ahc_get_sense_bufaddr(ahc, scb); 1092 sg->addr = ahc_get_sense_bufaddr(ahc, scb);
@@ -1162,29 +1162,29 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
1162 ahc_outb(ahc, SCSISEQ, 1162 ahc_outb(ahc, SCSISEQ,
1163 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); 1163 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
1164 1164
1165 printf("%s:%c:%d: no active SCB for reconnecting " 1165 printk("%s:%c:%d: no active SCB for reconnecting "
1166 "target - issuing BUS DEVICE RESET\n", 1166 "target - issuing BUS DEVICE RESET\n",
1167 ahc_name(ahc), devinfo.channel, devinfo.target); 1167 ahc_name(ahc), devinfo.channel, devinfo.target);
1168 printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " 1168 printk("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
1169 "ARG_1 == 0x%x ACCUM = 0x%x\n", 1169 "ARG_1 == 0x%x ACCUM = 0x%x\n",
1170 ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN), 1170 ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
1171 ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM)); 1171 ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
1172 printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " 1172 printk("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
1173 "SINDEX == 0x%x\n", 1173 "SINDEX == 0x%x\n",
1174 ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR), 1174 ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
1175 ahc_index_busy_tcl(ahc, 1175 ahc_index_busy_tcl(ahc,
1176 BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID), 1176 BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID),
1177 ahc_inb(ahc, SAVED_LUN))), 1177 ahc_inb(ahc, SAVED_LUN))),
1178 ahc_inb(ahc, SINDEX)); 1178 ahc_inb(ahc, SINDEX));
1179 printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " 1179 printk("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
1180 "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n", 1180 "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
1181 ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID), 1181 ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
1182 ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG), 1182 ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
1183 ahc_inb(ahc, SCB_CONTROL)); 1183 ahc_inb(ahc, SCB_CONTROL));
1184 printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n", 1184 printk("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n",
1185 ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI)); 1185 ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI));
1186 printf("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0)); 1186 printk("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0));
1187 printf("SEQCTL == 0x%x\n", ahc_inb(ahc, SEQCTL)); 1187 printk("SEQCTL == 0x%x\n", ahc_inb(ahc, SEQCTL));
1188 ahc_dump_card_state(ahc); 1188 ahc_dump_card_state(ahc);
1189 ahc->msgout_buf[0] = MSG_BUS_DEV_RESET; 1189 ahc->msgout_buf[0] = MSG_BUS_DEV_RESET;
1190 ahc->msgout_len = 1; 1190 ahc->msgout_len = 1;
@@ -1197,7 +1197,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
1197 case SEND_REJECT: 1197 case SEND_REJECT:
1198 { 1198 {
1199 u_int rejbyte = ahc_inb(ahc, ACCUM); 1199 u_int rejbyte = ahc_inb(ahc, ACCUM);
1200 printf("%s:%c:%d: Warning - unknown message received from " 1200 printk("%s:%c:%d: Warning - unknown message received from "
1201 "target (0x%x). Rejecting\n", 1201 "target (0x%x). Rejecting\n",
1202 ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte); 1202 ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte);
1203 break; 1203 break;
@@ -1218,7 +1218,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
1218 u_int lastphase; 1218 u_int lastphase;
1219 1219
1220 lastphase = ahc_inb(ahc, LASTPHASE); 1220 lastphase = ahc_inb(ahc, LASTPHASE);
1221 printf("%s:%c:%d: unknown scsi bus phase %x, " 1221 printk("%s:%c:%d: unknown scsi bus phase %x, "
1222 "lastphase = 0x%x. Attempting to continue\n", 1222 "lastphase = 0x%x. Attempting to continue\n",
1223 ahc_name(ahc), devinfo.channel, devinfo.target, 1223 ahc_name(ahc), devinfo.channel, devinfo.target,
1224 lastphase, ahc_inb(ahc, SCSISIGI)); 1224 lastphase, ahc_inb(ahc, SCSISIGI));
@@ -1229,7 +1229,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
1229 u_int lastphase; 1229 u_int lastphase;
1230 1230
1231 lastphase = ahc_inb(ahc, LASTPHASE); 1231 lastphase = ahc_inb(ahc, LASTPHASE);
1232 printf("%s:%c:%d: Missed busfree. " 1232 printk("%s:%c:%d: Missed busfree. "
1233 "Lastphase = 0x%x, Curphase = 0x%x\n", 1233 "Lastphase = 0x%x, Curphase = 0x%x\n",
1234 ahc_name(ahc), devinfo.channel, devinfo.target, 1234 ahc_name(ahc), devinfo.channel, devinfo.target,
1235 lastphase, ahc_inb(ahc, SCSISIGI)); 1235 lastphase, ahc_inb(ahc, SCSISIGI));
@@ -1257,7 +1257,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
1257 bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 1257 bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
1258 if (bus_phase != P_MESGIN 1258 if (bus_phase != P_MESGIN
1259 && bus_phase != P_MESGOUT) { 1259 && bus_phase != P_MESGOUT) {
1260 printf("ahc_intr: HOST_MSG_LOOP bad " 1260 printk("ahc_intr: HOST_MSG_LOOP bad "
1261 "phase 0x%x\n", 1261 "phase 0x%x\n",
1262 bus_phase); 1262 bus_phase);
1263 /* 1263 /*
@@ -1359,7 +1359,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
1359 u_int scb_index; 1359 u_int scb_index;
1360 1360
1361 ahc_print_devinfo(ahc, &devinfo); 1361 ahc_print_devinfo(ahc, &devinfo);
1362 printf("Unable to clear parity error. " 1362 printk("Unable to clear parity error. "
1363 "Resetting bus.\n"); 1363 "Resetting bus.\n");
1364 scb_index = ahc_inb(ahc, SCB_TAG); 1364 scb_index = ahc_inb(ahc, SCB_TAG);
1365 scb = ahc_lookup_scb(ahc, scb_index); 1365 scb = ahc_lookup_scb(ahc, scb_index);
@@ -1395,18 +1395,18 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
1395 break; 1395 break;
1396 } 1396 }
1397 ahc_print_path(ahc, scb); 1397 ahc_print_path(ahc, scb);
1398 printf("data overrun detected %s." 1398 printk("data overrun detected %s."
1399 " Tag == 0x%x.\n", 1399 " Tag == 0x%x.\n",
1400 ahc_phase_table[i].phasemsg, 1400 ahc_phase_table[i].phasemsg,
1401 scb->hscb->tag); 1401 scb->hscb->tag);
1402 ahc_print_path(ahc, scb); 1402 ahc_print_path(ahc, scb);
1403 printf("%s seen Data Phase. Length = %ld. NumSGs = %d.\n", 1403 printk("%s seen Data Phase. Length = %ld. NumSGs = %d.\n",
1404 ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't", 1404 ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't",
1405 ahc_get_transfer_length(scb), scb->sg_count); 1405 ahc_get_transfer_length(scb), scb->sg_count);
1406 if (scb->sg_count > 0) { 1406 if (scb->sg_count > 0) {
1407 for (i = 0; i < scb->sg_count; i++) { 1407 for (i = 0; i < scb->sg_count; i++) {
1408 1408
1409 printf("sg[%d] - Addr 0x%x%x : Length %d\n", 1409 printk("sg[%d] - Addr 0x%x%x : Length %d\n",
1410 i, 1410 i,
1411 (ahc_le32toh(scb->sg_list[i].len) >> 24 1411 (ahc_le32toh(scb->sg_list[i].len) >> 24
1412 & SG_HIGH_ADDR_BITS), 1412 & SG_HIGH_ADDR_BITS),
@@ -1453,7 +1453,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
1453 { 1453 {
1454 u_int scbindex; 1454 u_int scbindex;
1455 1455
1456 printf("%s:%c:%d:%d: Attempt to issue message failed\n", 1456 printk("%s:%c:%d:%d: Attempt to issue message failed\n",
1457 ahc_name(ahc), devinfo.channel, devinfo.target, 1457 ahc_name(ahc), devinfo.channel, devinfo.target,
1458 devinfo.lun); 1458 devinfo.lun);
1459 scbindex = ahc_inb(ahc, SCB_TAG); 1459 scbindex = ahc_inb(ahc, SCB_TAG);
@@ -1473,7 +1473,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
1473 } 1473 }
1474 case NO_FREE_SCB: 1474 case NO_FREE_SCB:
1475 { 1475 {
1476 printf("%s: No free or disconnected SCBs\n", ahc_name(ahc)); 1476 printk("%s: No free or disconnected SCBs\n", ahc_name(ahc));
1477 ahc_dump_card_state(ahc); 1477 ahc_dump_card_state(ahc);
1478 panic("for safety"); 1478 panic("for safety");
1479 break; 1479 break;
@@ -1483,7 +1483,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
1483 u_int scbptr; 1483 u_int scbptr;
1484 1484
1485 scbptr = ahc_inb(ahc, SCBPTR); 1485 scbptr = ahc_inb(ahc, SCBPTR);
1486 printf("Bogus TAG after DMA. SCBPTR %d, tag %d, our tag %d\n", 1486 printk("Bogus TAG after DMA. SCBPTR %d, tag %d, our tag %d\n",
1487 scbptr, ahc_inb(ahc, ARG_1), 1487 scbptr, ahc_inb(ahc, ARG_1),
1488 ahc->scb_data->hscbs[scbptr].tag); 1488 ahc->scb_data->hscbs[scbptr].tag);
1489 ahc_dump_card_state(ahc); 1489 ahc_dump_card_state(ahc);
@@ -1492,12 +1492,12 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
1492 } 1492 }
1493 case OUT_OF_RANGE: 1493 case OUT_OF_RANGE:
1494 { 1494 {
1495 printf("%s: BTT calculation out of range\n", ahc_name(ahc)); 1495 printk("%s: BTT calculation out of range\n", ahc_name(ahc));
1496 printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " 1496 printk("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
1497 "ARG_1 == 0x%x ACCUM = 0x%x\n", 1497 "ARG_1 == 0x%x ACCUM = 0x%x\n",
1498 ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN), 1498 ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
1499 ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM)); 1499 ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
1500 printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " 1500 printk("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
1501 "SINDEX == 0x%x\n, A == 0x%x\n", 1501 "SINDEX == 0x%x\n, A == 0x%x\n",
1502 ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR), 1502 ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
1503 ahc_index_busy_tcl(ahc, 1503 ahc_index_busy_tcl(ahc,
@@ -1505,19 +1505,19 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
1505 ahc_inb(ahc, SAVED_LUN))), 1505 ahc_inb(ahc, SAVED_LUN))),
1506 ahc_inb(ahc, SINDEX), 1506 ahc_inb(ahc, SINDEX),
1507 ahc_inb(ahc, ACCUM)); 1507 ahc_inb(ahc, ACCUM));
1508 printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " 1508 printk("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
1509 "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n", 1509 "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
1510 ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID), 1510 ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
1511 ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG), 1511 ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
1512 ahc_inb(ahc, SCB_CONTROL)); 1512 ahc_inb(ahc, SCB_CONTROL));
1513 printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n", 1513 printk("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n",
1514 ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI)); 1514 ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI));
1515 ahc_dump_card_state(ahc); 1515 ahc_dump_card_state(ahc);
1516 panic("for safety"); 1516 panic("for safety");
1517 break; 1517 break;
1518 } 1518 }
1519 default: 1519 default:
1520 printf("ahc_intr: seqint, " 1520 printk("ahc_intr: seqint, "
1521 "intstat == 0x%x, scsisigi = 0x%x\n", 1521 "intstat == 0x%x, scsisigi = 0x%x\n",
1522 intstat, ahc_inb(ahc, SCSISIGI)); 1522 intstat, ahc_inb(ahc, SCSISIGI));
1523 break; 1523 break;
@@ -1562,7 +1562,7 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
1562 intr_channel = (cur_channel == 'A') ? 'B' : 'A'; 1562 intr_channel = (cur_channel == 'A') ? 'B' : 'A';
1563 } 1563 }
1564 if (status == 0) { 1564 if (status == 0) {
1565 printf("%s: Spurious SCSI interrupt\n", ahc_name(ahc)); 1565 printk("%s: Spurious SCSI interrupt\n", ahc_name(ahc));
1566 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1566 ahc_outb(ahc, CLRINT, CLRSCSIINT);
1567 ahc_unpause(ahc); 1567 ahc_unpause(ahc);
1568 return; 1568 return;
@@ -1583,7 +1583,7 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
1583 int now_lvd; 1583 int now_lvd;
1584 1584
1585 now_lvd = ahc_inb(ahc, SBLKCTL) & ENAB40; 1585 now_lvd = ahc_inb(ahc, SBLKCTL) & ENAB40;
1586 printf("%s: Transceiver State Has Changed to %s mode\n", 1586 printk("%s: Transceiver State Has Changed to %s mode\n",
1587 ahc_name(ahc), now_lvd ? "LVD" : "SE"); 1587 ahc_name(ahc), now_lvd ? "LVD" : "SE");
1588 ahc_outb(ahc, CLRSINT0, CLRIOERR); 1588 ahc_outb(ahc, CLRSINT0, CLRIOERR);
1589 /* 1589 /*
@@ -1599,7 +1599,7 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
1599 ahc_reset_channel(ahc, intr_channel, 1599 ahc_reset_channel(ahc, intr_channel,
1600 /*Initiate Reset*/now_lvd == 0); 1600 /*Initiate Reset*/now_lvd == 0);
1601 } else if ((status & SCSIRSTI) != 0) { 1601 } else if ((status & SCSIRSTI) != 0) {
1602 printf("%s: Someone reset channel %c\n", 1602 printk("%s: Someone reset channel %c\n",
1603 ahc_name(ahc), intr_channel); 1603 ahc_name(ahc), intr_channel);
1604 if (intr_channel != cur_channel) 1604 if (intr_channel != cur_channel)
1605 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); 1605 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
@@ -1659,26 +1659,26 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
1659 ahc_print_path(ahc, scb); 1659 ahc_print_path(ahc, scb);
1660 scb->flags |= SCB_TRANSMISSION_ERROR; 1660 scb->flags |= SCB_TRANSMISSION_ERROR;
1661 } else 1661 } else
1662 printf("%s:%c:%d: ", ahc_name(ahc), intr_channel, 1662 printk("%s:%c:%d: ", ahc_name(ahc), intr_channel,
1663 SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID))); 1663 SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID)));
1664 scsirate = ahc_inb(ahc, SCSIRATE); 1664 scsirate = ahc_inb(ahc, SCSIRATE);
1665 if (silent == FALSE) { 1665 if (silent == FALSE) {
1666 printf("parity error detected %s. " 1666 printk("parity error detected %s. "
1667 "SEQADDR(0x%x) SCSIRATE(0x%x)\n", 1667 "SEQADDR(0x%x) SCSIRATE(0x%x)\n",
1668 ahc_phase_table[i].phasemsg, 1668 ahc_phase_table[i].phasemsg,
1669 ahc_inw(ahc, SEQADDR0), 1669 ahc_inw(ahc, SEQADDR0),
1670 scsirate); 1670 scsirate);
1671 if ((ahc->features & AHC_DT) != 0) { 1671 if ((ahc->features & AHC_DT) != 0) {
1672 if ((sstat2 & CRCVALERR) != 0) 1672 if ((sstat2 & CRCVALERR) != 0)
1673 printf("\tCRC Value Mismatch\n"); 1673 printk("\tCRC Value Mismatch\n");
1674 if ((sstat2 & CRCENDERR) != 0) 1674 if ((sstat2 & CRCENDERR) != 0)
1675 printf("\tNo terminal CRC packet " 1675 printk("\tNo terminal CRC packet "
1676 "recevied\n"); 1676 "recevied\n");
1677 if ((sstat2 & CRCREQERR) != 0) 1677 if ((sstat2 & CRCREQERR) != 0)
1678 printf("\tIllegal CRC packet " 1678 printk("\tIllegal CRC packet "
1679 "request\n"); 1679 "request\n");
1680 if ((sstat2 & DUAL_EDGE_ERR) != 0) 1680 if ((sstat2 & DUAL_EDGE_ERR) != 0)
1681 printf("\tUnexpected %sDT Data Phase\n", 1681 printk("\tUnexpected %sDT Data Phase\n",
1682 (scsirate & SINGLE_EDGE) 1682 (scsirate & SINGLE_EDGE)
1683 ? "" : "non-"); 1683 ? "" : "non-");
1684 } 1684 }
@@ -1746,7 +1746,7 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
1746 1746
1747 scb = ahc_lookup_scb(ahc, scb_index); 1747 scb = ahc_lookup_scb(ahc, scb_index);
1748 if (scb == NULL) { 1748 if (scb == NULL) {
1749 printf("%s: ahc_intr - referenced scb not " 1749 printk("%s: ahc_intr - referenced scb not "
1750 "valid during SELTO scb(%d, %d)\n", 1750 "valid during SELTO scb(%d, %d)\n",
1751 ahc_name(ahc), scbptr, scb_index); 1751 ahc_name(ahc), scbptr, scb_index);
1752 ahc_dump_card_state(ahc); 1752 ahc_dump_card_state(ahc);
@@ -1755,7 +1755,7 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
1755#ifdef AHC_DEBUG 1755#ifdef AHC_DEBUG
1756 if ((ahc_debug & AHC_SHOW_SELTO) != 0) { 1756 if ((ahc_debug & AHC_SHOW_SELTO) != 0) {
1757 ahc_print_path(ahc, scb); 1757 ahc_print_path(ahc, scb);
1758 printf("Saw Selection Timeout for SCB 0x%x\n", 1758 printk("Saw Selection Timeout for SCB 0x%x\n",
1759 scb_index); 1759 scb_index);
1760 } 1760 }
1761#endif 1761#endif
@@ -1831,7 +1831,7 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
1831 == MSG_ABORT_TAG) 1831 == MSG_ABORT_TAG)
1832 tag = scb->hscb->tag; 1832 tag = scb->hscb->tag;
1833 ahc_print_path(ahc, scb); 1833 ahc_print_path(ahc, scb);
1834 printf("SCB %d - Abort%s Completed.\n", 1834 printk("SCB %d - Abort%s Completed.\n",
1835 scb->hscb->tag, tag == SCB_LIST_NULL ? 1835 scb->hscb->tag, tag == SCB_LIST_NULL ?
1836 "" : " Tag"); 1836 "" : " Tag");
1837 ahc_abort_scbs(ahc, target, channel, 1837 ahc_abort_scbs(ahc, target, channel,
@@ -1934,7 +1934,7 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
1934 * We had not fully identified this connection, 1934 * We had not fully identified this connection,
1935 * so we cannot abort anything. 1935 * so we cannot abort anything.
1936 */ 1936 */
1937 printf("%s: ", ahc_name(ahc)); 1937 printk("%s: ", ahc_name(ahc));
1938 } 1938 }
1939 for (i = 0; i < num_phases; i++) { 1939 for (i = 0; i < num_phases; i++) {
1940 if (lastphase == ahc_phase_table[i].phase) 1940 if (lastphase == ahc_phase_table[i].phase)
@@ -1949,7 +1949,7 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
1949 */ 1949 */
1950 ahc_force_renegotiation(ahc, &devinfo); 1950 ahc_force_renegotiation(ahc, &devinfo);
1951 } 1951 }
1952 printf("Unexpected busfree %s\n" 1952 printk("Unexpected busfree %s\n"
1953 "SEQADDR == 0x%x\n", 1953 "SEQADDR == 0x%x\n",
1954 ahc_phase_table[i].phasemsg, 1954 ahc_phase_table[i].phasemsg,
1955 ahc_inb(ahc, SEQADDR0) 1955 ahc_inb(ahc, SEQADDR0)
@@ -1958,7 +1958,7 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
1958 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1958 ahc_outb(ahc, CLRINT, CLRSCSIINT);
1959 ahc_restart(ahc); 1959 ahc_restart(ahc);
1960 } else { 1960 } else {
1961 printf("%s: Missing case in ahc_handle_scsiint. status = %x\n", 1961 printk("%s: Missing case in ahc_handle_scsiint. status = %x\n",
1962 ahc_name(ahc), status); 1962 ahc_name(ahc), status);
1963 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1963 ahc_outb(ahc, CLRINT, CLRSCSIINT);
1964 } 1964 }
@@ -2025,7 +2025,7 @@ ahc_clear_critical_section(struct ahc_softc *ahc)
2025 break; 2025 break;
2026 2026
2027 if (steps > AHC_MAX_STEPS) { 2027 if (steps > AHC_MAX_STEPS) {
2028 printf("%s: Infinite loop in critical section\n", 2028 printk("%s: Infinite loop in critical section\n",
2029 ahc_name(ahc)); 2029 ahc_name(ahc));
2030 ahc_dump_card_state(ahc); 2030 ahc_dump_card_state(ahc);
2031 panic("critical section loop"); 2031 panic("critical section loop");
@@ -2104,23 +2104,23 @@ ahc_print_scb(struct scb *scb)
2104 2104
2105 struct hardware_scb *hscb = scb->hscb; 2105 struct hardware_scb *hscb = scb->hscb;
2106 2106
2107 printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n", 2107 printk("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n",
2108 (void *)scb, 2108 (void *)scb,
2109 hscb->control, 2109 hscb->control,
2110 hscb->scsiid, 2110 hscb->scsiid,
2111 hscb->lun, 2111 hscb->lun,
2112 hscb->cdb_len); 2112 hscb->cdb_len);
2113 printf("Shared Data: "); 2113 printk("Shared Data: ");
2114 for (i = 0; i < sizeof(hscb->shared_data.cdb); i++) 2114 for (i = 0; i < sizeof(hscb->shared_data.cdb); i++)
2115 printf("%#02x", hscb->shared_data.cdb[i]); 2115 printk("%#02x", hscb->shared_data.cdb[i]);
2116 printf(" dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n", 2116 printk(" dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n",
2117 ahc_le32toh(hscb->dataptr), 2117 ahc_le32toh(hscb->dataptr),
2118 ahc_le32toh(hscb->datacnt), 2118 ahc_le32toh(hscb->datacnt),
2119 ahc_le32toh(hscb->sgptr), 2119 ahc_le32toh(hscb->sgptr),
2120 hscb->tag); 2120 hscb->tag);
2121 if (scb->sg_count > 0) { 2121 if (scb->sg_count > 0) {
2122 for (i = 0; i < scb->sg_count; i++) { 2122 for (i = 0; i < scb->sg_count; i++) {
2123 printf("sg[%d] - Addr 0x%x%x : Length %d\n", 2123 printk("sg[%d] - Addr 0x%x%x : Length %d\n",
2124 i, 2124 i,
2125 (ahc_le32toh(scb->sg_list[i].len) >> 24 2125 (ahc_le32toh(scb->sg_list[i].len) >> 24
2126 & SG_HIGH_ADDR_BITS), 2126 & SG_HIGH_ADDR_BITS),
@@ -2152,8 +2152,7 @@ ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel)
2152 && ahc->enabled_targets[scsi_id] != master_tstate) 2152 && ahc->enabled_targets[scsi_id] != master_tstate)
2153 panic("%s: ahc_alloc_tstate - Target already allocated", 2153 panic("%s: ahc_alloc_tstate - Target already allocated",
2154 ahc_name(ahc)); 2154 ahc_name(ahc));
2155 tstate = (struct ahc_tmode_tstate*)malloc(sizeof(*tstate), 2155 tstate = kmalloc(sizeof(*tstate), GFP_ATOMIC);
2156 M_DEVBUF, M_NOWAIT);
2157 if (tstate == NULL) 2156 if (tstate == NULL)
2158 return (NULL); 2157 return (NULL);
2159 2158
@@ -2202,7 +2201,7 @@ ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
2202 scsi_id += 8; 2201 scsi_id += 8;
2203 tstate = ahc->enabled_targets[scsi_id]; 2202 tstate = ahc->enabled_targets[scsi_id];
2204 if (tstate != NULL) 2203 if (tstate != NULL)
2205 free(tstate, M_DEVBUF); 2204 kfree(tstate);
2206 ahc->enabled_targets[scsi_id] = NULL; 2205 ahc->enabled_targets[scsi_id] = NULL;
2207} 2206}
2208#endif 2207#endif
@@ -2589,13 +2588,13 @@ ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2589 CAM_LUN_WILDCARD, AC_TRANSFER_NEG); 2588 CAM_LUN_WILDCARD, AC_TRANSFER_NEG);
2590 if (bootverbose) { 2589 if (bootverbose) {
2591 if (offset != 0) { 2590 if (offset != 0) {
2592 printf("%s: target %d synchronous at %sMHz%s, " 2591 printk("%s: target %d synchronous at %sMHz%s, "
2593 "offset = 0x%x\n", ahc_name(ahc), 2592 "offset = 0x%x\n", ahc_name(ahc),
2594 devinfo->target, syncrate->rate, 2593 devinfo->target, syncrate->rate,
2595 (ppr_options & MSG_EXT_PPR_DT_REQ) 2594 (ppr_options & MSG_EXT_PPR_DT_REQ)
2596 ? " DT" : "", offset); 2595 ? " DT" : "", offset);
2597 } else { 2596 } else {
2598 printf("%s: target %d using " 2597 printk("%s: target %d using "
2599 "asynchronous transfers\n", 2598 "asynchronous transfers\n",
2600 ahc_name(ahc), devinfo->target); 2599 ahc_name(ahc), devinfo->target);
2601 } 2600 }
@@ -2658,7 +2657,7 @@ ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2658 ahc_send_async(ahc, devinfo->channel, devinfo->target, 2657 ahc_send_async(ahc, devinfo->channel, devinfo->target,
2659 CAM_LUN_WILDCARD, AC_TRANSFER_NEG); 2658 CAM_LUN_WILDCARD, AC_TRANSFER_NEG);
2660 if (bootverbose) { 2659 if (bootverbose) {
2661 printf("%s: target %d using %dbit transfers\n", 2660 printk("%s: target %d using %dbit transfers\n",
2662 ahc_name(ahc), devinfo->target, 2661 ahc_name(ahc), devinfo->target,
2663 8 * (0x01 << width)); 2662 8 * (0x01 << width));
2664 } 2663 }
@@ -2835,7 +2834,7 @@ ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target,
2835void 2834void
2836ahc_print_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 2835ahc_print_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
2837{ 2836{
2838 printf("%s:%c:%d:%d: ", ahc_name(ahc), devinfo->channel, 2837 printk("%s:%c:%d:%d: ", ahc_name(ahc), devinfo->channel,
2839 devinfo->target, devinfo->lun); 2838 devinfo->target, devinfo->lun);
2840} 2839}
2841 2840
@@ -2907,7 +2906,7 @@ ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2907 ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET; 2906 ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET;
2908 ahc->msgout_len++; 2907 ahc->msgout_len++;
2909 ahc_print_path(ahc, scb); 2908 ahc_print_path(ahc, scb);
2910 printf("Bus Device Reset Message Sent\n"); 2909 printk("Bus Device Reset Message Sent\n");
2911 /* 2910 /*
2912 * Clear our selection hardware in advance of 2911 * Clear our selection hardware in advance of
2913 * the busfree. We may have an entry in the waiting 2912 * the busfree. We may have an entry in the waiting
@@ -2923,7 +2922,7 @@ ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2923 ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT; 2922 ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT;
2924 ahc->msgout_len++; 2923 ahc->msgout_len++;
2925 ahc_print_path(ahc, scb); 2924 ahc_print_path(ahc, scb);
2926 printf("Abort%s Message Sent\n", 2925 printk("Abort%s Message Sent\n",
2927 (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : ""); 2926 (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : "");
2928 /* 2927 /*
2929 * Clear our selection hardware in advance of 2928 * Clear our selection hardware in advance of
@@ -2936,9 +2935,9 @@ ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2936 } else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) { 2935 } else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) {
2937 ahc_build_transfer_msg(ahc, devinfo); 2936 ahc_build_transfer_msg(ahc, devinfo);
2938 } else { 2937 } else {
2939 printf("ahc_intr: AWAITING_MSG for an SCB that " 2938 printk("ahc_intr: AWAITING_MSG for an SCB that "
2940 "does not have a waiting message\n"); 2939 "does not have a waiting message\n");
2941 printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid, 2940 printk("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
2942 devinfo->target_mask); 2941 devinfo->target_mask);
2943 panic("SCB = %d, SCB Control = %x, MSG_OUT = %x " 2942 panic("SCB = %d, SCB Control = %x, MSG_OUT = %x "
2944 "SCB flags = %x", scb->hscb->tag, scb->hscb->control, 2943 "SCB flags = %x", scb->hscb->tag, scb->hscb->control,
@@ -3019,7 +3018,7 @@ ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3019 3018
3020 if (bootverbose) { 3019 if (bootverbose) {
3021 ahc_print_devinfo(ahc, devinfo); 3020 ahc_print_devinfo(ahc, devinfo);
3022 printf("Ensuring async\n"); 3021 printk("Ensuring async\n");
3023 } 3022 }
3024 } 3023 }
3025 3024
@@ -3067,7 +3066,7 @@ ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
3067 ahc->msgout_buf + ahc->msgout_index, period, offset); 3066 ahc->msgout_buf + ahc->msgout_index, period, offset);
3068 ahc->msgout_len += 5; 3067 ahc->msgout_len += 5;
3069 if (bootverbose) { 3068 if (bootverbose) {
3070 printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n", 3069 printk("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n",
3071 ahc_name(ahc), devinfo->channel, devinfo->target, 3070 ahc_name(ahc), devinfo->channel, devinfo->target,
3072 devinfo->lun, period, offset); 3071 devinfo->lun, period, offset);
3073 } 3072 }
@@ -3085,7 +3084,7 @@ ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
3085 ahc->msgout_buf + ahc->msgout_index, bus_width); 3084 ahc->msgout_buf + ahc->msgout_index, bus_width);
3086 ahc->msgout_len += 4; 3085 ahc->msgout_len += 4;
3087 if (bootverbose) { 3086 if (bootverbose) {
3088 printf("(%s:%c:%d:%d): Sending WDTR %x\n", 3087 printk("(%s:%c:%d:%d): Sending WDTR %x\n",
3089 ahc_name(ahc), devinfo->channel, devinfo->target, 3088 ahc_name(ahc), devinfo->channel, devinfo->target,
3090 devinfo->lun, bus_width); 3089 devinfo->lun, bus_width);
3091 } 3090 }
@@ -3107,7 +3106,7 @@ ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
3107 bus_width, ppr_options); 3106 bus_width, ppr_options);
3108 ahc->msgout_len += 8; 3107 ahc->msgout_len += 8;
3109 if (bootverbose) { 3108 if (bootverbose) {
3110 printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, " 3109 printk("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, "
3111 "offset %x, ppr_options %x\n", ahc_name(ahc), 3110 "offset %x, ppr_options %x\n", ahc_name(ahc),
3112 devinfo->channel, devinfo->target, devinfo->lun, 3111 devinfo->channel, devinfo->target, devinfo->lun,
3113 bus_width, period, offset, ppr_options); 3112 bus_width, period, offset, ppr_options);
@@ -3160,7 +3159,7 @@ ahc_handle_proto_violation(struct ahc_softc *ahc)
3160 * to match. 3159 * to match.
3161 */ 3160 */
3162 ahc_print_devinfo(ahc, &devinfo); 3161 ahc_print_devinfo(ahc, &devinfo);
3163 printf("Target did not send an IDENTIFY message. " 3162 printk("Target did not send an IDENTIFY message. "
3164 "LASTPHASE = 0x%x.\n", lastphase); 3163 "LASTPHASE = 0x%x.\n", lastphase);
3165 scb = NULL; 3164 scb = NULL;
3166 } else if (scb == NULL) { 3165 } else if (scb == NULL) {
@@ -3169,13 +3168,13 @@ ahc_handle_proto_violation(struct ahc_softc *ahc)
3169 * transaction. Print an error and reset the bus. 3168 * transaction. Print an error and reset the bus.
3170 */ 3169 */
3171 ahc_print_devinfo(ahc, &devinfo); 3170 ahc_print_devinfo(ahc, &devinfo);
3172 printf("No SCB found during protocol violation\n"); 3171 printk("No SCB found during protocol violation\n");
3173 goto proto_violation_reset; 3172 goto proto_violation_reset;
3174 } else { 3173 } else {
3175 ahc_set_transaction_status(scb, CAM_SEQUENCE_FAIL); 3174 ahc_set_transaction_status(scb, CAM_SEQUENCE_FAIL);
3176 if ((seq_flags & NO_CDB_SENT) != 0) { 3175 if ((seq_flags & NO_CDB_SENT) != 0) {
3177 ahc_print_path(ahc, scb); 3176 ahc_print_path(ahc, scb);
3178 printf("No or incomplete CDB sent to device.\n"); 3177 printk("No or incomplete CDB sent to device.\n");
3179 } else if ((ahc_inb(ahc, SCB_CONTROL) & STATUS_RCVD) == 0) { 3178 } else if ((ahc_inb(ahc, SCB_CONTROL) & STATUS_RCVD) == 0) {
3180 /* 3179 /*
3181 * The target never bothered to provide status to 3180 * The target never bothered to provide status to
@@ -3185,10 +3184,10 @@ ahc_handle_proto_violation(struct ahc_softc *ahc)
3185 * message. 3184 * message.
3186 */ 3185 */
3187 ahc_print_path(ahc, scb); 3186 ahc_print_path(ahc, scb);
3188 printf("Completed command without status.\n"); 3187 printk("Completed command without status.\n");
3189 } else { 3188 } else {
3190 ahc_print_path(ahc, scb); 3189 ahc_print_path(ahc, scb);
3191 printf("Unknown protocol violation.\n"); 3190 printk("Unknown protocol violation.\n");
3192 ahc_dump_card_state(ahc); 3191 ahc_dump_card_state(ahc);
3193 } 3192 }
3194 } 3193 }
@@ -3202,7 +3201,7 @@ proto_violation_reset:
3202 * it away with a bus reset. 3201 * it away with a bus reset.
3203 */ 3202 */
3204 found = ahc_reset_channel(ahc, 'A', TRUE); 3203 found = ahc_reset_channel(ahc, 'A', TRUE);
3205 printf("%s: Issued Channel %c Bus Reset. " 3204 printk("%s: Issued Channel %c Bus Reset. "
3206 "%d SCBs aborted\n", ahc_name(ahc), 'A', found); 3205 "%d SCBs aborted\n", ahc_name(ahc), 'A', found);
3207 } else { 3206 } else {
3208 /* 3207 /*
@@ -3224,7 +3223,7 @@ proto_violation_reset:
3224 ahc_print_path(ahc, scb); 3223 ahc_print_path(ahc, scb);
3225 scb->flags |= SCB_ABORT; 3224 scb->flags |= SCB_ABORT;
3226 } 3225 }
3227 printf("Protocol violation %s. Attempting to abort.\n", 3226 printk("Protocol violation %s. Attempting to abort.\n",
3228 ahc_lookup_phase_entry(curphase)->phasemsg); 3227 ahc_lookup_phase_entry(curphase)->phasemsg);
3229 } 3228 }
3230} 3229}
@@ -3257,14 +3256,14 @@ reswitch:
3257#ifdef AHC_DEBUG 3256#ifdef AHC_DEBUG
3258 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { 3257 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
3259 ahc_print_devinfo(ahc, &devinfo); 3258 ahc_print_devinfo(ahc, &devinfo);
3260 printf("INITIATOR_MSG_OUT"); 3259 printk("INITIATOR_MSG_OUT");
3261 } 3260 }
3262#endif 3261#endif
3263 phasemis = bus_phase != P_MESGOUT; 3262 phasemis = bus_phase != P_MESGOUT;
3264 if (phasemis) { 3263 if (phasemis) {
3265#ifdef AHC_DEBUG 3264#ifdef AHC_DEBUG
3266 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { 3265 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
3267 printf(" PHASEMIS %s\n", 3266 printk(" PHASEMIS %s\n",
3268 ahc_lookup_phase_entry(bus_phase) 3267 ahc_lookup_phase_entry(bus_phase)
3269 ->phasemsg); 3268 ->phasemsg);
3270 } 3269 }
@@ -3291,7 +3290,7 @@ reswitch:
3291 ahc_outb(ahc, CLRSINT1, CLRREQINIT); 3290 ahc_outb(ahc, CLRSINT1, CLRREQINIT);
3292#ifdef AHC_DEBUG 3291#ifdef AHC_DEBUG
3293 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) 3292 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
3294 printf(" byte 0x%x\n", ahc->send_msg_perror); 3293 printk(" byte 0x%x\n", ahc->send_msg_perror);
3295#endif 3294#endif
3296 ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR); 3295 ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR);
3297 break; 3296 break;
@@ -3321,7 +3320,7 @@ reswitch:
3321 ahc_outb(ahc, CLRSINT1, CLRREQINIT); 3320 ahc_outb(ahc, CLRSINT1, CLRREQINIT);
3322#ifdef AHC_DEBUG 3321#ifdef AHC_DEBUG
3323 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) 3322 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
3324 printf(" byte 0x%x\n", 3323 printk(" byte 0x%x\n",
3325 ahc->msgout_buf[ahc->msgout_index]); 3324 ahc->msgout_buf[ahc->msgout_index]);
3326#endif 3325#endif
3327 ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]); 3326 ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
@@ -3335,14 +3334,14 @@ reswitch:
3335#ifdef AHC_DEBUG 3334#ifdef AHC_DEBUG
3336 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { 3335 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
3337 ahc_print_devinfo(ahc, &devinfo); 3336 ahc_print_devinfo(ahc, &devinfo);
3338 printf("INITIATOR_MSG_IN"); 3337 printk("INITIATOR_MSG_IN");
3339 } 3338 }
3340#endif 3339#endif
3341 phasemis = bus_phase != P_MESGIN; 3340 phasemis = bus_phase != P_MESGIN;
3342 if (phasemis) { 3341 if (phasemis) {
3343#ifdef AHC_DEBUG 3342#ifdef AHC_DEBUG
3344 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { 3343 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
3345 printf(" PHASEMIS %s\n", 3344 printk(" PHASEMIS %s\n",
3346 ahc_lookup_phase_entry(bus_phase) 3345 ahc_lookup_phase_entry(bus_phase)
3347 ->phasemsg); 3346 ->phasemsg);
3348 } 3347 }
@@ -3363,7 +3362,7 @@ reswitch:
3363 ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL); 3362 ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL);
3364#ifdef AHC_DEBUG 3363#ifdef AHC_DEBUG
3365 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) 3364 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
3366 printf(" byte 0x%x\n", 3365 printk(" byte 0x%x\n",
3367 ahc->msgin_buf[ahc->msgin_index]); 3366 ahc->msgin_buf[ahc->msgin_index]);
3368#endif 3367#endif
3369 3368
@@ -3385,7 +3384,7 @@ reswitch:
3385#ifdef AHC_DEBUG 3384#ifdef AHC_DEBUG
3386 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { 3385 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
3387 ahc_print_devinfo(ahc, &devinfo); 3386 ahc_print_devinfo(ahc, &devinfo);
3388 printf("Asserting ATN for response\n"); 3387 printk("Asserting ATN for response\n");
3389 } 3388 }
3390#endif 3389#endif
3391 ahc_assert_atn(ahc); 3390 ahc_assert_atn(ahc);
@@ -3666,7 +3665,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3666 targ_scsirate & WIDEXFER, 3665 targ_scsirate & WIDEXFER,
3667 devinfo->role); 3666 devinfo->role);
3668 if (bootverbose) { 3667 if (bootverbose) {
3669 printf("(%s:%c:%d:%d): Received " 3668 printk("(%s:%c:%d:%d): Received "
3670 "SDTR period %x, offset %x\n\t" 3669 "SDTR period %x, offset %x\n\t"
3671 "Filtered to period %x, offset %x\n", 3670 "Filtered to period %x, offset %x\n",
3672 ahc_name(ahc), devinfo->channel, 3671 ahc_name(ahc), devinfo->channel,
@@ -3697,7 +3696,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3697 */ 3696 */
3698 if (bootverbose 3697 if (bootverbose
3699 && devinfo->role == ROLE_INITIATOR) { 3698 && devinfo->role == ROLE_INITIATOR) {
3700 printf("(%s:%c:%d:%d): Target " 3699 printk("(%s:%c:%d:%d): Target "
3701 "Initiated SDTR\n", 3700 "Initiated SDTR\n",
3702 ahc_name(ahc), devinfo->channel, 3701 ahc_name(ahc), devinfo->channel,
3703 devinfo->target, devinfo->lun); 3702 devinfo->target, devinfo->lun);
@@ -3739,7 +3738,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3739 ahc_validate_width(ahc, tinfo, &bus_width, 3738 ahc_validate_width(ahc, tinfo, &bus_width,
3740 devinfo->role); 3739 devinfo->role);
3741 if (bootverbose) { 3740 if (bootverbose) {
3742 printf("(%s:%c:%d:%d): Received WDTR " 3741 printk("(%s:%c:%d:%d): Received WDTR "
3743 "%x filtered to %x\n", 3742 "%x filtered to %x\n",
3744 ahc_name(ahc), devinfo->channel, 3743 ahc_name(ahc), devinfo->channel,
3745 devinfo->target, devinfo->lun, 3744 devinfo->target, devinfo->lun,
@@ -3755,7 +3754,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3755 */ 3754 */
3756 if (saved_width > bus_width) { 3755 if (saved_width > bus_width) {
3757 reject = TRUE; 3756 reject = TRUE;
3758 printf("(%s:%c:%d:%d): requested %dBit " 3757 printk("(%s:%c:%d:%d): requested %dBit "
3759 "transfers. Rejecting...\n", 3758 "transfers. Rejecting...\n",
3760 ahc_name(ahc), devinfo->channel, 3759 ahc_name(ahc), devinfo->channel,
3761 devinfo->target, devinfo->lun, 3760 devinfo->target, devinfo->lun,
@@ -3768,7 +3767,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3768 */ 3767 */
3769 if (bootverbose 3768 if (bootverbose
3770 && devinfo->role == ROLE_INITIATOR) { 3769 && devinfo->role == ROLE_INITIATOR) {
3771 printf("(%s:%c:%d:%d): Target " 3770 printk("(%s:%c:%d:%d): Target "
3772 "Initiated WDTR\n", 3771 "Initiated WDTR\n",
3773 ahc_name(ahc), devinfo->channel, 3772 ahc_name(ahc), devinfo->channel,
3774 devinfo->target, devinfo->lun); 3773 devinfo->target, devinfo->lun);
@@ -3886,12 +3885,12 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3886 } 3885 }
3887 } else { 3886 } else {
3888 if (devinfo->role != ROLE_TARGET) 3887 if (devinfo->role != ROLE_TARGET)
3889 printf("(%s:%c:%d:%d): Target " 3888 printk("(%s:%c:%d:%d): Target "
3890 "Initiated PPR\n", 3889 "Initiated PPR\n",
3891 ahc_name(ahc), devinfo->channel, 3890 ahc_name(ahc), devinfo->channel,
3892 devinfo->target, devinfo->lun); 3891 devinfo->target, devinfo->lun);
3893 else 3892 else
3894 printf("(%s:%c:%d:%d): Initiator " 3893 printk("(%s:%c:%d:%d): Initiator "
3895 "Initiated PPR\n", 3894 "Initiated PPR\n",
3896 ahc_name(ahc), devinfo->channel, 3895 ahc_name(ahc), devinfo->channel,
3897 devinfo->target, devinfo->lun); 3896 devinfo->target, devinfo->lun);
@@ -3903,7 +3902,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3903 response = TRUE; 3902 response = TRUE;
3904 } 3903 }
3905 if (bootverbose) { 3904 if (bootverbose) {
3906 printf("(%s:%c:%d:%d): Received PPR width %x, " 3905 printk("(%s:%c:%d:%d): Received PPR width %x, "
3907 "period %x, offset %x,options %x\n" 3906 "period %x, offset %x,options %x\n"
3908 "\tFiltered to width %x, period %x, " 3907 "\tFiltered to width %x, period %x, "
3909 "offset %x, options %x\n", 3908 "offset %x, options %x\n",
@@ -4033,7 +4032,7 @@ ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
4033 * Attempt to negotiate SPI-2 style. 4032 * Attempt to negotiate SPI-2 style.
4034 */ 4033 */
4035 if (bootverbose) { 4034 if (bootverbose) {
4036 printf("(%s:%c:%d:%d): PPR Rejected. " 4035 printk("(%s:%c:%d:%d): PPR Rejected. "
4037 "Trying WDTR/SDTR\n", 4036 "Trying WDTR/SDTR\n",
4038 ahc_name(ahc), devinfo->channel, 4037 ahc_name(ahc), devinfo->channel,
4039 devinfo->target, devinfo->lun); 4038 devinfo->target, devinfo->lun);
@@ -4049,7 +4048,7 @@ ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
4049 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) { 4048 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) {
4050 4049
4051 /* note 8bit xfers */ 4050 /* note 8bit xfers */
4052 printf("(%s:%c:%d:%d): refuses WIDE negotiation. Using " 4051 printk("(%s:%c:%d:%d): refuses WIDE negotiation. Using "
4053 "8bit transfers\n", ahc_name(ahc), 4052 "8bit transfers\n", ahc_name(ahc),
4054 devinfo->channel, devinfo->target, devinfo->lun); 4053 devinfo->channel, devinfo->target, devinfo->lun);
4055 ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT, 4054 ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
@@ -4077,7 +4076,7 @@ ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
4077 /*offset*/0, /*ppr_options*/0, 4076 /*offset*/0, /*ppr_options*/0,
4078 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 4077 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
4079 /*paused*/TRUE); 4078 /*paused*/TRUE);
4080 printf("(%s:%c:%d:%d): refuses synchronous negotiation. " 4079 printk("(%s:%c:%d:%d): refuses synchronous negotiation. "
4081 "Using asynchronous transfers\n", 4080 "Using asynchronous transfers\n",
4082 ahc_name(ahc), devinfo->channel, 4081 ahc_name(ahc), devinfo->channel,
4083 devinfo->target, devinfo->lun); 4082 devinfo->target, devinfo->lun);
@@ -4088,13 +4087,13 @@ ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
4088 tag_type = (scb->hscb->control & MSG_SIMPLE_TASK); 4087 tag_type = (scb->hscb->control & MSG_SIMPLE_TASK);
4089 4088
4090 if (tag_type == MSG_SIMPLE_TASK) { 4089 if (tag_type == MSG_SIMPLE_TASK) {
4091 printf("(%s:%c:%d:%d): refuses tagged commands. " 4090 printk("(%s:%c:%d:%d): refuses tagged commands. "
4092 "Performing non-tagged I/O\n", ahc_name(ahc), 4091 "Performing non-tagged I/O\n", ahc_name(ahc),
4093 devinfo->channel, devinfo->target, devinfo->lun); 4092 devinfo->channel, devinfo->target, devinfo->lun);
4094 ahc_set_tags(ahc, scb->io_ctx, devinfo, AHC_QUEUE_NONE); 4093 ahc_set_tags(ahc, scb->io_ctx, devinfo, AHC_QUEUE_NONE);
4095 mask = ~0x23; 4094 mask = ~0x23;
4096 } else { 4095 } else {
4097 printf("(%s:%c:%d:%d): refuses %s tagged commands. " 4096 printk("(%s:%c:%d:%d): refuses %s tagged commands. "
4098 "Performing simple queue tagged I/O only\n", 4097 "Performing simple queue tagged I/O only\n",
4099 ahc_name(ahc), devinfo->channel, devinfo->target, 4098 ahc_name(ahc), devinfo->channel, devinfo->target,
4100 devinfo->lun, tag_type == MSG_ORDERED_TASK 4099 devinfo->lun, tag_type == MSG_ORDERED_TASK
@@ -4144,7 +4143,7 @@ ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
4144 /* 4143 /*
4145 * Otherwise, we ignore it. 4144 * Otherwise, we ignore it.
4146 */ 4145 */
4147 printf("%s:%c:%d: Message reject for %x -- ignored\n", 4146 printk("%s:%c:%d: Message reject for %x -- ignored\n",
4148 ahc_name(ahc), devinfo->channel, devinfo->target, 4147 ahc_name(ahc), devinfo->channel, devinfo->target,
4149 last_msg); 4148 last_msg);
4150 } 4149 }
@@ -4369,7 +4368,7 @@ ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
4369 4368
4370 if (message != NULL 4369 if (message != NULL
4371 && (verbose_level <= bootverbose)) 4370 && (verbose_level <= bootverbose))
4372 printf("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc), 4371 printk("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc),
4373 message, devinfo->channel, devinfo->target, found); 4372 message, devinfo->channel, devinfo->target, found);
4374} 4373}
4375 4374
@@ -4408,23 +4407,22 @@ ahc_alloc(void *platform_arg, char *name)
4408 int i; 4407 int i;
4409 4408
4410#ifndef __FreeBSD__ 4409#ifndef __FreeBSD__
4411 ahc = malloc(sizeof(*ahc), M_DEVBUF, M_NOWAIT); 4410 ahc = kmalloc(sizeof(*ahc), GFP_ATOMIC);
4412 if (!ahc) { 4411 if (!ahc) {
4413 printf("aic7xxx: cannot malloc softc!\n"); 4412 printk("aic7xxx: cannot malloc softc!\n");
4414 free(name, M_DEVBUF); 4413 kfree(name);
4415 return NULL; 4414 return NULL;
4416 } 4415 }
4417#else 4416#else
4418 ahc = device_get_softc((device_t)platform_arg); 4417 ahc = device_get_softc((device_t)platform_arg);
4419#endif 4418#endif
4420 memset(ahc, 0, sizeof(*ahc)); 4419 memset(ahc, 0, sizeof(*ahc));
4421 ahc->seep_config = malloc(sizeof(*ahc->seep_config), 4420 ahc->seep_config = kmalloc(sizeof(*ahc->seep_config), GFP_ATOMIC);
4422 M_DEVBUF, M_NOWAIT);
4423 if (ahc->seep_config == NULL) { 4421 if (ahc->seep_config == NULL) {
4424#ifndef __FreeBSD__ 4422#ifndef __FreeBSD__
4425 free(ahc, M_DEVBUF); 4423 kfree(ahc);
4426#endif 4424#endif
4427 free(name, M_DEVBUF); 4425 kfree(name);
4428 return (NULL); 4426 return (NULL);
4429 } 4427 }
4430 LIST_INIT(&ahc->pending_scbs); 4428 LIST_INIT(&ahc->pending_scbs);
@@ -4466,8 +4464,7 @@ ahc_softc_init(struct ahc_softc *ahc)
4466 ahc->pause = ahc->unpause | PAUSE; 4464 ahc->pause = ahc->unpause | PAUSE;
4467 /* XXX The shared scb data stuff should be deprecated */ 4465 /* XXX The shared scb data stuff should be deprecated */
4468 if (ahc->scb_data == NULL) { 4466 if (ahc->scb_data == NULL) {
4469 ahc->scb_data = malloc(sizeof(*ahc->scb_data), 4467 ahc->scb_data = kmalloc(sizeof(*ahc->scb_data), GFP_ATOMIC);
4470 M_DEVBUF, M_NOWAIT);
4471 if (ahc->scb_data == NULL) 4468 if (ahc->scb_data == NULL)
4472 return (ENOMEM); 4469 return (ENOMEM);
4473 memset(ahc->scb_data, 0, sizeof(*ahc->scb_data)); 4470 memset(ahc->scb_data, 0, sizeof(*ahc->scb_data));
@@ -4486,7 +4483,7 @@ void
4486ahc_set_name(struct ahc_softc *ahc, char *name) 4483ahc_set_name(struct ahc_softc *ahc, char *name)
4487{ 4484{
4488 if (ahc->name != NULL) 4485 if (ahc->name != NULL)
4489 free(ahc->name, M_DEVBUF); 4486 kfree(ahc->name);
4490 ahc->name = name; 4487 ahc->name = name;
4491} 4488}
4492 4489
@@ -4540,25 +4537,25 @@ ahc_free(struct ahc_softc *ahc)
4540 lstate = tstate->enabled_luns[j]; 4537 lstate = tstate->enabled_luns[j];
4541 if (lstate != NULL) { 4538 if (lstate != NULL) {
4542 xpt_free_path(lstate->path); 4539 xpt_free_path(lstate->path);
4543 free(lstate, M_DEVBUF); 4540 kfree(lstate);
4544 } 4541 }
4545 } 4542 }
4546#endif 4543#endif
4547 free(tstate, M_DEVBUF); 4544 kfree(tstate);
4548 } 4545 }
4549 } 4546 }
4550#ifdef AHC_TARGET_MODE 4547#ifdef AHC_TARGET_MODE
4551 if (ahc->black_hole != NULL) { 4548 if (ahc->black_hole != NULL) {
4552 xpt_free_path(ahc->black_hole->path); 4549 xpt_free_path(ahc->black_hole->path);
4553 free(ahc->black_hole, M_DEVBUF); 4550 kfree(ahc->black_hole);
4554 } 4551 }
4555#endif 4552#endif
4556 if (ahc->name != NULL) 4553 if (ahc->name != NULL)
4557 free(ahc->name, M_DEVBUF); 4554 kfree(ahc->name);
4558 if (ahc->seep_config != NULL) 4555 if (ahc->seep_config != NULL)
4559 free(ahc->seep_config, M_DEVBUF); 4556 kfree(ahc->seep_config);
4560#ifndef __FreeBSD__ 4557#ifndef __FreeBSD__
4561 free(ahc, M_DEVBUF); 4558 kfree(ahc);
4562#endif 4559#endif
4563 return; 4560 return;
4564} 4561}
@@ -4633,7 +4630,7 @@ ahc_reset(struct ahc_softc *ahc, int reinit)
4633 } while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK)); 4630 } while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK));
4634 4631
4635 if (wait == 0) { 4632 if (wait == 0) {
4636 printf("%s: WARNING - Failed chip reset! " 4633 printk("%s: WARNING - Failed chip reset! "
4637 "Trying to initialize anyway.\n", ahc_name(ahc)); 4634 "Trying to initialize anyway.\n", ahc_name(ahc));
4638 } 4635 }
4639 ahc_outb(ahc, HCNTRL, ahc->pause); 4636 ahc_outb(ahc, HCNTRL, ahc->pause);
@@ -4656,7 +4653,7 @@ ahc_reset(struct ahc_softc *ahc, int reinit)
4656 ahc->features |= AHC_TWIN; 4653 ahc->features |= AHC_TWIN;
4657 break; 4654 break;
4658 default: 4655 default:
4659 printf(" Unsupported adapter type. Ignoring\n"); 4656 printk(" Unsupported adapter type. Ignoring\n");
4660 return(-1); 4657 return(-1);
4661 } 4658 }
4662 4659
@@ -4783,9 +4780,7 @@ ahc_init_scbdata(struct ahc_softc *ahc)
4783 SLIST_INIT(&scb_data->sg_maps); 4780 SLIST_INIT(&scb_data->sg_maps);
4784 4781
4785 /* Allocate SCB resources */ 4782 /* Allocate SCB resources */
4786 scb_data->scbarray = 4783 scb_data->scbarray = (struct scb *)kmalloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC, GFP_ATOMIC);
4787 (struct scb *)malloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC,
4788 M_DEVBUF, M_NOWAIT);
4789 if (scb_data->scbarray == NULL) 4784 if (scb_data->scbarray == NULL)
4790 return (ENOMEM); 4785 return (ENOMEM);
4791 memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX_ALLOC); 4786 memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX_ALLOC);
@@ -4794,7 +4789,7 @@ ahc_init_scbdata(struct ahc_softc *ahc)
4794 4789
4795 scb_data->maxhscbs = ahc_probe_scbs(ahc); 4790 scb_data->maxhscbs = ahc_probe_scbs(ahc);
4796 if (ahc->scb_data->maxhscbs == 0) { 4791 if (ahc->scb_data->maxhscbs == 0) {
4797 printf("%s: No SCB space found\n", ahc_name(ahc)); 4792 printk("%s: No SCB space found\n", ahc_name(ahc));
4798 return (ENXIO); 4793 return (ENXIO);
4799 } 4794 }
4800 4795
@@ -4892,7 +4887,7 @@ ahc_init_scbdata(struct ahc_softc *ahc)
4892 ahc_alloc_scbs(ahc); 4887 ahc_alloc_scbs(ahc);
4893 4888
4894 if (scb_data->numscbs == 0) { 4889 if (scb_data->numscbs == 0) {
4895 printf("%s: ahc_init_scbdata - " 4890 printk("%s: ahc_init_scbdata - "
4896 "Unable to allocate initial scbs\n", 4891 "Unable to allocate initial scbs\n",
4897 ahc_name(ahc)); 4892 ahc_name(ahc));
4898 goto error_exit; 4893 goto error_exit;
@@ -4935,7 +4930,7 @@ ahc_fini_scbdata(struct ahc_softc *ahc)
4935 ahc_dmamem_free(ahc, scb_data->sg_dmat, 4930 ahc_dmamem_free(ahc, scb_data->sg_dmat,
4936 sg_map->sg_vaddr, 4931 sg_map->sg_vaddr,
4937 sg_map->sg_dmamap); 4932 sg_map->sg_dmamap);
4938 free(sg_map, M_DEVBUF); 4933 kfree(sg_map);
4939 } 4934 }
4940 ahc_dma_tag_destroy(ahc, scb_data->sg_dmat); 4935 ahc_dma_tag_destroy(ahc, scb_data->sg_dmat);
4941 } 4936 }
@@ -4964,7 +4959,7 @@ ahc_fini_scbdata(struct ahc_softc *ahc)
4964 break; 4959 break;
4965 } 4960 }
4966 if (scb_data->scbarray != NULL) 4961 if (scb_data->scbarray != NULL)
4967 free(scb_data->scbarray, M_DEVBUF); 4962 kfree(scb_data->scbarray);
4968} 4963}
4969 4964
4970static void 4965static void
@@ -4985,7 +4980,7 @@ ahc_alloc_scbs(struct ahc_softc *ahc)
4985 4980
4986 next_scb = &scb_data->scbarray[scb_data->numscbs]; 4981 next_scb = &scb_data->scbarray[scb_data->numscbs];
4987 4982
4988 sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT); 4983 sg_map = kmalloc(sizeof(*sg_map), GFP_ATOMIC);
4989 4984
4990 if (sg_map == NULL) 4985 if (sg_map == NULL)
4991 return; 4986 return;
@@ -4994,7 +4989,7 @@ ahc_alloc_scbs(struct ahc_softc *ahc)
4994 if (ahc_dmamem_alloc(ahc, scb_data->sg_dmat, 4989 if (ahc_dmamem_alloc(ahc, scb_data->sg_dmat,
4995 (void **)&sg_map->sg_vaddr, 4990 (void **)&sg_map->sg_vaddr,
4996 BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) { 4991 BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
4997 free(sg_map, M_DEVBUF); 4992 kfree(sg_map);
4998 return; 4993 return;
4999 } 4994 }
5000 4995
@@ -5014,8 +5009,7 @@ ahc_alloc_scbs(struct ahc_softc *ahc)
5014#ifndef __linux__ 5009#ifndef __linux__
5015 int error; 5010 int error;
5016#endif 5011#endif
5017 pdata = (struct scb_platform_data *)malloc(sizeof(*pdata), 5012 pdata = kmalloc(sizeof(*pdata), GFP_ATOMIC);
5018 M_DEVBUF, M_NOWAIT);
5019 if (pdata == NULL) 5013 if (pdata == NULL)
5020 break; 5014 break;
5021 next_scb->platform_data = pdata; 5015 next_scb->platform_data = pdata;
@@ -5244,7 +5238,7 @@ ahc_chip_init(struct ahc_softc *ahc)
5244 * in "fast" mode. 5238 * in "fast" mode.
5245 */ 5239 */
5246 if (bootverbose) 5240 if (bootverbose)
5247 printf("%s: Downloading Sequencer Program...", 5241 printk("%s: Downloading Sequencer Program...",
5248 ahc_name(ahc)); 5242 ahc_name(ahc));
5249 5243
5250 error = ahc_loadseq(ahc); 5244 error = ahc_loadseq(ahc);
@@ -5290,22 +5284,22 @@ ahc_init(struct ahc_softc *ahc)
5290#endif 5284#endif
5291 5285
5292#ifdef AHC_PRINT_SRAM 5286#ifdef AHC_PRINT_SRAM
5293 printf("Scratch Ram:"); 5287 printk("Scratch Ram:");
5294 for (i = 0x20; i < 0x5f; i++) { 5288 for (i = 0x20; i < 0x5f; i++) {
5295 if (((i % 8) == 0) && (i != 0)) { 5289 if (((i % 8) == 0) && (i != 0)) {
5296 printf ("\n "); 5290 printk ("\n ");
5297 } 5291 }
5298 printf (" 0x%x", ahc_inb(ahc, i)); 5292 printk (" 0x%x", ahc_inb(ahc, i));
5299 } 5293 }
5300 if ((ahc->features & AHC_MORE_SRAM) != 0) { 5294 if ((ahc->features & AHC_MORE_SRAM) != 0) {
5301 for (i = 0x70; i < 0x7f; i++) { 5295 for (i = 0x70; i < 0x7f; i++) {
5302 if (((i % 8) == 0) && (i != 0)) { 5296 if (((i % 8) == 0) && (i != 0)) {
5303 printf ("\n "); 5297 printk ("\n ");
5304 } 5298 }
5305 printf (" 0x%x", ahc_inb(ahc, i)); 5299 printk (" 0x%x", ahc_inb(ahc, i));
5306 } 5300 }
5307 } 5301 }
5308 printf ("\n"); 5302 printk ("\n");
5309 /* 5303 /*
5310 * Reading uninitialized scratch ram may 5304 * Reading uninitialized scratch ram may
5311 * generate parity errors. 5305 * generate parity errors.
@@ -5419,14 +5413,14 @@ ahc_init(struct ahc_softc *ahc)
5419 * data for any target mode initiator. 5413 * data for any target mode initiator.
5420 */ 5414 */
5421 if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) { 5415 if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) {
5422 printf("%s: unable to allocate ahc_tmode_tstate. " 5416 printk("%s: unable to allocate ahc_tmode_tstate. "
5423 "Failing attach\n", ahc_name(ahc)); 5417 "Failing attach\n", ahc_name(ahc));
5424 return (ENOMEM); 5418 return (ENOMEM);
5425 } 5419 }
5426 5420
5427 if ((ahc->features & AHC_TWIN) != 0) { 5421 if ((ahc->features & AHC_TWIN) != 0) {
5428 if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) { 5422 if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) {
5429 printf("%s: unable to allocate ahc_tmode_tstate. " 5423 printk("%s: unable to allocate ahc_tmode_tstate. "
5430 "Failing attach\n", ahc_name(ahc)); 5424 "Failing attach\n", ahc_name(ahc));
5431 return (ENOMEM); 5425 return (ENOMEM);
5432 } 5426 }
@@ -5440,7 +5434,7 @@ ahc_init(struct ahc_softc *ahc)
5440 5434
5441#ifdef AHC_DEBUG 5435#ifdef AHC_DEBUG
5442 if (ahc_debug & AHC_SHOW_MISC) { 5436 if (ahc_debug & AHC_SHOW_MISC) {
5443 printf("%s: hardware scb %u bytes; kernel scb %u bytes; " 5437 printk("%s: hardware scb %u bytes; kernel scb %u bytes; "
5444 "ahc_dma %u bytes\n", 5438 "ahc_dma %u bytes\n",
5445 ahc_name(ahc), 5439 ahc_name(ahc),
5446 (u_int)sizeof(struct hardware_scb), 5440 (u_int)sizeof(struct hardware_scb),
@@ -5470,7 +5464,7 @@ ahc_init(struct ahc_softc *ahc)
5470 5464
5471 /* Grab the disconnection disable table and invert it for our needs */ 5465 /* Grab the disconnection disable table and invert it for our needs */
5472 if ((ahc->flags & AHC_USEDEFAULTS) != 0) { 5466 if ((ahc->flags & AHC_USEDEFAULTS) != 0) {
5473 printf("%s: Host Adapter Bios disabled. Using default SCSI " 5467 printk("%s: Host Adapter Bios disabled. Using default SCSI "
5474 "device parameters\n", ahc_name(ahc)); 5468 "device parameters\n", ahc_name(ahc));
5475 ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B| 5469 ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B|
5476 AHC_TERM_ENB_A|AHC_TERM_ENB_B; 5470 AHC_TERM_ENB_A|AHC_TERM_ENB_B;
@@ -5651,7 +5645,7 @@ ahc_pause_and_flushwork(struct ahc_softc *ahc)
5651 && ((intstat & INT_PEND) != 0 5645 && ((intstat & INT_PEND) != 0
5652 || (ahc_inb(ahc, SSTAT0) & (SELDO|SELINGO)) != 0)); 5646 || (ahc_inb(ahc, SSTAT0) & (SELDO|SELINGO)) != 0));
5653 if (maxloops == 0) { 5647 if (maxloops == 0) {
5654 printf("Infinite interrupt loop, INTSTAT = %x", 5648 printk("Infinite interrupt loop, INTSTAT = %x",
5655 ahc_inb(ahc, INTSTAT)); 5649 ahc_inb(ahc, INTSTAT));
5656 } 5650 }
5657 ahc_platform_flushwork(ahc); 5651 ahc_platform_flushwork(ahc);
@@ -5910,7 +5904,7 @@ ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
5910 while (qinpos != qintail) { 5904 while (qinpos != qintail) {
5911 scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]); 5905 scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]);
5912 if (scb == NULL) { 5906 if (scb == NULL) {
5913 printf("qinpos = %d, SCB index = %d\n", 5907 printk("qinpos = %d, SCB index = %d\n",
5914 qinpos, ahc->qinfifo[qinpos]); 5908 qinpos, ahc->qinfifo[qinpos]);
5915 panic("Loop 1\n"); 5909 panic("Loop 1\n");
5916 } 5910 }
@@ -5933,7 +5927,7 @@ ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
5933 if (cstat != CAM_REQ_CMP) 5927 if (cstat != CAM_REQ_CMP)
5934 ahc_freeze_scb(scb); 5928 ahc_freeze_scb(scb);
5935 if ((scb->flags & SCB_ACTIVE) == 0) 5929 if ((scb->flags & SCB_ACTIVE) == 0)
5936 printf("Inactive SCB in qinfifo\n"); 5930 printk("Inactive SCB in qinfifo\n");
5937 ahc_done(ahc, scb); 5931 ahc_done(ahc, scb);
5938 5932
5939 /* FALLTHROUGH */ 5933 /* FALLTHROUGH */
@@ -5976,7 +5970,7 @@ ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
5976 scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinstart]); 5970 scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinstart]);
5977 5971
5978 if (scb == NULL) { 5972 if (scb == NULL) {
5979 printf("found = %d, qinstart = %d, qinfifionext = %d\n", 5973 printk("found = %d, qinstart = %d, qinfifionext = %d\n",
5980 found, qinstart, ahc->qinfifonext); 5974 found, qinstart, ahc->qinfifonext);
5981 panic("First/Second Qinfifo fixup\n"); 5975 panic("First/Second Qinfifo fixup\n");
5982 } 5976 }
@@ -6014,7 +6008,7 @@ ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
6014 ahc_outb(ahc, SCBPTR, next); 6008 ahc_outb(ahc, SCBPTR, next);
6015 scb_index = ahc_inb(ahc, SCB_TAG); 6009 scb_index = ahc_inb(ahc, SCB_TAG);
6016 if (scb_index >= ahc->scb_data->numscbs) { 6010 if (scb_index >= ahc->scb_data->numscbs) {
6017 printf("Waiting List inconsistency. " 6011 printk("Waiting List inconsistency. "
6018 "SCB index == %d, yet numscbs == %d.", 6012 "SCB index == %d, yet numscbs == %d.",
6019 scb_index, ahc->scb_data->numscbs); 6013 scb_index, ahc->scb_data->numscbs);
6020 ahc_dump_card_state(ahc); 6014 ahc_dump_card_state(ahc);
@@ -6022,7 +6016,7 @@ ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
6022 } 6016 }
6023 scb = ahc_lookup_scb(ahc, scb_index); 6017 scb = ahc_lookup_scb(ahc, scb_index);
6024 if (scb == NULL) { 6018 if (scb == NULL) {
6025 printf("scb_index = %d, next = %d\n", 6019 printk("scb_index = %d, next = %d\n",
6026 scb_index, next); 6020 scb_index, next);
6027 panic("Waiting List traversal\n"); 6021 panic("Waiting List traversal\n");
6028 } 6022 }
@@ -6046,7 +6040,7 @@ ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
6046 if (cstat != CAM_REQ_CMP) 6040 if (cstat != CAM_REQ_CMP)
6047 ahc_freeze_scb(scb); 6041 ahc_freeze_scb(scb);
6048 if ((scb->flags & SCB_ACTIVE) == 0) 6042 if ((scb->flags & SCB_ACTIVE) == 0)
6049 printf("Inactive SCB in Waiting List\n"); 6043 printk("Inactive SCB in Waiting List\n");
6050 ahc_done(ahc, scb); 6044 ahc_done(ahc, scb);
6051 /* FALLTHROUGH */ 6045 /* FALLTHROUGH */
6052 } 6046 }
@@ -6153,7 +6147,7 @@ ahc_search_untagged_queues(struct ahc_softc *ahc, ahc_io_ctx_t ctx,
6153 if (cstat != CAM_REQ_CMP) 6147 if (cstat != CAM_REQ_CMP)
6154 ahc_freeze_scb(scb); 6148 ahc_freeze_scb(scb);
6155 if ((scb->flags & SCB_ACTIVE) == 0) 6149 if ((scb->flags & SCB_ACTIVE) == 0)
6156 printf("Inactive SCB in untaggedQ\n"); 6150 printk("Inactive SCB in untaggedQ\n");
6157 ahc_done(ahc, scb); 6151 ahc_done(ahc, scb);
6158 break; 6152 break;
6159 } 6153 }
@@ -6200,7 +6194,7 @@ ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel,
6200 ahc_outb(ahc, SCBPTR, next); 6194 ahc_outb(ahc, SCBPTR, next);
6201 scb_index = ahc_inb(ahc, SCB_TAG); 6195 scb_index = ahc_inb(ahc, SCB_TAG);
6202 if (scb_index >= ahc->scb_data->numscbs) { 6196 if (scb_index >= ahc->scb_data->numscbs) {
6203 printf("Disconnected List inconsistency. " 6197 printk("Disconnected List inconsistency. "
6204 "SCB index == %d, yet numscbs == %d.", 6198 "SCB index == %d, yet numscbs == %d.",
6205 scb_index, ahc->scb_data->numscbs); 6199 scb_index, ahc->scb_data->numscbs);
6206 ahc_dump_card_state(ahc); 6200 ahc_dump_card_state(ahc);
@@ -6456,7 +6450,7 @@ ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel,
6456 if (ahc_get_transaction_status(scbp) != CAM_REQ_CMP) 6450 if (ahc_get_transaction_status(scbp) != CAM_REQ_CMP)
6457 ahc_freeze_scb(scbp); 6451 ahc_freeze_scb(scbp);
6458 if ((scbp->flags & SCB_ACTIVE) == 0) 6452 if ((scbp->flags & SCB_ACTIVE) == 0)
6459 printf("Inactive SCB on pending list\n"); 6453 printk("Inactive SCB on pending list\n");
6460 ahc_done(ahc, scbp); 6454 ahc_done(ahc, scbp);
6461 found++; 6455 found++;
6462 } 6456 }
@@ -6734,7 +6728,7 @@ ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb)
6734#ifdef AHC_DEBUG 6728#ifdef AHC_DEBUG
6735 if ((ahc_debug & AHC_SHOW_MISC) != 0) { 6729 if ((ahc_debug & AHC_SHOW_MISC) != 0) {
6736 ahc_print_path(ahc, scb); 6730 ahc_print_path(ahc, scb);
6737 printf("Handled %sResidual of %d bytes\n", 6731 printk("Handled %sResidual of %d bytes\n",
6738 (scb->flags & SCB_SENSE) ? "Sense " : "", resid); 6732 (scb->flags & SCB_SENSE) ? "Sense " : "", resid);
6739 } 6733 }
6740#endif 6734#endif
@@ -6774,7 +6768,7 @@ ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate,
6774 6768
6775 if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) { 6769 if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) {
6776 xpt_print_path(lstate->path); 6770 xpt_print_path(lstate->path);
6777 printf("immediate event %x:%x lost\n", 6771 printk("immediate event %x:%x lost\n",
6778 lstate->event_buffer[lstate->event_r_idx].event_type, 6772 lstate->event_buffer[lstate->event_r_idx].event_type,
6779 lstate->event_buffer[lstate->event_r_idx].event_arg); 6773 lstate->event_buffer[lstate->event_r_idx].event_arg);
6780 lstate->event_r_idx++; 6774 lstate->event_r_idx++;
@@ -6844,7 +6838,7 @@ ahc_dumpseq(struct ahc_softc* ahc)
6844 uint8_t ins_bytes[4]; 6838 uint8_t ins_bytes[4];
6845 6839
6846 ahc_insb(ahc, SEQRAM, ins_bytes, 4); 6840 ahc_insb(ahc, SEQRAM, ins_bytes, 4);
6847 printf("0x%08x\n", ins_bytes[0] << 24 6841 printk("0x%08x\n", ins_bytes[0] << 24
6848 | ins_bytes[1] << 16 6842 | ins_bytes[1] << 16
6849 | ins_bytes[2] << 8 6843 | ins_bytes[2] << 8
6850 | ins_bytes[3]); 6844 | ins_bytes[3]);
@@ -6912,7 +6906,7 @@ ahc_loadseq(struct ahc_softc *ahc)
6912 * storage capacity for this chip. Fail 6906 * storage capacity for this chip. Fail
6913 * the load. 6907 * the load.
6914 */ 6908 */
6915 printf("\n%s: Program too large for instruction memory " 6909 printk("\n%s: Program too large for instruction memory "
6916 "size of %d!\n", ahc_name(ahc), 6910 "size of %d!\n", ahc_name(ahc),
6917 ahc->instruction_ram_size); 6911 ahc->instruction_ram_size);
6918 return (ENOMEM); 6912 return (ENOMEM);
@@ -6947,7 +6941,7 @@ ahc_loadseq(struct ahc_softc *ahc)
6947 if (cs_count != 0) { 6941 if (cs_count != 0) {
6948 6942
6949 cs_count *= sizeof(struct cs); 6943 cs_count *= sizeof(struct cs);
6950 ahc->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT); 6944 ahc->critical_sections = kmalloc(cs_count, GFP_ATOMIC);
6951 if (ahc->critical_sections == NULL) 6945 if (ahc->critical_sections == NULL)
6952 panic("ahc_loadseq: Could not malloc"); 6946 panic("ahc_loadseq: Could not malloc");
6953 memcpy(ahc->critical_sections, cs_table, cs_count); 6947 memcpy(ahc->critical_sections, cs_table, cs_count);
@@ -6955,8 +6949,8 @@ ahc_loadseq(struct ahc_softc *ahc)
6955 ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE); 6949 ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE);
6956 6950
6957 if (bootverbose) { 6951 if (bootverbose) {
6958 printf(" %d instructions downloaded\n", downloaded); 6952 printk(" %d instructions downloaded\n", downloaded);
6959 printf("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n", 6953 printk("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n",
6960 ahc_name(ahc), ahc->features, ahc->bugs, ahc->flags); 6954 ahc_name(ahc), ahc->features, ahc->bugs, ahc->flags);
6961 } 6955 }
6962 return (0); 6956 return (0);
@@ -7132,12 +7126,12 @@ ahc_print_register(const ahc_reg_parse_entry_t *table, u_int num_entries,
7132 u_int printed_mask; 7126 u_int printed_mask;
7133 7127
7134 if (cur_column != NULL && *cur_column >= wrap_point) { 7128 if (cur_column != NULL && *cur_column >= wrap_point) {
7135 printf("\n"); 7129 printk("\n");
7136 *cur_column = 0; 7130 *cur_column = 0;
7137 } 7131 }
7138 printed = printf("%s[0x%x]", name, value); 7132 printed = printk("%s[0x%x]", name, value);
7139 if (table == NULL) { 7133 if (table == NULL) {
7140 printed += printf(" "); 7134 printed += printk(" ");
7141 *cur_column += printed; 7135 *cur_column += printed;
7142 return (printed); 7136 return (printed);
7143 } 7137 }
@@ -7152,7 +7146,7 @@ ahc_print_register(const ahc_reg_parse_entry_t *table, u_int num_entries,
7152 == table[entry].mask)) 7146 == table[entry].mask))
7153 continue; 7147 continue;
7154 7148
7155 printed += printf("%s%s", 7149 printed += printk("%s%s",
7156 printed_mask == 0 ? ":(" : "|", 7150 printed_mask == 0 ? ":(" : "|",
7157 table[entry].name); 7151 table[entry].name);
7158 printed_mask |= table[entry].mask; 7152 printed_mask |= table[entry].mask;
@@ -7163,9 +7157,9 @@ ahc_print_register(const ahc_reg_parse_entry_t *table, u_int num_entries,
7163 break; 7157 break;
7164 } 7158 }
7165 if (printed_mask != 0) 7159 if (printed_mask != 0)
7166 printed += printf(") "); 7160 printed += printk(") ");
7167 else 7161 else
7168 printed += printf(" "); 7162 printed += printk(" ");
7169 if (cur_column != NULL) 7163 if (cur_column != NULL)
7170 *cur_column += printed; 7164 *cur_column += printed;
7171 return (printed); 7165 return (printed);
@@ -7197,16 +7191,16 @@ ahc_dump_card_state(struct ahc_softc *ahc)
7197 7191
7198 saved_scbptr = ahc_inb(ahc, SCBPTR); 7192 saved_scbptr = ahc_inb(ahc, SCBPTR);
7199 last_phase = ahc_inb(ahc, LASTPHASE); 7193 last_phase = ahc_inb(ahc, LASTPHASE);
7200 printf(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n" 7194 printk(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n"
7201 "%s: Dumping Card State %s, at SEQADDR 0x%x\n", 7195 "%s: Dumping Card State %s, at SEQADDR 0x%x\n",
7202 ahc_name(ahc), ahc_lookup_phase_entry(last_phase)->phasemsg, 7196 ahc_name(ahc), ahc_lookup_phase_entry(last_phase)->phasemsg,
7203 ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8)); 7197 ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));
7204 if (paused) 7198 if (paused)
7205 printf("Card was paused\n"); 7199 printk("Card was paused\n");
7206 printf("ACCUM = 0x%x, SINDEX = 0x%x, DINDEX = 0x%x, ARG_2 = 0x%x\n", 7200 printk("ACCUM = 0x%x, SINDEX = 0x%x, DINDEX = 0x%x, ARG_2 = 0x%x\n",
7207 ahc_inb(ahc, ACCUM), ahc_inb(ahc, SINDEX), ahc_inb(ahc, DINDEX), 7201 ahc_inb(ahc, ACCUM), ahc_inb(ahc, SINDEX), ahc_inb(ahc, DINDEX),
7208 ahc_inb(ahc, ARG_2)); 7202 ahc_inb(ahc, ARG_2));
7209 printf("HCNT = 0x%x SCBPTR = 0x%x\n", ahc_inb(ahc, HCNT), 7203 printk("HCNT = 0x%x SCBPTR = 0x%x\n", ahc_inb(ahc, HCNT),
7210 ahc_inb(ahc, SCBPTR)); 7204 ahc_inb(ahc, SCBPTR));
7211 cur_col = 0; 7205 cur_col = 0;
7212 if ((ahc->features & AHC_DT) != 0) 7206 if ((ahc->features & AHC_DT) != 0)
@@ -7230,15 +7224,15 @@ ahc_dump_card_state(struct ahc_softc *ahc)
7230 ahc_dfcntrl_print(ahc_inb(ahc, DFCNTRL), &cur_col, 50); 7224 ahc_dfcntrl_print(ahc_inb(ahc, DFCNTRL), &cur_col, 50);
7231 ahc_dfstatus_print(ahc_inb(ahc, DFSTATUS), &cur_col, 50); 7225 ahc_dfstatus_print(ahc_inb(ahc, DFSTATUS), &cur_col, 50);
7232 if (cur_col != 0) 7226 if (cur_col != 0)
7233 printf("\n"); 7227 printk("\n");
7234 printf("STACK:"); 7228 printk("STACK:");
7235 for (i = 0; i < STACK_SIZE; i++) 7229 for (i = 0; i < STACK_SIZE; i++)
7236 printf(" 0x%x", ahc_inb(ahc, STACK)|(ahc_inb(ahc, STACK) << 8)); 7230 printk(" 0x%x", ahc_inb(ahc, STACK)|(ahc_inb(ahc, STACK) << 8));
7237 printf("\nSCB count = %d\n", ahc->scb_data->numscbs); 7231 printk("\nSCB count = %d\n", ahc->scb_data->numscbs);
7238 printf("Kernel NEXTQSCB = %d\n", ahc->next_queued_scb->hscb->tag); 7232 printk("Kernel NEXTQSCB = %d\n", ahc->next_queued_scb->hscb->tag);
7239 printf("Card NEXTQSCB = %d\n", ahc_inb(ahc, NEXT_QUEUED_SCB)); 7233 printk("Card NEXTQSCB = %d\n", ahc_inb(ahc, NEXT_QUEUED_SCB));
7240 /* QINFIFO */ 7234 /* QINFIFO */
7241 printf("QINFIFO entries: "); 7235 printk("QINFIFO entries: ");
7242 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 7236 if ((ahc->features & AHC_QUEUE_REGS) != 0) {
7243 qinpos = ahc_inb(ahc, SNSCB_QOFF); 7237 qinpos = ahc_inb(ahc, SNSCB_QOFF);
7244 ahc_outb(ahc, SNSCB_QOFF, qinpos); 7238 ahc_outb(ahc, SNSCB_QOFF, qinpos);
@@ -7246,109 +7240,109 @@ ahc_dump_card_state(struct ahc_softc *ahc)
7246 qinpos = ahc_inb(ahc, QINPOS); 7240 qinpos = ahc_inb(ahc, QINPOS);
7247 qintail = ahc->qinfifonext; 7241 qintail = ahc->qinfifonext;
7248 while (qinpos != qintail) { 7242 while (qinpos != qintail) {
7249 printf("%d ", ahc->qinfifo[qinpos]); 7243 printk("%d ", ahc->qinfifo[qinpos]);
7250 qinpos++; 7244 qinpos++;
7251 } 7245 }
7252 printf("\n"); 7246 printk("\n");
7253 7247
7254 printf("Waiting Queue entries: "); 7248 printk("Waiting Queue entries: ");
7255 scb_index = ahc_inb(ahc, WAITING_SCBH); 7249 scb_index = ahc_inb(ahc, WAITING_SCBH);
7256 i = 0; 7250 i = 0;
7257 while (scb_index != SCB_LIST_NULL && i++ < 256) { 7251 while (scb_index != SCB_LIST_NULL && i++ < 256) {
7258 ahc_outb(ahc, SCBPTR, scb_index); 7252 ahc_outb(ahc, SCBPTR, scb_index);
7259 printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG)); 7253 printk("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
7260 scb_index = ahc_inb(ahc, SCB_NEXT); 7254 scb_index = ahc_inb(ahc, SCB_NEXT);
7261 } 7255 }
7262 printf("\n"); 7256 printk("\n");
7263 7257
7264 printf("Disconnected Queue entries: "); 7258 printk("Disconnected Queue entries: ");
7265 scb_index = ahc_inb(ahc, DISCONNECTED_SCBH); 7259 scb_index = ahc_inb(ahc, DISCONNECTED_SCBH);
7266 i = 0; 7260 i = 0;
7267 while (scb_index != SCB_LIST_NULL && i++ < 256) { 7261 while (scb_index != SCB_LIST_NULL && i++ < 256) {
7268 ahc_outb(ahc, SCBPTR, scb_index); 7262 ahc_outb(ahc, SCBPTR, scb_index);
7269 printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG)); 7263 printk("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
7270 scb_index = ahc_inb(ahc, SCB_NEXT); 7264 scb_index = ahc_inb(ahc, SCB_NEXT);
7271 } 7265 }
7272 printf("\n"); 7266 printk("\n");
7273 7267
7274 ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD); 7268 ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD);
7275 printf("QOUTFIFO entries: "); 7269 printk("QOUTFIFO entries: ");
7276 qoutpos = ahc->qoutfifonext; 7270 qoutpos = ahc->qoutfifonext;
7277 i = 0; 7271 i = 0;
7278 while (ahc->qoutfifo[qoutpos] != SCB_LIST_NULL && i++ < 256) { 7272 while (ahc->qoutfifo[qoutpos] != SCB_LIST_NULL && i++ < 256) {
7279 printf("%d ", ahc->qoutfifo[qoutpos]); 7273 printk("%d ", ahc->qoutfifo[qoutpos]);
7280 qoutpos++; 7274 qoutpos++;
7281 } 7275 }
7282 printf("\n"); 7276 printk("\n");
7283 7277
7284 printf("Sequencer Free SCB List: "); 7278 printk("Sequencer Free SCB List: ");
7285 scb_index = ahc_inb(ahc, FREE_SCBH); 7279 scb_index = ahc_inb(ahc, FREE_SCBH);
7286 i = 0; 7280 i = 0;
7287 while (scb_index != SCB_LIST_NULL && i++ < 256) { 7281 while (scb_index != SCB_LIST_NULL && i++ < 256) {
7288 ahc_outb(ahc, SCBPTR, scb_index); 7282 ahc_outb(ahc, SCBPTR, scb_index);
7289 printf("%d ", scb_index); 7283 printk("%d ", scb_index);
7290 scb_index = ahc_inb(ahc, SCB_NEXT); 7284 scb_index = ahc_inb(ahc, SCB_NEXT);
7291 } 7285 }
7292 printf("\n"); 7286 printk("\n");
7293 7287
7294 printf("Sequencer SCB Info: "); 7288 printk("Sequencer SCB Info: ");
7295 for (i = 0; i < ahc->scb_data->maxhscbs; i++) { 7289 for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
7296 ahc_outb(ahc, SCBPTR, i); 7290 ahc_outb(ahc, SCBPTR, i);
7297 cur_col = printf("\n%3d ", i); 7291 cur_col = printk("\n%3d ", i);
7298 7292
7299 ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), &cur_col, 60); 7293 ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), &cur_col, 60);
7300 ahc_scb_scsiid_print(ahc_inb(ahc, SCB_SCSIID), &cur_col, 60); 7294 ahc_scb_scsiid_print(ahc_inb(ahc, SCB_SCSIID), &cur_col, 60);
7301 ahc_scb_lun_print(ahc_inb(ahc, SCB_LUN), &cur_col, 60); 7295 ahc_scb_lun_print(ahc_inb(ahc, SCB_LUN), &cur_col, 60);
7302 ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60); 7296 ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60);
7303 } 7297 }
7304 printf("\n"); 7298 printk("\n");
7305 7299
7306 printf("Pending list: "); 7300 printk("Pending list: ");
7307 i = 0; 7301 i = 0;
7308 LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) { 7302 LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
7309 if (i++ > 256) 7303 if (i++ > 256)
7310 break; 7304 break;
7311 cur_col = printf("\n%3d ", scb->hscb->tag); 7305 cur_col = printk("\n%3d ", scb->hscb->tag);
7312 ahc_scb_control_print(scb->hscb->control, &cur_col, 60); 7306 ahc_scb_control_print(scb->hscb->control, &cur_col, 60);
7313 ahc_scb_scsiid_print(scb->hscb->scsiid, &cur_col, 60); 7307 ahc_scb_scsiid_print(scb->hscb->scsiid, &cur_col, 60);
7314 ahc_scb_lun_print(scb->hscb->lun, &cur_col, 60); 7308 ahc_scb_lun_print(scb->hscb->lun, &cur_col, 60);
7315 if ((ahc->flags & AHC_PAGESCBS) == 0) { 7309 if ((ahc->flags & AHC_PAGESCBS) == 0) {
7316 ahc_outb(ahc, SCBPTR, scb->hscb->tag); 7310 ahc_outb(ahc, SCBPTR, scb->hscb->tag);
7317 printf("("); 7311 printk("(");
7318 ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), 7312 ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL),
7319 &cur_col, 60); 7313 &cur_col, 60);
7320 ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60); 7314 ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60);
7321 printf(")"); 7315 printk(")");
7322 } 7316 }
7323 } 7317 }
7324 printf("\n"); 7318 printk("\n");
7325 7319
7326 printf("Kernel Free SCB list: "); 7320 printk("Kernel Free SCB list: ");
7327 i = 0; 7321 i = 0;
7328 SLIST_FOREACH(scb, &ahc->scb_data->free_scbs, links.sle) { 7322 SLIST_FOREACH(scb, &ahc->scb_data->free_scbs, links.sle) {
7329 if (i++ > 256) 7323 if (i++ > 256)
7330 break; 7324 break;
7331 printf("%d ", scb->hscb->tag); 7325 printk("%d ", scb->hscb->tag);
7332 } 7326 }
7333 printf("\n"); 7327 printk("\n");
7334 7328
7335 maxtarget = (ahc->features & (AHC_WIDE|AHC_TWIN)) ? 15 : 7; 7329 maxtarget = (ahc->features & (AHC_WIDE|AHC_TWIN)) ? 15 : 7;
7336 for (target = 0; target <= maxtarget; target++) { 7330 for (target = 0; target <= maxtarget; target++) {
7337 untagged_q = &ahc->untagged_queues[target]; 7331 untagged_q = &ahc->untagged_queues[target];
7338 if (TAILQ_FIRST(untagged_q) == NULL) 7332 if (TAILQ_FIRST(untagged_q) == NULL)
7339 continue; 7333 continue;
7340 printf("Untagged Q(%d): ", target); 7334 printk("Untagged Q(%d): ", target);
7341 i = 0; 7335 i = 0;
7342 TAILQ_FOREACH(scb, untagged_q, links.tqe) { 7336 TAILQ_FOREACH(scb, untagged_q, links.tqe) {
7343 if (i++ > 256) 7337 if (i++ > 256)
7344 break; 7338 break;
7345 printf("%d ", scb->hscb->tag); 7339 printk("%d ", scb->hscb->tag);
7346 } 7340 }
7347 printf("\n"); 7341 printk("\n");
7348 } 7342 }
7349 7343
7350 ahc_platform_dump_card_state(ahc); 7344 ahc_platform_dump_card_state(ahc);
7351 printf("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n"); 7345 printk("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n");
7352 ahc_outb(ahc, SCBPTR, saved_scbptr); 7346 ahc_outb(ahc, SCBPTR, saved_scbptr);
7353 if (paused == 0) 7347 if (paused == 0)
7354 ahc_unpause(ahc); 7348 ahc_unpause(ahc);
@@ -7489,7 +7483,7 @@ ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
7489 u_long s; 7483 u_long s;
7490 ahc_flag saved_flags; 7484 ahc_flag saved_flags;
7491 7485
7492 printf("Configuring Target Mode\n"); 7486 printk("Configuring Target Mode\n");
7493 ahc_lock(ahc, &s); 7487 ahc_lock(ahc, &s);
7494 if (LIST_FIRST(&ahc->pending_scbs) != NULL) { 7488 if (LIST_FIRST(&ahc->pending_scbs) != NULL) {
7495 ccb->ccb_h.status = CAM_BUSY; 7489 ccb->ccb_h.status = CAM_BUSY;
@@ -7535,7 +7529,7 @@ ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
7535 /* Are we already enabled?? */ 7529 /* Are we already enabled?? */
7536 if (lstate != NULL) { 7530 if (lstate != NULL) {
7537 xpt_print_path(ccb->ccb_h.path); 7531 xpt_print_path(ccb->ccb_h.path);
7538 printf("Lun already enabled\n"); 7532 printk("Lun already enabled\n");
7539 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; 7533 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
7540 return; 7534 return;
7541 } 7535 }
@@ -7547,7 +7541,7 @@ ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
7547 * specific commands. 7541 * specific commands.
7548 */ 7542 */
7549 ccb->ccb_h.status = CAM_REQ_INVALID; 7543 ccb->ccb_h.status = CAM_REQ_INVALID;
7550 printf("Non-zero Group Codes\n"); 7544 printk("Non-zero Group Codes\n");
7551 return; 7545 return;
7552 } 7546 }
7553 7547
@@ -7559,15 +7553,15 @@ ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
7559 tstate = ahc_alloc_tstate(ahc, target, channel); 7553 tstate = ahc_alloc_tstate(ahc, target, channel);
7560 if (tstate == NULL) { 7554 if (tstate == NULL) {
7561 xpt_print_path(ccb->ccb_h.path); 7555 xpt_print_path(ccb->ccb_h.path);
7562 printf("Couldn't allocate tstate\n"); 7556 printk("Couldn't allocate tstate\n");
7563 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 7557 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
7564 return; 7558 return;
7565 } 7559 }
7566 } 7560 }
7567 lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT); 7561 lstate = kmalloc(sizeof(*lstate), GFP_ATOMIC);
7568 if (lstate == NULL) { 7562 if (lstate == NULL) {
7569 xpt_print_path(ccb->ccb_h.path); 7563 xpt_print_path(ccb->ccb_h.path);
7570 printf("Couldn't allocate lstate\n"); 7564 printk("Couldn't allocate lstate\n");
7571 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 7565 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
7572 return; 7566 return;
7573 } 7567 }
@@ -7577,9 +7571,9 @@ ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
7577 xpt_path_target_id(ccb->ccb_h.path), 7571 xpt_path_target_id(ccb->ccb_h.path),
7578 xpt_path_lun_id(ccb->ccb_h.path)); 7572 xpt_path_lun_id(ccb->ccb_h.path));
7579 if (status != CAM_REQ_CMP) { 7573 if (status != CAM_REQ_CMP) {
7580 free(lstate, M_DEVBUF); 7574 kfree(lstate);
7581 xpt_print_path(ccb->ccb_h.path); 7575 xpt_print_path(ccb->ccb_h.path);
7582 printf("Couldn't allocate path\n"); 7576 printk("Couldn't allocate path\n");
7583 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 7577 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
7584 return; 7578 return;
7585 } 7579 }
@@ -7654,7 +7648,7 @@ ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
7654 ahc_unlock(ahc, &s); 7648 ahc_unlock(ahc, &s);
7655 ccb->ccb_h.status = CAM_REQ_CMP; 7649 ccb->ccb_h.status = CAM_REQ_CMP;
7656 xpt_print_path(ccb->ccb_h.path); 7650 xpt_print_path(ccb->ccb_h.path);
7657 printf("Lun now enabled for target mode\n"); 7651 printk("Lun now enabled for target mode\n");
7658 } else { 7652 } else {
7659 struct scb *scb; 7653 struct scb *scb;
7660 int i, empty; 7654 int i, empty;
@@ -7673,7 +7667,7 @@ ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
7673 ccbh = &scb->io_ctx->ccb_h; 7667 ccbh = &scb->io_ctx->ccb_h;
7674 if (ccbh->func_code == XPT_CONT_TARGET_IO 7668 if (ccbh->func_code == XPT_CONT_TARGET_IO
7675 && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){ 7669 && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){
7676 printf("CTIO pending\n"); 7670 printk("CTIO pending\n");
7677 ccb->ccb_h.status = CAM_REQ_INVALID; 7671 ccb->ccb_h.status = CAM_REQ_INVALID;
7678 ahc_unlock(ahc, &s); 7672 ahc_unlock(ahc, &s);
7679 return; 7673 return;
@@ -7681,12 +7675,12 @@ ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
7681 } 7675 }
7682 7676
7683 if (SLIST_FIRST(&lstate->accept_tios) != NULL) { 7677 if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
7684 printf("ATIOs pending\n"); 7678 printk("ATIOs pending\n");
7685 ccb->ccb_h.status = CAM_REQ_INVALID; 7679 ccb->ccb_h.status = CAM_REQ_INVALID;
7686 } 7680 }
7687 7681
7688 if (SLIST_FIRST(&lstate->immed_notifies) != NULL) { 7682 if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
7689 printf("INOTs pending\n"); 7683 printk("INOTs pending\n");
7690 ccb->ccb_h.status = CAM_REQ_INVALID; 7684 ccb->ccb_h.status = CAM_REQ_INVALID;
7691 } 7685 }
7692 7686
@@ -7696,9 +7690,9 @@ ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
7696 } 7690 }
7697 7691
7698 xpt_print_path(ccb->ccb_h.path); 7692 xpt_print_path(ccb->ccb_h.path);
7699 printf("Target mode disabled\n"); 7693 printk("Target mode disabled\n");
7700 xpt_free_path(lstate->path); 7694 xpt_free_path(lstate->path);
7701 free(lstate, M_DEVBUF); 7695 kfree(lstate);
7702 7696
7703 ahc_pause(ahc); 7697 ahc_pause(ahc);
7704 /* Can we clean up the target too? */ 7698 /* Can we clean up the target too? */
@@ -7750,7 +7744,7 @@ ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
7750 ahc_outb(ahc, SCSISEQ, scsiseq); 7744 ahc_outb(ahc, SCSISEQ, scsiseq);
7751 7745
7752 if ((ahc->features & AHC_MULTIROLE) == 0) { 7746 if ((ahc->features & AHC_MULTIROLE) == 0) {
7753 printf("Configuring Initiator Mode\n"); 7747 printk("Configuring Initiator Mode\n");
7754 ahc->flags &= ~AHC_TARGETROLE; 7748 ahc->flags &= ~AHC_TARGETROLE;
7755 ahc->flags |= AHC_INITIATORROLE; 7749 ahc->flags |= AHC_INITIATORROLE;
7756 /* 7750 /*
@@ -7897,12 +7891,12 @@ ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd)
7897 * Wait for more ATIOs from the peripheral driver for this lun. 7891 * Wait for more ATIOs from the peripheral driver for this lun.
7898 */ 7892 */
7899 if (bootverbose) 7893 if (bootverbose)
7900 printf("%s: ATIOs exhausted\n", ahc_name(ahc)); 7894 printk("%s: ATIOs exhausted\n", ahc_name(ahc));
7901 return (1); 7895 return (1);
7902 } else 7896 } else
7903 ahc->flags &= ~AHC_TQINFIFO_BLOCKED; 7897 ahc->flags &= ~AHC_TQINFIFO_BLOCKED;
7904#if 0 7898#if 0
7905 printf("Incoming command from %d for %d:%d%s\n", 7899 printk("Incoming command from %d for %d:%d%s\n",
7906 initiator, target, lun, 7900 initiator, target, lun,
7907 lstate == ahc->black_hole ? "(Black Holed)" : ""); 7901 lstate == ahc->black_hole ? "(Black Holed)" : "");
7908#endif 7902#endif
@@ -7949,7 +7943,7 @@ ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd)
7949 default: 7943 default:
7950 /* Only copy the opcode. */ 7944 /* Only copy the opcode. */
7951 atio->cdb_len = 1; 7945 atio->cdb_len = 1;
7952 printf("Reserved or VU command code type encountered\n"); 7946 printk("Reserved or VU command code type encountered\n");
7953 break; 7947 break;
7954 } 7948 }
7955 7949
@@ -7965,7 +7959,7 @@ ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd)
7965 * to this accept tio. 7959 * to this accept tio.
7966 */ 7960 */
7967#if 0 7961#if 0
7968 printf("Received Immediate Command %d:%d:%d - %p\n", 7962 printk("Received Immediate Command %d:%d:%d - %p\n",
7969 initiator, target, lun, ahc->pending_device); 7963 initiator, target, lun, ahc->pending_device);
7970#endif 7964#endif
7971 ahc->pending_device = lstate; 7965 ahc->pending_device = lstate;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index 5e42dac23505..aeea7a61478e 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -653,7 +653,7 @@ ahc_linux_slave_alloc(struct scsi_device *sdev)
653 struct ahc_linux_device *dev; 653 struct ahc_linux_device *dev;
654 654
655 if (bootverbose) 655 if (bootverbose)
656 printf("%s: Slave Alloc %d\n", ahc_name(ahc), sdev->id); 656 printk("%s: Slave Alloc %d\n", ahc_name(ahc), sdev->id);
657 657
658 dev = scsi_transport_device_data(sdev); 658 dev = scsi_transport_device_data(sdev);
659 memset(dev, 0, sizeof(*dev)); 659 memset(dev, 0, sizeof(*dev));
@@ -755,7 +755,7 @@ ahc_linux_abort(struct scsi_cmnd *cmd)
755 755
756 error = ahc_linux_queue_recovery_cmd(cmd, SCB_ABORT); 756 error = ahc_linux_queue_recovery_cmd(cmd, SCB_ABORT);
757 if (error != 0) 757 if (error != 0)
758 printf("aic7xxx_abort returns 0x%x\n", error); 758 printk("aic7xxx_abort returns 0x%x\n", error);
759 return (error); 759 return (error);
760} 760}
761 761
@@ -769,7 +769,7 @@ ahc_linux_dev_reset(struct scsi_cmnd *cmd)
769 769
770 error = ahc_linux_queue_recovery_cmd(cmd, SCB_DEVICE_RESET); 770 error = ahc_linux_queue_recovery_cmd(cmd, SCB_DEVICE_RESET);
771 if (error != 0) 771 if (error != 0)
772 printf("aic7xxx_dev_reset returns 0x%x\n", error); 772 printk("aic7xxx_dev_reset returns 0x%x\n", error);
773 return (error); 773 return (error);
774} 774}
775 775
@@ -791,7 +791,7 @@ ahc_linux_bus_reset(struct scsi_cmnd *cmd)
791 ahc_unlock(ahc, &flags); 791 ahc_unlock(ahc, &flags);
792 792
793 if (bootverbose) 793 if (bootverbose)
794 printf("%s: SCSI bus reset delivered. " 794 printk("%s: SCSI bus reset delivered. "
795 "%d SCBs aborted.\n", ahc_name(ahc), found); 795 "%d SCBs aborted.\n", ahc_name(ahc), found);
796 796
797 return SUCCESS; 797 return SUCCESS;
@@ -840,7 +840,7 @@ ahc_dma_tag_create(struct ahc_softc *ahc, bus_dma_tag_t parent,
840{ 840{
841 bus_dma_tag_t dmat; 841 bus_dma_tag_t dmat;
842 842
843 dmat = malloc(sizeof(*dmat), M_DEVBUF, M_NOWAIT); 843 dmat = kmalloc(sizeof(*dmat), GFP_ATOMIC);
844 if (dmat == NULL) 844 if (dmat == NULL)
845 return (ENOMEM); 845 return (ENOMEM);
846 846
@@ -861,7 +861,7 @@ ahc_dma_tag_create(struct ahc_softc *ahc, bus_dma_tag_t parent,
861void 861void
862ahc_dma_tag_destroy(struct ahc_softc *ahc, bus_dma_tag_t dmat) 862ahc_dma_tag_destroy(struct ahc_softc *ahc, bus_dma_tag_t dmat)
863{ 863{
864 free(dmat, M_DEVBUF); 864 kfree(dmat);
865} 865}
866 866
867int 867int
@@ -918,7 +918,7 @@ ahc_linux_setup_tag_info_global(char *p)
918 int tags, i, j; 918 int tags, i, j;
919 919
920 tags = simple_strtoul(p + 1, NULL, 0) & 0xff; 920 tags = simple_strtoul(p + 1, NULL, 0) & 0xff;
921 printf("Setting Global Tags= %d\n", tags); 921 printk("Setting Global Tags= %d\n", tags);
922 922
923 for (i = 0; i < ARRAY_SIZE(aic7xxx_tag_info); i++) { 923 for (i = 0; i < ARRAY_SIZE(aic7xxx_tag_info); i++) {
924 for (j = 0; j < AHC_NUM_TARGETS; j++) { 924 for (j = 0; j < AHC_NUM_TARGETS; j++) {
@@ -936,7 +936,7 @@ ahc_linux_setup_tag_info(u_long arg, int instance, int targ, int32_t value)
936 && (targ < AHC_NUM_TARGETS)) { 936 && (targ < AHC_NUM_TARGETS)) {
937 aic7xxx_tag_info[instance].tag_commands[targ] = value & 0xff; 937 aic7xxx_tag_info[instance].tag_commands[targ] = value & 0xff;
938 if (bootverbose) 938 if (bootverbose)
939 printf("tag_info[%d:%d] = %d\n", instance, targ, value); 939 printk("tag_info[%d:%d] = %d\n", instance, targ, value);
940 } 940 }
941} 941}
942 942
@@ -977,7 +977,7 @@ ahc_parse_brace_option(char *opt_name, char *opt_arg, char *end, int depth,
977 if (targ == -1) 977 if (targ == -1)
978 targ = 0; 978 targ = 0;
979 } else { 979 } else {
980 printf("Malformed Option %s\n", 980 printk("Malformed Option %s\n",
981 opt_name); 981 opt_name);
982 done = TRUE; 982 done = TRUE;
983 } 983 }
@@ -1120,7 +1120,7 @@ ahc_linux_register_host(struct ahc_softc *ahc, struct scsi_host_template *templa
1120 ahc_set_unit(ahc, ahc_linux_unit++); 1120 ahc_set_unit(ahc, ahc_linux_unit++);
1121 ahc_unlock(ahc, &s); 1121 ahc_unlock(ahc, &s);
1122 sprintf(buf, "scsi%d", host->host_no); 1122 sprintf(buf, "scsi%d", host->host_no);
1123 new_name = malloc(strlen(buf) + 1, M_DEVBUF, M_NOWAIT); 1123 new_name = kmalloc(strlen(buf) + 1, GFP_ATOMIC);
1124 if (new_name != NULL) { 1124 if (new_name != NULL) {
1125 strcpy(new_name, buf); 1125 strcpy(new_name, buf);
1126 ahc_set_name(ahc, new_name); 1126 ahc_set_name(ahc, new_name);
@@ -1220,7 +1220,7 @@ ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
1220{ 1220{
1221 1221
1222 ahc->platform_data = 1222 ahc->platform_data =
1223 malloc(sizeof(struct ahc_platform_data), M_DEVBUF, M_NOWAIT); 1223 kmalloc(sizeof(struct ahc_platform_data), GFP_ATOMIC);
1224 if (ahc->platform_data == NULL) 1224 if (ahc->platform_data == NULL)
1225 return (ENOMEM); 1225 return (ENOMEM);
1226 memset(ahc->platform_data, 0, sizeof(struct ahc_platform_data)); 1226 memset(ahc->platform_data, 0, sizeof(struct ahc_platform_data));
@@ -1264,7 +1264,7 @@ ahc_platform_free(struct ahc_softc *ahc)
1264 if (ahc->platform_data->host) 1264 if (ahc->platform_data->host)
1265 scsi_host_put(ahc->platform_data->host); 1265 scsi_host_put(ahc->platform_data->host);
1266 1266
1267 free(ahc->platform_data, M_DEVBUF); 1267 kfree(ahc->platform_data);
1268 } 1268 }
1269} 1269}
1270 1270
@@ -1378,7 +1378,7 @@ ahc_linux_user_tagdepth(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
1378 if (ahc->unit >= ARRAY_SIZE(aic7xxx_tag_info)) { 1378 if (ahc->unit >= ARRAY_SIZE(aic7xxx_tag_info)) {
1379 if (warned_user == 0) { 1379 if (warned_user == 0) {
1380 1380
1381 printf(KERN_WARNING 1381 printk(KERN_WARNING
1382"aic7xxx: WARNING: Insufficient tag_info instances\n" 1382"aic7xxx: WARNING: Insufficient tag_info instances\n"
1383"aic7xxx: for installed controllers. Using defaults\n" 1383"aic7xxx: for installed controllers. Using defaults\n"
1384"aic7xxx: Please update the aic7xxx_tag_info array in\n" 1384"aic7xxx: Please update the aic7xxx_tag_info array in\n"
@@ -1421,7 +1421,7 @@ ahc_linux_device_queue_depth(struct scsi_device *sdev)
1421 ahc_send_async(ahc, devinfo.channel, devinfo.target, 1421 ahc_send_async(ahc, devinfo.channel, devinfo.target,
1422 devinfo.lun, AC_TRANSFER_NEG); 1422 devinfo.lun, AC_TRANSFER_NEG);
1423 ahc_print_devinfo(ahc, &devinfo); 1423 ahc_print_devinfo(ahc, &devinfo);
1424 printf("Tagged Queuing enabled. Depth %d\n", tags); 1424 printk("Tagged Queuing enabled. Depth %d\n", tags);
1425 } else { 1425 } else {
1426 ahc_platform_set_tags(ahc, sdev, &devinfo, AHC_QUEUE_NONE); 1426 ahc_platform_set_tags(ahc, sdev, &devinfo, AHC_QUEUE_NONE);
1427 ahc_send_async(ahc, devinfo.channel, devinfo.target, 1427 ahc_send_async(ahc, devinfo.channel, devinfo.target,
@@ -1735,7 +1735,7 @@ ahc_done(struct ahc_softc *ahc, struct scb *scb)
1735 * not have been dispatched to the controller, so 1735 * not have been dispatched to the controller, so
1736 * only check the SCB_ACTIVE flag for tagged transactions. 1736 * only check the SCB_ACTIVE flag for tagged transactions.
1737 */ 1737 */
1738 printf("SCB %d done'd twice\n", scb->hscb->tag); 1738 printk("SCB %d done'd twice\n", scb->hscb->tag);
1739 ahc_dump_card_state(ahc); 1739 ahc_dump_card_state(ahc);
1740 panic("Stopping for safety"); 1740 panic("Stopping for safety");
1741 } 1741 }
@@ -1765,7 +1765,7 @@ ahc_done(struct ahc_softc *ahc, struct scb *scb)
1765#ifdef AHC_DEBUG 1765#ifdef AHC_DEBUG
1766 if ((ahc_debug & AHC_SHOW_MISC) != 0) { 1766 if ((ahc_debug & AHC_SHOW_MISC) != 0) {
1767 ahc_print_path(ahc, scb); 1767 ahc_print_path(ahc, scb);
1768 printf("Set CAM_UNCOR_PARITY\n"); 1768 printk("Set CAM_UNCOR_PARITY\n");
1769 } 1769 }
1770#endif 1770#endif
1771 ahc_set_transaction_status(scb, CAM_UNCOR_PARITY); 1771 ahc_set_transaction_status(scb, CAM_UNCOR_PARITY);
@@ -1783,12 +1783,12 @@ ahc_done(struct ahc_softc *ahc, struct scb *scb)
1783 u_int i; 1783 u_int i;
1784 1784
1785 ahc_print_path(ahc, scb); 1785 ahc_print_path(ahc, scb);
1786 printf("CDB:"); 1786 printk("CDB:");
1787 for (i = 0; i < scb->io_ctx->cmd_len; i++) 1787 for (i = 0; i < scb->io_ctx->cmd_len; i++)
1788 printf(" 0x%x", scb->io_ctx->cmnd[i]); 1788 printk(" 0x%x", scb->io_ctx->cmnd[i]);
1789 printf("\n"); 1789 printk("\n");
1790 ahc_print_path(ahc, scb); 1790 ahc_print_path(ahc, scb);
1791 printf("Saw underflow (%ld of %ld bytes). " 1791 printk("Saw underflow (%ld of %ld bytes). "
1792 "Treated as error\n", 1792 "Treated as error\n",
1793 ahc_get_residual(scb), 1793 ahc_get_residual(scb),
1794 ahc_get_transfer_length(scb)); 1794 ahc_get_transfer_length(scb));
@@ -1821,7 +1821,7 @@ ahc_done(struct ahc_softc *ahc, struct scb *scb)
1821 dev->commands_since_idle_or_otag = 0; 1821 dev->commands_since_idle_or_otag = 0;
1822 1822
1823 if ((scb->flags & SCB_RECOVERY_SCB) != 0) { 1823 if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
1824 printf("Recovery SCB completes\n"); 1824 printk("Recovery SCB completes\n");
1825 if (ahc_get_transaction_status(scb) == CAM_BDR_SENT 1825 if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
1826 || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED) 1826 || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED)
1827 ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT); 1827 ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
@@ -1886,14 +1886,14 @@ ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
1886 if (ahc_debug & AHC_SHOW_SENSE) { 1886 if (ahc_debug & AHC_SHOW_SENSE) {
1887 int i; 1887 int i;
1888 1888
1889 printf("Copied %d bytes of sense data:", 1889 printk("Copied %d bytes of sense data:",
1890 sense_size); 1890 sense_size);
1891 for (i = 0; i < sense_size; i++) { 1891 for (i = 0; i < sense_size; i++) {
1892 if ((i & 0xF) == 0) 1892 if ((i & 0xF) == 0)
1893 printf("\n"); 1893 printk("\n");
1894 printf("0x%x ", cmd->sense_buffer[i]); 1894 printk("0x%x ", cmd->sense_buffer[i]);
1895 } 1895 }
1896 printf("\n"); 1896 printk("\n");
1897 } 1897 }
1898#endif 1898#endif
1899 } 1899 }
@@ -1918,7 +1918,7 @@ ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
1918 dev->openings = 0; 1918 dev->openings = 0;
1919/* 1919/*
1920 ahc_print_path(ahc, scb); 1920 ahc_print_path(ahc, scb);
1921 printf("Dropping tag count to %d\n", dev->active); 1921 printk("Dropping tag count to %d\n", dev->active);
1922 */ 1922 */
1923 if (dev->active == dev->tags_on_last_queuefull) { 1923 if (dev->active == dev->tags_on_last_queuefull) {
1924 1924
@@ -1935,7 +1935,7 @@ ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
1935 == AHC_LOCK_TAGS_COUNT) { 1935 == AHC_LOCK_TAGS_COUNT) {
1936 dev->maxtags = dev->active; 1936 dev->maxtags = dev->active;
1937 ahc_print_path(ahc, scb); 1937 ahc_print_path(ahc, scb);
1938 printf("Locking max tag count at %d\n", 1938 printk("Locking max tag count at %d\n",
1939 dev->active); 1939 dev->active);
1940 } 1940 }
1941 } else { 1941 } else {
@@ -2100,10 +2100,10 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
2100 scmd_printk(KERN_INFO, cmd, "Attempting to queue a%s message\n", 2100 scmd_printk(KERN_INFO, cmd, "Attempting to queue a%s message\n",
2101 flag == SCB_ABORT ? "n ABORT" : " TARGET RESET"); 2101 flag == SCB_ABORT ? "n ABORT" : " TARGET RESET");
2102 2102
2103 printf("CDB:"); 2103 printk("CDB:");
2104 for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++) 2104 for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++)
2105 printf(" 0x%x", cmd->cmnd[cdb_byte]); 2105 printk(" 0x%x", cmd->cmnd[cdb_byte]);
2106 printf("\n"); 2106 printk("\n");
2107 2107
2108 ahc_lock(ahc, &flags); 2108 ahc_lock(ahc, &flags);
2109 2109
@@ -2121,7 +2121,7 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
2121 * No target device for this command exists, 2121 * No target device for this command exists,
2122 * so we must not still own the command. 2122 * so we must not still own the command.
2123 */ 2123 */
2124 printf("%s:%d:%d:%d: Is not an active device\n", 2124 printk("%s:%d:%d:%d: Is not an active device\n",
2125 ahc_name(ahc), cmd->device->channel, cmd->device->id, 2125 ahc_name(ahc), cmd->device->channel, cmd->device->id,
2126 cmd->device->lun); 2126 cmd->device->lun);
2127 retval = SUCCESS; 2127 retval = SUCCESS;
@@ -2133,7 +2133,7 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
2133 cmd->device->channel + 'A', 2133 cmd->device->channel + 'A',
2134 cmd->device->lun, 2134 cmd->device->lun,
2135 CAM_REQ_ABORTED, SEARCH_COMPLETE) != 0) { 2135 CAM_REQ_ABORTED, SEARCH_COMPLETE) != 0) {
2136 printf("%s:%d:%d:%d: Command found on untagged queue\n", 2136 printk("%s:%d:%d:%d: Command found on untagged queue\n",
2137 ahc_name(ahc), cmd->device->channel, cmd->device->id, 2137 ahc_name(ahc), cmd->device->channel, cmd->device->id,
2138 cmd->device->lun); 2138 cmd->device->lun);
2139 retval = SUCCESS; 2139 retval = SUCCESS;
@@ -2187,7 +2187,7 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
2187 goto no_cmd; 2187 goto no_cmd;
2188 } 2188 }
2189 2189
2190 printf("%s: At time of recovery, card was %spaused\n", 2190 printk("%s: At time of recovery, card was %spaused\n",
2191 ahc_name(ahc), was_paused ? "" : "not "); 2191 ahc_name(ahc), was_paused ? "" : "not ");
2192 ahc_dump_card_state(ahc); 2192 ahc_dump_card_state(ahc);
2193 2193
@@ -2199,7 +2199,7 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
2199 pending_scb->hscb->tag, 2199 pending_scb->hscb->tag,
2200 ROLE_INITIATOR, CAM_REQ_ABORTED, 2200 ROLE_INITIATOR, CAM_REQ_ABORTED,
2201 SEARCH_COMPLETE) > 0) { 2201 SEARCH_COMPLETE) > 0) {
2202 printf("%s:%d:%d:%d: Cmd aborted from QINFIFO\n", 2202 printk("%s:%d:%d:%d: Cmd aborted from QINFIFO\n",
2203 ahc_name(ahc), cmd->device->channel, 2203 ahc_name(ahc), cmd->device->channel,
2204 cmd->device->id, cmd->device->lun); 2204 cmd->device->id, cmd->device->lun);
2205 retval = SUCCESS; 2205 retval = SUCCESS;
@@ -2313,7 +2313,7 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
2313 ahc_qinfifo_requeue_tail(ahc, pending_scb); 2313 ahc_qinfifo_requeue_tail(ahc, pending_scb);
2314 ahc_outb(ahc, SCBPTR, saved_scbptr); 2314 ahc_outb(ahc, SCBPTR, saved_scbptr);
2315 ahc_print_path(ahc, pending_scb); 2315 ahc_print_path(ahc, pending_scb);
2316 printf("Device is disconnected, re-queuing SCB\n"); 2316 printk("Device is disconnected, re-queuing SCB\n");
2317 wait = TRUE; 2317 wait = TRUE;
2318 } else { 2318 } else {
2319 scmd_printk(KERN_INFO, cmd, "Unable to deliver message\n"); 2319 scmd_printk(KERN_INFO, cmd, "Unable to deliver message\n");
@@ -2338,16 +2338,16 @@ done:
2338 ahc->platform_data->eh_done = &done; 2338 ahc->platform_data->eh_done = &done;
2339 ahc_unlock(ahc, &flags); 2339 ahc_unlock(ahc, &flags);
2340 2340
2341 printf("Recovery code sleeping\n"); 2341 printk("Recovery code sleeping\n");
2342 if (!wait_for_completion_timeout(&done, 5 * HZ)) { 2342 if (!wait_for_completion_timeout(&done, 5 * HZ)) {
2343 ahc_lock(ahc, &flags); 2343 ahc_lock(ahc, &flags);
2344 ahc->platform_data->eh_done = NULL; 2344 ahc->platform_data->eh_done = NULL;
2345 ahc_unlock(ahc, &flags); 2345 ahc_unlock(ahc, &flags);
2346 2346
2347 printf("Timer Expired\n"); 2347 printk("Timer Expired\n");
2348 retval = FAILED; 2348 retval = FAILED;
2349 } 2349 }
2350 printf("Recovery code awake\n"); 2350 printk("Recovery code awake\n");
2351 } else 2351 } else
2352 ahc_unlock(ahc, &flags); 2352 ahc_unlock(ahc, &flags);
2353 return (retval); 2353 return (retval);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h
index 56f07e527b48..bca0fb83f553 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h
@@ -368,13 +368,6 @@ struct ahc_platform_data {
368 resource_size_t mem_busaddr; /* Mem Base Addr */ 368 resource_size_t mem_busaddr; /* Mem Base Addr */
369}; 369};
370 370
371/************************** OS Utility Wrappers *******************************/
372#define printf printk
373#define M_NOWAIT GFP_ATOMIC
374#define M_WAITOK 0
375#define malloc(size, type, flags) kmalloc(size, flags)
376#define free(ptr, type) kfree(ptr)
377
378void ahc_delay(long); 371void ahc_delay(long);
379 372
380 373
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index 78fc70c24e07..ee05e8410754 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -225,7 +225,7 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
225 ahc_get_pci_bus(pci), 225 ahc_get_pci_bus(pci),
226 ahc_get_pci_slot(pci), 226 ahc_get_pci_slot(pci),
227 ahc_get_pci_function(pci)); 227 ahc_get_pci_function(pci));
228 name = malloc(strlen(buf) + 1, M_DEVBUF, M_NOWAIT); 228 name = kmalloc(strlen(buf) + 1, GFP_ATOMIC);
229 if (name == NULL) 229 if (name == NULL)
230 return (-ENOMEM); 230 return (-ENOMEM);
231 strcpy(name, buf); 231 strcpy(name, buf);
@@ -412,7 +412,7 @@ ahc_pci_map_registers(struct ahc_softc *ahc)
412 */ 412 */
413 if (ahc_pci_test_register_access(ahc) != 0) { 413 if (ahc_pci_test_register_access(ahc) != 0) {
414 414
415 printf("aic7xxx: PCI Device %d:%d:%d " 415 printk("aic7xxx: PCI Device %d:%d:%d "
416 "failed memory mapped test. Using PIO.\n", 416 "failed memory mapped test. Using PIO.\n",
417 ahc_get_pci_bus(ahc->dev_softc), 417 ahc_get_pci_bus(ahc->dev_softc),
418 ahc_get_pci_slot(ahc->dev_softc), 418 ahc_get_pci_slot(ahc->dev_softc),
@@ -425,7 +425,7 @@ ahc_pci_map_registers(struct ahc_softc *ahc)
425 } else 425 } else
426 command |= PCIM_CMD_MEMEN; 426 command |= PCIM_CMD_MEMEN;
427 } else { 427 } else {
428 printf("aic7xxx: PCI%d:%d:%d MEM region 0x%llx " 428 printk("aic7xxx: PCI%d:%d:%d MEM region 0x%llx "
429 "unavailable. Cannot memory map device.\n", 429 "unavailable. Cannot memory map device.\n",
430 ahc_get_pci_bus(ahc->dev_softc), 430 ahc_get_pci_bus(ahc->dev_softc),
431 ahc_get_pci_slot(ahc->dev_softc), 431 ahc_get_pci_slot(ahc->dev_softc),
@@ -444,7 +444,7 @@ ahc_pci_map_registers(struct ahc_softc *ahc)
444 ahc->bsh.ioport = (u_long)base; 444 ahc->bsh.ioport = (u_long)base;
445 command |= PCIM_CMD_PORTEN; 445 command |= PCIM_CMD_PORTEN;
446 } else { 446 } else {
447 printf("aic7xxx: PCI%d:%d:%d IO region 0x%llx[0..255] " 447 printk("aic7xxx: PCI%d:%d:%d IO region 0x%llx[0..255] "
448 "unavailable. Cannot map device.\n", 448 "unavailable. Cannot map device.\n",
449 ahc_get_pci_bus(ahc->dev_softc), 449 ahc_get_pci_bus(ahc->dev_softc),
450 ahc_get_pci_slot(ahc->dev_softc), 450 ahc_get_pci_slot(ahc->dev_softc),
diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c
index 27014b9de126..2b11a4272364 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c
@@ -752,7 +752,7 @@ ahc_pci_config(struct ahc_softc *ahc, const struct ahc_pci_identity *entry)
752 if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { 752 if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
753 753
754 if (bootverbose) 754 if (bootverbose)
755 printf("%s: Enabling 39Bit Addressing\n", 755 printk("%s: Enabling 39Bit Addressing\n",
756 ahc_name(ahc)); 756 ahc_name(ahc));
757 devconfig |= DACEN; 757 devconfig |= DACEN;
758 } 758 }
@@ -896,7 +896,7 @@ ahc_pci_config(struct ahc_softc *ahc, const struct ahc_pci_identity *entry)
896 /* See if someone else set us up already */ 896 /* See if someone else set us up already */
897 if ((ahc->flags & AHC_NO_BIOS_INIT) == 0 897 if ((ahc->flags & AHC_NO_BIOS_INIT) == 0
898 && scsiseq != 0) { 898 && scsiseq != 0) {
899 printf("%s: Using left over BIOS settings\n", 899 printk("%s: Using left over BIOS settings\n",
900 ahc_name(ahc)); 900 ahc_name(ahc));
901 ahc->flags &= ~AHC_USEDEFAULTS; 901 ahc->flags &= ~AHC_USEDEFAULTS;
902 ahc->flags |= AHC_BIOS_ENABLED; 902 ahc->flags |= AHC_BIOS_ENABLED;
@@ -1155,7 +1155,7 @@ done:
1155 ahc_outb(ahc, CLRINT, CLRPARERR); 1155 ahc_outb(ahc, CLRINT, CLRPARERR);
1156 ahc_outb(ahc, CLRINT, CLRBRKADRINT); 1156 ahc_outb(ahc, CLRINT, CLRBRKADRINT);
1157 if (bootverbose && enable) { 1157 if (bootverbose && enable) {
1158 printf("%s: External SRAM, %s access%s, %dbytes/SCB\n", 1158 printk("%s: External SRAM, %s access%s, %dbytes/SCB\n",
1159 ahc_name(ahc), fast ? "fast" : "slow", 1159 ahc_name(ahc), fast ? "fast" : "slow",
1160 pcheck ? ", parity checking enabled" : "", 1160 pcheck ? ", parity checking enabled" : "",
1161 large ? 64 : 32); 1161 large ? 64 : 32);
@@ -1292,7 +1292,7 @@ check_extport(struct ahc_softc *ahc, u_int *sxfrctl1)
1292 if (have_seeprom) { 1292 if (have_seeprom) {
1293 1293
1294 if (bootverbose) 1294 if (bootverbose)
1295 printf("%s: Reading SEEPROM...", ahc_name(ahc)); 1295 printk("%s: Reading SEEPROM...", ahc_name(ahc));
1296 1296
1297 for (;;) { 1297 for (;;) {
1298 u_int start_addr; 1298 u_int start_addr;
@@ -1309,9 +1309,9 @@ check_extport(struct ahc_softc *ahc, u_int *sxfrctl1)
1309 if (have_seeprom != 0 || sd.sd_chip == C56_66) { 1309 if (have_seeprom != 0 || sd.sd_chip == C56_66) {
1310 if (bootverbose) { 1310 if (bootverbose) {
1311 if (have_seeprom == 0) 1311 if (have_seeprom == 0)
1312 printf ("checksum error\n"); 1312 printk ("checksum error\n");
1313 else 1313 else
1314 printf ("done.\n"); 1314 printk ("done.\n");
1315 } 1315 }
1316 break; 1316 break;
1317 } 1317 }
@@ -1362,9 +1362,9 @@ check_extport(struct ahc_softc *ahc, u_int *sxfrctl1)
1362 1362
1363 if (!have_seeprom) { 1363 if (!have_seeprom) {
1364 if (bootverbose) 1364 if (bootverbose)
1365 printf("%s: No SEEPROM available.\n", ahc_name(ahc)); 1365 printk("%s: No SEEPROM available.\n", ahc_name(ahc));
1366 ahc->flags |= AHC_USEDEFAULTS; 1366 ahc->flags |= AHC_USEDEFAULTS;
1367 free(ahc->seep_config, M_DEVBUF); 1367 kfree(ahc->seep_config);
1368 ahc->seep_config = NULL; 1368 ahc->seep_config = NULL;
1369 sc = NULL; 1369 sc = NULL;
1370 } else { 1370 } else {
@@ -1399,7 +1399,7 @@ check_extport(struct ahc_softc *ahc, u_int *sxfrctl1)
1399 if ((sc->adapter_control & CFSTERM) != 0) 1399 if ((sc->adapter_control & CFSTERM) != 0)
1400 *sxfrctl1 |= STPWEN; 1400 *sxfrctl1 |= STPWEN;
1401 if (bootverbose) 1401 if (bootverbose)
1402 printf("%s: Low byte termination %sabled\n", 1402 printk("%s: Low byte termination %sabled\n",
1403 ahc_name(ahc), 1403 ahc_name(ahc),
1404 (*sxfrctl1 & STPWEN) ? "en" : "dis"); 1404 (*sxfrctl1 & STPWEN) ? "en" : "dis");
1405 } 1405 }
@@ -1569,7 +1569,7 @@ configure_termination(struct ahc_softc *ahc,
1569 &eeprom_present); 1569 &eeprom_present);
1570 if ((adapter_control & CFSEAUTOTERM) == 0) { 1570 if ((adapter_control & CFSEAUTOTERM) == 0) {
1571 if (bootverbose) 1571 if (bootverbose)
1572 printf("%s: Manual SE Termination\n", 1572 printk("%s: Manual SE Termination\n",
1573 ahc_name(ahc)); 1573 ahc_name(ahc));
1574 enableSEC_low = (adapter_control & CFSELOWTERM); 1574 enableSEC_low = (adapter_control & CFSELOWTERM);
1575 enableSEC_high = 1575 enableSEC_high =
@@ -1577,7 +1577,7 @@ configure_termination(struct ahc_softc *ahc,
1577 } 1577 }
1578 if ((adapter_control & CFAUTOTERM) == 0) { 1578 if ((adapter_control & CFAUTOTERM) == 0) {
1579 if (bootverbose) 1579 if (bootverbose)
1580 printf("%s: Manual LVD Termination\n", 1580 printk("%s: Manual LVD Termination\n",
1581 ahc_name(ahc)); 1581 ahc_name(ahc));
1582 enablePRI_low = (adapter_control & CFSTERM); 1582 enablePRI_low = (adapter_control & CFSTERM);
1583 enablePRI_high = (adapter_control & CFWSTERM); 1583 enablePRI_high = (adapter_control & CFWSTERM);
@@ -1604,19 +1604,19 @@ configure_termination(struct ahc_softc *ahc,
1604 1604
1605 if (bootverbose 1605 if (bootverbose
1606 && (ahc->features & AHC_ULTRA2) == 0) { 1606 && (ahc->features & AHC_ULTRA2) == 0) {
1607 printf("%s: internal 50 cable %s present", 1607 printk("%s: internal 50 cable %s present",
1608 ahc_name(ahc), 1608 ahc_name(ahc),
1609 internal50_present ? "is":"not"); 1609 internal50_present ? "is":"not");
1610 1610
1611 if ((ahc->features & AHC_WIDE) != 0) 1611 if ((ahc->features & AHC_WIDE) != 0)
1612 printf(", internal 68 cable %s present", 1612 printk(", internal 68 cable %s present",
1613 internal68_present ? "is":"not"); 1613 internal68_present ? "is":"not");
1614 printf("\n%s: external cable %s present\n", 1614 printk("\n%s: external cable %s present\n",
1615 ahc_name(ahc), 1615 ahc_name(ahc),
1616 externalcable_present ? "is":"not"); 1616 externalcable_present ? "is":"not");
1617 } 1617 }
1618 if (bootverbose) 1618 if (bootverbose)
1619 printf("%s: BIOS eeprom %s present\n", 1619 printk("%s: BIOS eeprom %s present\n",
1620 ahc_name(ahc), eeprom_present ? "is" : "not"); 1620 ahc_name(ahc), eeprom_present ? "is" : "not");
1621 1621
1622 if ((ahc->flags & AHC_INT50_SPEEDFLEX) != 0) { 1622 if ((ahc->flags & AHC_INT50_SPEEDFLEX) != 0) {
@@ -1642,7 +1642,7 @@ configure_termination(struct ahc_softc *ahc,
1642 && (internal50_present != 0) 1642 && (internal50_present != 0)
1643 && (internal68_present != 0) 1643 && (internal68_present != 0)
1644 && (externalcable_present != 0)) { 1644 && (externalcable_present != 0)) {
1645 printf("%s: Illegal cable configuration!!. " 1645 printk("%s: Illegal cable configuration!!. "
1646 "Only two connectors on the " 1646 "Only two connectors on the "
1647 "adapter may be used at a " 1647 "adapter may be used at a "
1648 "time!\n", ahc_name(ahc)); 1648 "time!\n", ahc_name(ahc));
@@ -1664,10 +1664,10 @@ configure_termination(struct ahc_softc *ahc,
1664 brddat |= BRDDAT6; 1664 brddat |= BRDDAT6;
1665 if (bootverbose) { 1665 if (bootverbose) {
1666 if ((ahc->flags & AHC_INT50_SPEEDFLEX) != 0) 1666 if ((ahc->flags & AHC_INT50_SPEEDFLEX) != 0)
1667 printf("%s: 68 pin termination " 1667 printk("%s: 68 pin termination "
1668 "Enabled\n", ahc_name(ahc)); 1668 "Enabled\n", ahc_name(ahc));
1669 else 1669 else
1670 printf("%s: %sHigh byte termination " 1670 printk("%s: %sHigh byte termination "
1671 "Enabled\n", ahc_name(ahc), 1671 "Enabled\n", ahc_name(ahc),
1672 enableSEC_high ? "Secondary " 1672 enableSEC_high ? "Secondary "
1673 : ""); 1673 : "");
@@ -1683,10 +1683,10 @@ configure_termination(struct ahc_softc *ahc,
1683 *sxfrctl1 |= STPWEN; 1683 *sxfrctl1 |= STPWEN;
1684 if (bootverbose) { 1684 if (bootverbose) {
1685 if ((ahc->flags & AHC_INT50_SPEEDFLEX) != 0) 1685 if ((ahc->flags & AHC_INT50_SPEEDFLEX) != 0)
1686 printf("%s: 50 pin termination " 1686 printk("%s: 50 pin termination "
1687 "Enabled\n", ahc_name(ahc)); 1687 "Enabled\n", ahc_name(ahc));
1688 else 1688 else
1689 printf("%s: %sLow byte termination " 1689 printk("%s: %sLow byte termination "
1690 "Enabled\n", ahc_name(ahc), 1690 "Enabled\n", ahc_name(ahc),
1691 enableSEC_low ? "Secondary " 1691 enableSEC_low ? "Secondary "
1692 : ""); 1692 : "");
@@ -1696,7 +1696,7 @@ configure_termination(struct ahc_softc *ahc,
1696 if (enablePRI_low != 0) { 1696 if (enablePRI_low != 0) {
1697 *sxfrctl1 |= STPWEN; 1697 *sxfrctl1 |= STPWEN;
1698 if (bootverbose) 1698 if (bootverbose)
1699 printf("%s: Primary Low Byte termination " 1699 printk("%s: Primary Low Byte termination "
1700 "Enabled\n", ahc_name(ahc)); 1700 "Enabled\n", ahc_name(ahc));
1701 } 1701 }
1702 1702
@@ -1709,7 +1709,7 @@ configure_termination(struct ahc_softc *ahc,
1709 if (enablePRI_high != 0) { 1709 if (enablePRI_high != 0) {
1710 brddat |= BRDDAT4; 1710 brddat |= BRDDAT4;
1711 if (bootverbose) 1711 if (bootverbose)
1712 printf("%s: Primary High Byte " 1712 printk("%s: Primary High Byte "
1713 "termination Enabled\n", 1713 "termination Enabled\n",
1714 ahc_name(ahc)); 1714 ahc_name(ahc));
1715 } 1715 }
@@ -1721,7 +1721,7 @@ configure_termination(struct ahc_softc *ahc,
1721 *sxfrctl1 |= STPWEN; 1721 *sxfrctl1 |= STPWEN;
1722 1722
1723 if (bootverbose) 1723 if (bootverbose)
1724 printf("%s: %sLow byte termination Enabled\n", 1724 printk("%s: %sLow byte termination Enabled\n",
1725 ahc_name(ahc), 1725 ahc_name(ahc),
1726 (ahc->features & AHC_ULTRA2) ? "Primary " 1726 (ahc->features & AHC_ULTRA2) ? "Primary "
1727 : ""); 1727 : "");
@@ -1731,7 +1731,7 @@ configure_termination(struct ahc_softc *ahc,
1731 && (ahc->features & AHC_WIDE) != 0) { 1731 && (ahc->features & AHC_WIDE) != 0) {
1732 brddat |= BRDDAT6; 1732 brddat |= BRDDAT6;
1733 if (bootverbose) 1733 if (bootverbose)
1734 printf("%s: %sHigh byte termination Enabled\n", 1734 printk("%s: %sHigh byte termination Enabled\n",
1735 ahc_name(ahc), 1735 ahc_name(ahc),
1736 (ahc->features & AHC_ULTRA2) 1736 (ahc->features & AHC_ULTRA2)
1737 ? "Secondary " : ""); 1737 ? "Secondary " : "");
@@ -1937,29 +1937,29 @@ ahc_pci_intr(struct ahc_softc *ahc)
1937 status1 = ahc_pci_read_config(ahc->dev_softc, 1937 status1 = ahc_pci_read_config(ahc->dev_softc,
1938 PCIR_STATUS + 1, /*bytes*/1); 1938 PCIR_STATUS + 1, /*bytes*/1);
1939 1939
1940 printf("%s: PCI error Interrupt at seqaddr = 0x%x\n", 1940 printk("%s: PCI error Interrupt at seqaddr = 0x%x\n",
1941 ahc_name(ahc), 1941 ahc_name(ahc),
1942 ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8)); 1942 ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));
1943 1943
1944 if (status1 & DPE) { 1944 if (status1 & DPE) {
1945 ahc->pci_target_perr_count++; 1945 ahc->pci_target_perr_count++;
1946 printf("%s: Data Parity Error Detected during address " 1946 printk("%s: Data Parity Error Detected during address "
1947 "or write data phase\n", ahc_name(ahc)); 1947 "or write data phase\n", ahc_name(ahc));
1948 } 1948 }
1949 if (status1 & SSE) { 1949 if (status1 & SSE) {
1950 printf("%s: Signal System Error Detected\n", ahc_name(ahc)); 1950 printk("%s: Signal System Error Detected\n", ahc_name(ahc));
1951 } 1951 }
1952 if (status1 & RMA) { 1952 if (status1 & RMA) {
1953 printf("%s: Received a Master Abort\n", ahc_name(ahc)); 1953 printk("%s: Received a Master Abort\n", ahc_name(ahc));
1954 } 1954 }
1955 if (status1 & RTA) { 1955 if (status1 & RTA) {
1956 printf("%s: Received a Target Abort\n", ahc_name(ahc)); 1956 printk("%s: Received a Target Abort\n", ahc_name(ahc));
1957 } 1957 }
1958 if (status1 & STA) { 1958 if (status1 & STA) {
1959 printf("%s: Signaled a Target Abort\n", ahc_name(ahc)); 1959 printk("%s: Signaled a Target Abort\n", ahc_name(ahc));
1960 } 1960 }
1961 if (status1 & DPR) { 1961 if (status1 & DPR) {
1962 printf("%s: Data Parity Error has been reported via PERR#\n", 1962 printk("%s: Data Parity Error has been reported via PERR#\n",
1963 ahc_name(ahc)); 1963 ahc_name(ahc));
1964 } 1964 }
1965 1965
@@ -1968,14 +1968,14 @@ ahc_pci_intr(struct ahc_softc *ahc)
1968 status1, /*bytes*/1); 1968 status1, /*bytes*/1);
1969 1969
1970 if ((status1 & (DPE|SSE|RMA|RTA|STA|DPR)) == 0) { 1970 if ((status1 & (DPE|SSE|RMA|RTA|STA|DPR)) == 0) {
1971 printf("%s: Latched PCIERR interrupt with " 1971 printk("%s: Latched PCIERR interrupt with "
1972 "no status bits set\n", ahc_name(ahc)); 1972 "no status bits set\n", ahc_name(ahc));
1973 } else { 1973 } else {
1974 ahc_outb(ahc, CLRINT, CLRPARERR); 1974 ahc_outb(ahc, CLRINT, CLRPARERR);
1975 } 1975 }
1976 1976
1977 if (ahc->pci_target_perr_count > AHC_PCI_TARGET_PERR_THRESH) { 1977 if (ahc->pci_target_perr_count > AHC_PCI_TARGET_PERR_THRESH) {
1978 printf( 1978 printk(
1979"%s: WARNING WARNING WARNING WARNING\n" 1979"%s: WARNING WARNING WARNING WARNING\n"
1980"%s: Too many PCI parity errors observed as a target.\n" 1980"%s: Too many PCI parity errors observed as a target.\n"
1981"%s: Some device on this bus is generating bad parity.\n" 1981"%s: Some device on this bus is generating bad parity.\n"
@@ -2386,7 +2386,7 @@ ahc_aha29160C_setup(struct ahc_softc *ahc)
2386static int 2386static int
2387ahc_raid_setup(struct ahc_softc *ahc) 2387ahc_raid_setup(struct ahc_softc *ahc)
2388{ 2388{
2389 printf("RAID functionality unsupported\n"); 2389 printk("RAID functionality unsupported\n");
2390 return (ENXIO); 2390 return (ENXIO);
2391} 2391}
2392 2392
@@ -2404,7 +2404,7 @@ ahc_aha394XX_setup(struct ahc_softc *ahc)
2404 ahc->channel = 'B'; 2404 ahc->channel = 'B';
2405 break; 2405 break;
2406 default: 2406 default:
2407 printf("adapter at unexpected slot %d\n" 2407 printk("adapter at unexpected slot %d\n"
2408 "unable to map to a channel\n", 2408 "unable to map to a channel\n",
2409 ahc_get_pci_slot(pci)); 2409 ahc_get_pci_slot(pci));
2410 ahc->channel = 'A'; 2410 ahc->channel = 'A';
@@ -2429,7 +2429,7 @@ ahc_aha398XX_setup(struct ahc_softc *ahc)
2429 ahc->channel = 'C'; 2429 ahc->channel = 'C';
2430 break; 2430 break;
2431 default: 2431 default:
2432 printf("adapter at unexpected slot %d\n" 2432 printk("adapter at unexpected slot %d\n"
2433 "unable to map to a channel\n", 2433 "unable to map to a channel\n",
2434 ahc_get_pci_slot(pci)); 2434 ahc_get_pci_slot(pci));
2435 ahc->channel = 'A'; 2435 ahc->channel = 'A';
@@ -2459,7 +2459,7 @@ ahc_aha494XX_setup(struct ahc_softc *ahc)
2459 ahc->channel = 'D'; 2459 ahc->channel = 'D';
2460 break; 2460 break;
2461 default: 2461 default:
2462 printf("adapter at unexpected slot %d\n" 2462 printk("adapter at unexpected slot %d\n"
2463 "unable to map to a channel\n", 2463 "unable to map to a channel\n",
2464 ahc_get_pci_slot(pci)); 2464 ahc_get_pci_slot(pci));
2465 ahc->channel = 'A'; 2465 ahc->channel = 'A';
diff --git a/drivers/scsi/aic7xxx/aic7xxx_proc.c b/drivers/scsi/aic7xxx/aic7xxx_proc.c
index e92991a7c485..f2525f8ed1c7 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_proc.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_proc.c
@@ -248,13 +248,13 @@ ahc_proc_write_seeprom(struct ahc_softc *ahc, char *buffer, int length)
248 ahc_pause(ahc); 248 ahc_pause(ahc);
249 249
250 if (length != sizeof(struct seeprom_config)) { 250 if (length != sizeof(struct seeprom_config)) {
251 printf("ahc_proc_write_seeprom: incorrect buffer size\n"); 251 printk("ahc_proc_write_seeprom: incorrect buffer size\n");
252 goto done; 252 goto done;
253 } 253 }
254 254
255 have_seeprom = ahc_verify_cksum((struct seeprom_config*)buffer); 255 have_seeprom = ahc_verify_cksum((struct seeprom_config*)buffer);
256 if (have_seeprom == 0) { 256 if (have_seeprom == 0) {
257 printf("ahc_proc_write_seeprom: cksum verification failed\n"); 257 printk("ahc_proc_write_seeprom: cksum verification failed\n");
258 goto done; 258 goto done;
259 } 259 }
260 260
@@ -290,26 +290,25 @@ ahc_proc_write_seeprom(struct ahc_softc *ahc, char *buffer, int length)
290 sd.sd_DI = DI_2840; 290 sd.sd_DI = DI_2840;
291 have_seeprom = TRUE; 291 have_seeprom = TRUE;
292 } else { 292 } else {
293 printf("ahc_proc_write_seeprom: unsupported adapter type\n"); 293 printk("ahc_proc_write_seeprom: unsupported adapter type\n");
294 goto done; 294 goto done;
295 } 295 }
296 296
297 if (!have_seeprom) { 297 if (!have_seeprom) {
298 printf("ahc_proc_write_seeprom: No Serial EEPROM\n"); 298 printk("ahc_proc_write_seeprom: No Serial EEPROM\n");
299 goto done; 299 goto done;
300 } else { 300 } else {
301 u_int start_addr; 301 u_int start_addr;
302 302
303 if (ahc->seep_config == NULL) { 303 if (ahc->seep_config == NULL) {
304 ahc->seep_config = malloc(sizeof(*ahc->seep_config), 304 ahc->seep_config = kmalloc(sizeof(*ahc->seep_config), GFP_ATOMIC);
305 M_DEVBUF, M_NOWAIT);
306 if (ahc->seep_config == NULL) { 305 if (ahc->seep_config == NULL) {
307 printf("aic7xxx: Unable to allocate serial " 306 printk("aic7xxx: Unable to allocate serial "
308 "eeprom buffer. Write failing\n"); 307 "eeprom buffer. Write failing\n");
309 goto done; 308 goto done;
310 } 309 }
311 } 310 }
312 printf("aic7xxx: Writing Serial EEPROM\n"); 311 printk("aic7xxx: Writing Serial EEPROM\n");
313 start_addr = 32 * (ahc->channel - 'A'); 312 start_addr = 32 * (ahc->channel - 'A');
314 ahc_write_seeprom(&sd, (u_int16_t *)buffer, start_addr, 313 ahc_write_seeprom(&sd, (u_int16_t *)buffer, start_addr,
315 sizeof(struct seeprom_config)/2); 314 sizeof(struct seeprom_config)/2);
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index 75d20f72501f..532d212b6b2c 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -223,7 +223,7 @@ Again:
223 switch (opcode) { 223 switch (opcode) {
224 case TC_NO_ERROR: 224 case TC_NO_ERROR:
225 ts->resp = SAS_TASK_COMPLETE; 225 ts->resp = SAS_TASK_COMPLETE;
226 ts->stat = SAM_GOOD; 226 ts->stat = SAM_STAT_GOOD;
227 break; 227 break;
228 case TC_UNDERRUN: 228 case TC_UNDERRUN:
229 ts->resp = SAS_TASK_COMPLETE; 229 ts->resp = SAS_TASK_COMPLETE;
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index ce5371b3cdd5..475c31ae985c 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -43,21 +43,27 @@
43******************************************************************************* 43*******************************************************************************
44*/ 44*/
45#include <linux/interrupt.h> 45#include <linux/interrupt.h>
46
47struct device_attribute; 46struct device_attribute;
48/*The limit of outstanding scsi command that firmware can handle*/ 47/*The limit of outstanding scsi command that firmware can handle*/
49#define ARCMSR_MAX_OUTSTANDING_CMD 256 48#define ARCMSR_MAX_OUTSTANDING_CMD 256
50#define ARCMSR_MAX_FREECCB_NUM 320 49#define ARCMSR_MAX_FREECCB_NUM 320
51#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2008/11/03" 50#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2010/02/02"
52#define ARCMSR_SCSI_INITIATOR_ID 255 51#define ARCMSR_SCSI_INITIATOR_ID 255
53#define ARCMSR_MAX_XFER_SECTORS 512 52#define ARCMSR_MAX_XFER_SECTORS 512
54#define ARCMSR_MAX_XFER_SECTORS_B 4096 53#define ARCMSR_MAX_XFER_SECTORS_B 4096
54#define ARCMSR_MAX_XFER_SECTORS_C 304
55#define ARCMSR_MAX_TARGETID 17 55#define ARCMSR_MAX_TARGETID 17
56#define ARCMSR_MAX_TARGETLUN 8 56#define ARCMSR_MAX_TARGETLUN 8
57#define ARCMSR_MAX_CMD_PERLUN ARCMSR_MAX_OUTSTANDING_CMD 57#define ARCMSR_MAX_CMD_PERLUN ARCMSR_MAX_OUTSTANDING_CMD
58#define ARCMSR_MAX_QBUFFER 4096 58#define ARCMSR_MAX_QBUFFER 4096
59#define ARCMSR_MAX_SG_ENTRIES 38 59#define ARCMSR_DEFAULT_SG_ENTRIES 38
60#define ARCMSR_MAX_HBB_POSTQUEUE 264 60#define ARCMSR_MAX_HBB_POSTQUEUE 264
61#define ARCMSR_MAX_XFER_LEN 0x26000 /* 152K */
62#define ARCMSR_CDB_SG_PAGE_LENGTH 256
63#define SCSI_CMD_ARECA_SPECIFIC 0xE1
64#ifndef PCI_DEVICE_ID_ARECA_1880
65#define PCI_DEVICE_ID_ARECA_1880 0x1880
66 #endif
61/* 67/*
62********************************************************************************** 68**********************************************************************************
63** 69**
@@ -132,35 +138,28 @@ struct CMD_MESSAGE_FIELD
132#define ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE \ 138#define ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE \
133 ARECA_SATA_RAID | FUNCTION_FLUSH_ADAPTER_CACHE 139 ARECA_SATA_RAID | FUNCTION_FLUSH_ADAPTER_CACHE
134/* ARECA IOCTL ReturnCode */ 140/* ARECA IOCTL ReturnCode */
135#define ARCMSR_MESSAGE_RETURNCODE_OK 0x00000001 141#define ARCMSR_MESSAGE_RETURNCODE_OK 0x00000001
136#define ARCMSR_MESSAGE_RETURNCODE_ERROR 0x00000006 142#define ARCMSR_MESSAGE_RETURNCODE_ERROR 0x00000006
137#define ARCMSR_MESSAGE_RETURNCODE_3F 0x0000003F 143#define ARCMSR_MESSAGE_RETURNCODE_3F 0x0000003F
138#define ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON 0x00000088 144#define ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON 0x00000088
139/* 145/*
140************************************************************* 146*************************************************************
141** structure for holding DMA address data 147** structure for holding DMA address data
142************************************************************* 148*************************************************************
143*/ 149*/
150#define IS_DMA64 (sizeof(dma_addr_t) == 8)
144#define IS_SG64_ADDR 0x01000000 /* bit24 */ 151#define IS_SG64_ADDR 0x01000000 /* bit24 */
145struct SG32ENTRY 152struct SG32ENTRY
146{ 153{
147 __le32 length; 154 __le32 length;
148 __le32 address; 155 __le32 address;
149}; 156}__attribute__ ((packed));
150struct SG64ENTRY 157struct SG64ENTRY
151{ 158{
152 __le32 length; 159 __le32 length;
153 __le32 address; 160 __le32 address;
154 __le32 addresshigh; 161 __le32 addresshigh;
155}; 162}__attribute__ ((packed));
156struct SGENTRY_UNION
157{
158 union
159 {
160 struct SG32ENTRY sg32entry;
161 struct SG64ENTRY sg64entry;
162 }u;
163};
164/* 163/*
165******************************************************************** 164********************************************************************
166** Q Buffer of IOP Message Transfer 165** Q Buffer of IOP Message Transfer
@@ -187,6 +186,9 @@ struct FIRMWARE_INFO
187 char model[8]; /*15, 60-67*/ 186 char model[8]; /*15, 60-67*/
188 char firmware_ver[16]; /*17, 68-83*/ 187 char firmware_ver[16]; /*17, 68-83*/
189 char device_map[16]; /*21, 84-99*/ 188 char device_map[16]; /*21, 84-99*/
189 uint32_t cfgVersion; /*25,100-103 Added for checking of new firmware capability*/
190 uint8_t cfgSerial[16]; /*26,104-119*/
191 uint32_t cfgPicStatus; /*30,120-123*/
190}; 192};
191/* signature of set and get firmware config */ 193/* signature of set and get firmware config */
192#define ARCMSR_SIGNATURE_GET_CONFIG 0x87974060 194#define ARCMSR_SIGNATURE_GET_CONFIG 0x87974060
@@ -210,9 +212,15 @@ struct FIRMWARE_INFO
210#define ARCMSR_CCBPOST_FLAG_SGL_BSIZE 0x80000000 212#define ARCMSR_CCBPOST_FLAG_SGL_BSIZE 0x80000000
211#define ARCMSR_CCBPOST_FLAG_IAM_BIOS 0x40000000 213#define ARCMSR_CCBPOST_FLAG_IAM_BIOS 0x40000000
212#define ARCMSR_CCBREPLY_FLAG_IAM_BIOS 0x40000000 214#define ARCMSR_CCBREPLY_FLAG_IAM_BIOS 0x40000000
213#define ARCMSR_CCBREPLY_FLAG_ERROR 0x10000000 215#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE0 0x10000000
216#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE1 0x00000001
214/* outbound firmware ok */ 217/* outbound firmware ok */
215#define ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK 0x80000000 218#define ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK 0x80000000
219/* ARC-1680 Bus Reset*/
220#define ARCMSR_ARC1680_BUS_RESET 0x00000003
221/* ARC-1880 Bus Reset*/
222#define ARCMSR_ARC1880_RESET_ADAPTER 0x00000024
223#define ARCMSR_ARC1880_DiagWrite_ENABLE 0x00000080
216 224
217/* 225/*
218************************************************************************ 226************************************************************************
@@ -264,11 +272,66 @@ struct FIRMWARE_INFO
264 272
265/* data tunnel buffer between user space program and its firmware */ 273/* data tunnel buffer between user space program and its firmware */
266/* user space data to iop 128bytes */ 274/* user space data to iop 128bytes */
267#define ARCMSR_IOCTL_WBUFFER 0x0000fe00 275#define ARCMSR_MESSAGE_WBUFFER 0x0000fe00
268/* iop data to user space 128bytes */ 276/* iop data to user space 128bytes */
269#define ARCMSR_IOCTL_RBUFFER 0x0000ff00 277#define ARCMSR_MESSAGE_RBUFFER 0x0000ff00
270/* iop message_rwbuffer for message command */ 278/* iop message_rwbuffer for message command */
271#define ARCMSR_MSGCODE_RWBUFFER 0x0000fa00 279#define ARCMSR_MESSAGE_RWBUFFER 0x0000fa00
280/*
281************************************************************************
282** SPEC. for Areca HBC adapter
283************************************************************************
284*/
285#define ARCMSR_HBC_ISR_THROTTLING_LEVEL 12
286#define ARCMSR_HBC_ISR_MAX_DONE_QUEUE 20
287/* Host Interrupt Mask */
288#define ARCMSR_HBCMU_UTILITY_A_ISR_MASK 0x00000001 /* When clear, the Utility_A interrupt routes to the host.*/
289#define ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK 0x00000004 /* When clear, the General Outbound Doorbell interrupt routes to the host.*/
290#define ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK 0x00000008 /* When clear, the Outbound Post List FIFO Not Empty interrupt routes to the host.*/
291#define ARCMSR_HBCMU_ALL_INTMASKENABLE 0x0000000D /* disable all ISR */
292/* Host Interrupt Status */
293#define ARCMSR_HBCMU_UTILITY_A_ISR 0x00000001
294 /*
295 ** Set when the Utility_A Interrupt bit is set in the Outbound Doorbell Register.
296 ** It clears by writing a 1 to the Utility_A bit in the Outbound Doorbell Clear Register or through automatic clearing (if enabled).
297 */
298#define ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR 0x00000004
299 /*
300 ** Set if Outbound Doorbell register bits 30:1 have a non-zero
301 ** value. This bit clears only when Outbound Doorbell bits
302 ** 30:1 are ALL clear. Only a write to the Outbound Doorbell
303 ** Clear register clears bits in the Outbound Doorbell register.
304 */
305#define ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR 0x00000008
306 /*
307 ** Set whenever the Outbound Post List Producer/Consumer
308 ** Register (FIFO) is not empty. It clears when the Outbound
309 ** Post List FIFO is empty.
310 */
311#define ARCMSR_HBCMU_SAS_ALL_INT 0x00000010
312 /*
313 ** This bit indicates a SAS interrupt from a source external to
314 ** the PCIe core. This bit is not maskable.
315 */
316 /* DoorBell*/
317#define ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK 0x00000002
318#define ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK 0x00000004
319 /*inbound message 0 ready*/
320#define ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE 0x00000008
321 /*more than 12 request completed in a time*/
322#define ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING 0x00000010
323#define ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK 0x00000002
324 /*outbound DATA WRITE isr door bell clear*/
325#define ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_DOORBELL_CLEAR 0x00000002
326#define ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK 0x00000004
327 /*outbound DATA READ isr door bell clear*/
328#define ARCMSR_HBCMU_IOP2DRV_DATA_READ_DOORBELL_CLEAR 0x00000004
329 /*outbound message 0 ready*/
330#define ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE 0x00000008
331 /*outbound message cmd isr door bell clear*/
332#define ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR 0x00000008
333 /*ARCMSR_HBAMU_MESSAGE_FIRMWARE_OK*/
334#define ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK 0x80000000
272/* 335/*
273******************************************************************************* 336*******************************************************************************
274** ARECA SCSI COMMAND DESCRIPTOR BLOCK size 0x1F8 (504) 337** ARECA SCSI COMMAND DESCRIPTOR BLOCK size 0x1F8 (504)
@@ -290,7 +353,7 @@ struct ARCMSR_CDB
290#define ARCMSR_CDB_FLAG_HEADQ 0x08 353#define ARCMSR_CDB_FLAG_HEADQ 0x08
291#define ARCMSR_CDB_FLAG_ORDEREDQ 0x10 354#define ARCMSR_CDB_FLAG_ORDEREDQ 0x10
292 355
293 uint8_t Reserved1; 356 uint8_t msgPages;
294 uint32_t Context; 357 uint32_t Context;
295 uint32_t DataLength; 358 uint32_t DataLength;
296 uint8_t Cdb[16]; 359 uint8_t Cdb[16];
@@ -303,8 +366,8 @@ struct ARCMSR_CDB
303 uint8_t SenseData[15]; 366 uint8_t SenseData[15];
304 union 367 union
305 { 368 {
306 struct SG32ENTRY sg32entry[ARCMSR_MAX_SG_ENTRIES]; 369 struct SG32ENTRY sg32entry[1];
307 struct SG64ENTRY sg64entry[ARCMSR_MAX_SG_ENTRIES]; 370 struct SG64ENTRY sg64entry[1];
308 } u; 371 } u;
309}; 372};
310/* 373/*
@@ -344,15 +407,89 @@ struct MessageUnit_B
344 uint32_t done_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE]; 407 uint32_t done_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE];
345 uint32_t postq_index; 408 uint32_t postq_index;
346 uint32_t doneq_index; 409 uint32_t doneq_index;
347 uint32_t __iomem *drv2iop_doorbell_reg; 410 uint32_t __iomem *drv2iop_doorbell;
348 uint32_t __iomem *drv2iop_doorbell_mask_reg; 411 uint32_t __iomem *drv2iop_doorbell_mask;
349 uint32_t __iomem *iop2drv_doorbell_reg; 412 uint32_t __iomem *iop2drv_doorbell;
350 uint32_t __iomem *iop2drv_doorbell_mask_reg; 413 uint32_t __iomem *iop2drv_doorbell_mask;
351 uint32_t __iomem *msgcode_rwbuffer_reg; 414 uint32_t __iomem *message_rwbuffer;
352 uint32_t __iomem *ioctl_wbuffer_reg; 415 uint32_t __iomem *message_wbuffer;
353 uint32_t __iomem *ioctl_rbuffer_reg; 416 uint32_t __iomem *message_rbuffer;
417};
418/*
419*********************************************************************
420** LSI
421*********************************************************************
422*/
423struct MessageUnit_C{
424 uint32_t message_unit_status; /*0000 0003*/
425 uint32_t slave_error_attribute; /*0004 0007*/
426 uint32_t slave_error_address; /*0008 000B*/
427 uint32_t posted_outbound_doorbell; /*000C 000F*/
428 uint32_t master_error_attribute; /*0010 0013*/
429 uint32_t master_error_address_low; /*0014 0017*/
430 uint32_t master_error_address_high; /*0018 001B*/
431 uint32_t hcb_size; /*001C 001F*/
432 uint32_t inbound_doorbell; /*0020 0023*/
433 uint32_t diagnostic_rw_data; /*0024 0027*/
434 uint32_t diagnostic_rw_address_low; /*0028 002B*/
435 uint32_t diagnostic_rw_address_high; /*002C 002F*/
436 uint32_t host_int_status; /*0030 0033*/
437 uint32_t host_int_mask; /*0034 0037*/
438 uint32_t dcr_data; /*0038 003B*/
439 uint32_t dcr_address; /*003C 003F*/
440 uint32_t inbound_queueport; /*0040 0043*/
441 uint32_t outbound_queueport; /*0044 0047*/
442 uint32_t hcb_pci_address_low; /*0048 004B*/
443 uint32_t hcb_pci_address_high; /*004C 004F*/
444 uint32_t iop_int_status; /*0050 0053*/
445 uint32_t iop_int_mask; /*0054 0057*/
446 uint32_t iop_inbound_queue_port; /*0058 005B*/
447 uint32_t iop_outbound_queue_port; /*005C 005F*/
448 uint32_t inbound_free_list_index; /*0060 0063*/
449 uint32_t inbound_post_list_index; /*0064 0067*/
450 uint32_t outbound_free_list_index; /*0068 006B*/
451 uint32_t outbound_post_list_index; /*006C 006F*/
452 uint32_t inbound_doorbell_clear; /*0070 0073*/
453 uint32_t i2o_message_unit_control; /*0074 0077*/
454 uint32_t last_used_message_source_address_low; /*0078 007B*/
455 uint32_t last_used_message_source_address_high; /*007C 007F*/
456 uint32_t pull_mode_data_byte_count[4]; /*0080 008F*/
457 uint32_t message_dest_address_index; /*0090 0093*/
458 uint32_t done_queue_not_empty_int_counter_timer; /*0094 0097*/
459 uint32_t utility_A_int_counter_timer; /*0098 009B*/
460 uint32_t outbound_doorbell; /*009C 009F*/
461 uint32_t outbound_doorbell_clear; /*00A0 00A3*/
462 uint32_t message_source_address_index; /*00A4 00A7*/
463 uint32_t message_done_queue_index; /*00A8 00AB*/
464 uint32_t reserved0; /*00AC 00AF*/
465 uint32_t inbound_msgaddr0; /*00B0 00B3*/
466 uint32_t inbound_msgaddr1; /*00B4 00B7*/
467 uint32_t outbound_msgaddr0; /*00B8 00BB*/
468 uint32_t outbound_msgaddr1; /*00BC 00BF*/
469 uint32_t inbound_queueport_low; /*00C0 00C3*/
470 uint32_t inbound_queueport_high; /*00C4 00C7*/
471 uint32_t outbound_queueport_low; /*00C8 00CB*/
472 uint32_t outbound_queueport_high; /*00CC 00CF*/
473 uint32_t iop_inbound_queue_port_low; /*00D0 00D3*/
474 uint32_t iop_inbound_queue_port_high; /*00D4 00D7*/
475 uint32_t iop_outbound_queue_port_low; /*00D8 00DB*/
476 uint32_t iop_outbound_queue_port_high; /*00DC 00DF*/
477 uint32_t message_dest_queue_port_low; /*00E0 00E3*/
478 uint32_t message_dest_queue_port_high; /*00E4 00E7*/
479 uint32_t last_used_message_dest_address_low; /*00E8 00EB*/
480 uint32_t last_used_message_dest_address_high; /*00EC 00EF*/
481 uint32_t message_done_queue_base_address_low; /*00F0 00F3*/
482 uint32_t message_done_queue_base_address_high; /*00F4 00F7*/
483 uint32_t host_diagnostic; /*00F8 00FB*/
484 uint32_t write_sequence; /*00FC 00FF*/
485 uint32_t reserved1[34]; /*0100 0187*/
486 uint32_t reserved2[1950]; /*0188 1FFF*/
487 uint32_t message_wbuffer[32]; /*2000 207F*/
488 uint32_t reserved3[32]; /*2080 20FF*/
489 uint32_t message_rbuffer[32]; /*2100 217F*/
490 uint32_t reserved4[32]; /*2180 21FF*/
491 uint32_t msgcode_rwbuffer[256]; /*2200 23FF*/
354}; 492};
355
356/* 493/*
357******************************************************************************* 494*******************************************************************************
358** Adapter Control Block 495** Adapter Control Block
@@ -370,14 +507,20 @@ struct AdapterControlBlock
370 unsigned long vir2phy_offset; 507 unsigned long vir2phy_offset;
371 /* Offset is used in making arc cdb physical to virtual calculations */ 508 /* Offset is used in making arc cdb physical to virtual calculations */
372 uint32_t outbound_int_enable; 509 uint32_t outbound_int_enable;
373 510 uint32_t cdb_phyaddr_hi32;
511 uint32_t reg_mu_acc_handle0;
512 spinlock_t eh_lock;
513 spinlock_t ccblist_lock;
374 union { 514 union {
375 struct MessageUnit_A __iomem * pmuA; 515 struct MessageUnit_A __iomem *pmuA;
376 struct MessageUnit_B * pmuB; 516 struct MessageUnit_B *pmuB;
517 struct MessageUnit_C __iomem *pmuC;
377 }; 518 };
378 /* message unit ATU inbound base address0 */ 519 /* message unit ATU inbound base address0 */
379 520 void __iomem *mem_base0;
521 void __iomem *mem_base1;
380 uint32_t acb_flags; 522 uint32_t acb_flags;
523 u16 dev_id;
381 uint8_t adapter_index; 524 uint8_t adapter_index;
382 #define ACB_F_SCSISTOPADAPTER 0x0001 525 #define ACB_F_SCSISTOPADAPTER 0x0001
383 #define ACB_F_MSG_STOP_BGRB 0x0002 526 #define ACB_F_MSG_STOP_BGRB 0x0002
@@ -392,8 +535,11 @@ struct AdapterControlBlock
392 /* message clear rqbuffer */ 535 /* message clear rqbuffer */
393 #define ACB_F_MESSAGE_WQBUFFER_READED 0x0040 536 #define ACB_F_MESSAGE_WQBUFFER_READED 0x0040
394 #define ACB_F_BUS_RESET 0x0080 537 #define ACB_F_BUS_RESET 0x0080
538 #define ACB_F_BUS_HANG_ON 0x0800/* need hardware reset bus */
539
395 #define ACB_F_IOP_INITED 0x0100 540 #define ACB_F_IOP_INITED 0x0100
396 /* iop init */ 541 /* iop init */
542 #define ACB_F_ABORT 0x0200
397 #define ACB_F_FIRMWARE_TRAP 0x0400 543 #define ACB_F_FIRMWARE_TRAP 0x0400
398 struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM]; 544 struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM];
399 /* used for memory free */ 545 /* used for memory free */
@@ -408,7 +554,8 @@ struct AdapterControlBlock
408 /* dma_coherent used for memory free */ 554 /* dma_coherent used for memory free */
409 dma_addr_t dma_coherent_handle; 555 dma_addr_t dma_coherent_handle;
410 /* dma_coherent_handle used for memory free */ 556 /* dma_coherent_handle used for memory free */
411 557 dma_addr_t dma_coherent_handle_hbb_mu;
558 unsigned int uncache_size;
412 uint8_t rqbuffer[ARCMSR_MAX_QBUFFER]; 559 uint8_t rqbuffer[ARCMSR_MAX_QBUFFER];
413 /* data collection buffer for read from 80331 */ 560 /* data collection buffer for read from 80331 */
414 int32_t rqbuf_firstindex; 561 int32_t rqbuf_firstindex;
@@ -432,14 +579,18 @@ struct AdapterControlBlock
432 uint32_t firm_numbers_queue; 579 uint32_t firm_numbers_queue;
433 uint32_t firm_sdram_size; 580 uint32_t firm_sdram_size;
434 uint32_t firm_hd_channels; 581 uint32_t firm_hd_channels;
435 char firm_model[12]; 582 uint32_t firm_cfg_version;
436 char firm_version[20]; 583 char firm_model[12];
584 char firm_version[20];
437 char device_map[20]; /*21,84-99*/ 585 char device_map[20]; /*21,84-99*/
438 struct work_struct arcmsr_do_message_isr_bh; 586 struct work_struct arcmsr_do_message_isr_bh;
439 struct timer_list eternal_timer; 587 struct timer_list eternal_timer;
440 unsigned short fw_state; 588 unsigned short fw_flag;
589 #define FW_NORMAL 0x0000
590 #define FW_BOG 0x0001
591 #define FW_DEADLOCK 0x0010
441 atomic_t rq_map_token; 592 atomic_t rq_map_token;
442 int ante_token_value; 593 atomic_t ante_token_value;
443};/* HW_DEVICE_EXTENSION */ 594};/* HW_DEVICE_EXTENSION */
444/* 595/*
445******************************************************************************* 596*******************************************************************************
@@ -447,67 +598,33 @@ struct AdapterControlBlock
447** this CCB length must be 32 bytes boundary 598** this CCB length must be 32 bytes boundary
448******************************************************************************* 599*******************************************************************************
449*/ 600*/
450struct CommandControlBlock 601struct CommandControlBlock{
451{ 602 /*x32:sizeof struct_CCB=(32+60)byte, x64:sizeof struct_CCB=(64+60)byte*/
452 struct ARCMSR_CDB arcmsr_cdb; 603 struct list_head list; /*x32: 8byte, x64: 16byte*/
453 /* 604 struct scsi_cmnd *pcmd; /*8 bytes pointer of linux scsi command */
454 ** 0-503 (size of CDB = 504): 605 struct AdapterControlBlock *acb; /*x32: 4byte, x64: 8byte*/
455 ** arcmsr messenger scsi command descriptor size 504 bytes 606 uint32_t cdb_phyaddr_pattern; /*x32: 4byte, x64: 4byte*/
456 */ 607 uint32_t arc_cdb_size; /*x32:4byte,x64:4byte*/
457 uint32_t cdb_shifted_phyaddr; 608 uint16_t ccb_flags; /*x32: 2byte, x64: 2byte*/
458 /* 504-507 */ 609 #define CCB_FLAG_READ 0x0000
459 uint32_t reserved1; 610 #define CCB_FLAG_WRITE 0x0001
460 /* 508-511 */ 611 #define CCB_FLAG_ERROR 0x0002
461#if BITS_PER_LONG == 64 612 #define CCB_FLAG_FLUSHCACHE 0x0004
613 #define CCB_FLAG_MASTER_ABORTED 0x0008
614 uint16_t startdone; /*x32:2byte,x32:2byte*/
615 #define ARCMSR_CCB_DONE 0x0000
616 #define ARCMSR_CCB_START 0x55AA
617 #define ARCMSR_CCB_ABORTED 0xAA55
618 #define ARCMSR_CCB_ILLEGAL 0xFFFF
619 #if BITS_PER_LONG == 64
462 /* ======================512+64 bytes======================== */ 620 /* ======================512+64 bytes======================== */
463 struct list_head list; 621 uint32_t reserved[5]; /*24 byte*/
464 /* 512-527 16 bytes next/prev ptrs for ccb lists */ 622 #else
465 struct scsi_cmnd * pcmd;
466 /* 528-535 8 bytes pointer of linux scsi command */
467 struct AdapterControlBlock * acb;
468 /* 536-543 8 bytes pointer of acb */
469
470 uint16_t ccb_flags;
471 /* 544-545 */
472 #define CCB_FLAG_READ 0x0000
473 #define CCB_FLAG_WRITE 0x0001
474 #define CCB_FLAG_ERROR 0x0002
475 #define CCB_FLAG_FLUSHCACHE 0x0004
476 #define CCB_FLAG_MASTER_ABORTED 0x0008
477 uint16_t startdone;
478 /* 546-547 */
479 #define ARCMSR_CCB_DONE 0x0000
480 #define ARCMSR_CCB_START 0x55AA
481 #define ARCMSR_CCB_ABORTED 0xAA55
482 #define ARCMSR_CCB_ILLEGAL 0xFFFF
483 uint32_t reserved2[7];
484 /* 548-551 552-555 556-559 560-563 564-567 568-571 572-575 */
485#else
486 /* ======================512+32 bytes======================== */ 623 /* ======================512+32 bytes======================== */
487 struct list_head list; 624 uint32_t reserved; /*8 byte*/
488 /* 512-519 8 bytes next/prev ptrs for ccb lists */ 625 #endif
489 struct scsi_cmnd * pcmd; 626 /* ======================================================= */
490 /* 520-523 4 bytes pointer of linux scsi command */ 627 struct ARCMSR_CDB arcmsr_cdb;
491 struct AdapterControlBlock * acb;
492 /* 524-527 4 bytes pointer of acb */
493
494 uint16_t ccb_flags;
495 /* 528-529 */
496 #define CCB_FLAG_READ 0x0000
497 #define CCB_FLAG_WRITE 0x0001
498 #define CCB_FLAG_ERROR 0x0002
499 #define CCB_FLAG_FLUSHCACHE 0x0004
500 #define CCB_FLAG_MASTER_ABORTED 0x0008
501 uint16_t startdone;
502 /* 530-531 */
503 #define ARCMSR_CCB_DONE 0x0000
504 #define ARCMSR_CCB_START 0x55AA
505 #define ARCMSR_CCB_ABORTED 0xAA55
506 #define ARCMSR_CCB_ILLEGAL 0xFFFF
507 uint32_t reserved2[3];
508 /* 532-535 536-539 540-543 */
509#endif
510 /* ========================================================== */
511}; 628};
512/* 629/*
513******************************************************************************* 630*******************************************************************************
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index ffa54792bb33..95a895dd4f13 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -58,7 +58,6 @@
58#include <linux/timer.h> 58#include <linux/timer.h>
59#include <linux/pci.h> 59#include <linux/pci.h>
60#include <linux/aer.h> 60#include <linux/aer.h>
61#include <linux/slab.h>
62#include <asm/dma.h> 61#include <asm/dma.h>
63#include <asm/io.h> 62#include <asm/io.h>
64#include <asm/system.h> 63#include <asm/system.h>
@@ -71,20 +70,13 @@
71#include <scsi/scsi_transport.h> 70#include <scsi/scsi_transport.h>
72#include <scsi/scsicam.h> 71#include <scsi/scsicam.h>
73#include "arcmsr.h" 72#include "arcmsr.h"
74 73MODULE_AUTHOR("Nick Cheng <support@areca.com.tw>");
75#ifdef CONFIG_SCSI_ARCMSR_RESET 74MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/16xx/1880) SATA/SAS RAID Host Bus Adapter");
76 static int sleeptime = 20;
77 static int retrycount = 12;
78 module_param(sleeptime, int, S_IRUGO|S_IWUSR);
79 MODULE_PARM_DESC(sleeptime, "The waiting period for FW ready while bus reset");
80 module_param(retrycount, int, S_IRUGO|S_IWUSR);
81 MODULE_PARM_DESC(retrycount, "The retry count for FW ready while bus reset");
82#endif
83MODULE_AUTHOR("Erich Chen <support@areca.com.tw>");
84MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID Host Bus Adapter");
85MODULE_LICENSE("Dual BSD/GPL"); 75MODULE_LICENSE("Dual BSD/GPL");
86MODULE_VERSION(ARCMSR_DRIVER_VERSION); 76MODULE_VERSION(ARCMSR_DRIVER_VERSION);
87 77static int sleeptime = 10;
78static int retrycount = 30;
79wait_queue_head_t wait_q;
88static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, 80static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
89 struct scsi_cmnd *cmd); 81 struct scsi_cmnd *cmd);
90static int arcmsr_iop_confirm(struct AdapterControlBlock *acb); 82static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
@@ -107,10 +99,12 @@ static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb);
107static void arcmsr_request_device_map(unsigned long pacb); 99static void arcmsr_request_device_map(unsigned long pacb);
108static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb); 100static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb);
109static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb); 101static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb);
102static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb);
110static void arcmsr_message_isr_bh_fn(struct work_struct *work); 103static void arcmsr_message_isr_bh_fn(struct work_struct *work);
111static void *arcmsr_get_firmware_spec(struct AdapterControlBlock *acb, int mode); 104static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb);
112static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb); 105static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
113 106static void arcmsr_hbc_message_isr(struct AdapterControlBlock *pACB);
107static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
114static const char *arcmsr_info(struct Scsi_Host *); 108static const char *arcmsr_info(struct Scsi_Host *);
115static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb); 109static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
116static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, 110static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
@@ -127,18 +121,18 @@ static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
127 121
128static struct scsi_host_template arcmsr_scsi_host_template = { 122static struct scsi_host_template arcmsr_scsi_host_template = {
129 .module = THIS_MODULE, 123 .module = THIS_MODULE,
130 .name = "ARCMSR ARECA SATA/SAS RAID Host Bus Adapter" 124 .name = "ARCMSR ARECA SATA/SAS RAID Controller"
131 ARCMSR_DRIVER_VERSION, 125 ARCMSR_DRIVER_VERSION,
132 .info = arcmsr_info, 126 .info = arcmsr_info,
133 .queuecommand = arcmsr_queue_command, 127 .queuecommand = arcmsr_queue_command,
134 .eh_abort_handler = arcmsr_abort, 128 .eh_abort_handler = arcmsr_abort,
135 .eh_bus_reset_handler = arcmsr_bus_reset, 129 .eh_bus_reset_handler = arcmsr_bus_reset,
136 .bios_param = arcmsr_bios_param, 130 .bios_param = arcmsr_bios_param,
137 .change_queue_depth = arcmsr_adjust_disk_queue_depth, 131 .change_queue_depth = arcmsr_adjust_disk_queue_depth,
138 .can_queue = ARCMSR_MAX_OUTSTANDING_CMD, 132 .can_queue = ARCMSR_MAX_FREECCB_NUM,
139 .this_id = ARCMSR_SCSI_INITIATOR_ID, 133 .this_id = ARCMSR_SCSI_INITIATOR_ID,
140 .sg_tablesize = ARCMSR_MAX_SG_ENTRIES, 134 .sg_tablesize = ARCMSR_DEFAULT_SG_ENTRIES,
141 .max_sectors = ARCMSR_MAX_XFER_SECTORS, 135 .max_sectors = ARCMSR_MAX_XFER_SECTORS_C,
142 .cmd_per_lun = ARCMSR_MAX_CMD_PERLUN, 136 .cmd_per_lun = ARCMSR_MAX_CMD_PERLUN,
143 .use_clustering = ENABLE_CLUSTERING, 137 .use_clustering = ENABLE_CLUSTERING,
144 .shost_attrs = arcmsr_host_attrs, 138 .shost_attrs = arcmsr_host_attrs,
@@ -162,26 +156,125 @@ static struct pci_device_id arcmsr_device_id_table[] = {
162 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381)}, 156 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381)},
163 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680)}, 157 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680)},
164 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681)}, 158 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681)},
159 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880)},
165 {0, 0}, /* Terminating entry */ 160 {0, 0}, /* Terminating entry */
166}; 161};
167MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table); 162MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
168static struct pci_driver arcmsr_pci_driver = { 163static struct pci_driver arcmsr_pci_driver = {
169 .name = "arcmsr", 164 .name = "arcmsr",
170 .id_table = arcmsr_device_id_table, 165 .id_table = arcmsr_device_id_table,
171 .probe = arcmsr_probe, 166 .probe = arcmsr_probe,
172 .remove = arcmsr_remove, 167 .remove = arcmsr_remove,
173 .shutdown = arcmsr_shutdown, 168 .shutdown = arcmsr_shutdown,
174}; 169};
170/*
171****************************************************************************
172****************************************************************************
173*/
174int arcmsr_sleep_for_bus_reset(struct scsi_cmnd *cmd)
175{
176 struct Scsi_Host *shost = NULL;
177 int i, isleep;
178 shost = cmd->device->host;
179 isleep = sleeptime / 10;
180 if (isleep > 0) {
181 for (i = 0; i < isleep; i++) {
182 msleep(10000);
183 }
184 }
185
186 isleep = sleeptime % 10;
187 if (isleep > 0) {
188 msleep(isleep*1000);
189 }
190 printk(KERN_NOTICE "wake-up\n");
191 return 0;
192}
193
194static void arcmsr_free_hbb_mu(struct AdapterControlBlock *acb)
195{
196 switch (acb->adapter_type) {
197 case ACB_ADAPTER_TYPE_A:
198 case ACB_ADAPTER_TYPE_C:
199 break;
200 case ACB_ADAPTER_TYPE_B:{
201 dma_free_coherent(&acb->pdev->dev,
202 sizeof(struct MessageUnit_B),
203 acb->pmuB, acb->dma_coherent_handle_hbb_mu);
204 }
205 }
206}
207
208static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
209{
210 struct pci_dev *pdev = acb->pdev;
211 switch (acb->adapter_type){
212 case ACB_ADAPTER_TYPE_A:{
213 acb->pmuA = ioremap(pci_resource_start(pdev,0), pci_resource_len(pdev,0));
214 if (!acb->pmuA) {
215 printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
216 return false;
217 }
218 break;
219 }
220 case ACB_ADAPTER_TYPE_B:{
221 void __iomem *mem_base0, *mem_base1;
222 mem_base0 = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
223 if (!mem_base0) {
224 printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
225 return false;
226 }
227 mem_base1 = ioremap(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2));
228 if (!mem_base1) {
229 iounmap(mem_base0);
230 printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
231 return false;
232 }
233 acb->mem_base0 = mem_base0;
234 acb->mem_base1 = mem_base1;
235 break;
236 }
237 case ACB_ADAPTER_TYPE_C:{
238 acb->pmuC = ioremap_nocache(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
239 if (!acb->pmuC) {
240 printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
241 return false;
242 }
243 if (readl(&acb->pmuC->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
244 writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &acb->pmuC->outbound_doorbell_clear);/*clear interrupt*/
245 return true;
246 }
247 break;
248 }
249 }
250 return true;
251}
252
253static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb)
254{
255 switch (acb->adapter_type) {
256 case ACB_ADAPTER_TYPE_A:{
257 iounmap(acb->pmuA);
258 }
259 break;
260 case ACB_ADAPTER_TYPE_B:{
261 iounmap(acb->mem_base0);
262 iounmap(acb->mem_base1);
263 }
264
265 break;
266 case ACB_ADAPTER_TYPE_C:{
267 iounmap(acb->pmuC);
268 }
269 }
270}
175 271
176static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id) 272static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
177{ 273{
178 irqreturn_t handle_state; 274 irqreturn_t handle_state;
179 struct AdapterControlBlock *acb = dev_id; 275 struct AdapterControlBlock *acb = dev_id;
180 276
181 spin_lock(acb->host->host_lock);
182 handle_state = arcmsr_interrupt(acb); 277 handle_state = arcmsr_interrupt(acb);
183 spin_unlock(acb->host->host_lock);
184
185 return handle_state; 278 return handle_state;
186} 279}
187 280
@@ -218,181 +311,228 @@ static void arcmsr_define_adapter_type(struct AdapterControlBlock *acb)
218 struct pci_dev *pdev = acb->pdev; 311 struct pci_dev *pdev = acb->pdev;
219 u16 dev_id; 312 u16 dev_id;
220 pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id); 313 pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id);
314 acb->dev_id = dev_id;
221 switch (dev_id) { 315 switch (dev_id) {
222 case 0x1201 : { 316 case 0x1880: {
317 acb->adapter_type = ACB_ADAPTER_TYPE_C;
318 }
319 break;
320 case 0x1201: {
223 acb->adapter_type = ACB_ADAPTER_TYPE_B; 321 acb->adapter_type = ACB_ADAPTER_TYPE_B;
224 } 322 }
225 break; 323 break;
226 324
227 default : acb->adapter_type = ACB_ADAPTER_TYPE_A; 325 default: acb->adapter_type = ACB_ADAPTER_TYPE_A;
228 } 326 }
229} 327}
230 328
231static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) 329static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
232{ 330{
331 struct MessageUnit_A __iomem *reg = acb->pmuA;
332 uint32_t Index;
333 uint8_t Retries = 0x00;
334 do {
335 for (Index = 0; Index < 100; Index++) {
336 if (readl(&reg->outbound_intstatus) &
337 ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
338 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
339 &reg->outbound_intstatus);
340 return true;
341 }
342 msleep(10);
343 }/*max 1 seconds*/
233 344
234 switch (acb->adapter_type) { 345 } while (Retries++ < 20);/*max 20 sec*/
235 346 return false;
236 case ACB_ADAPTER_TYPE_A: { 347}
237 struct pci_dev *pdev = acb->pdev;
238 void *dma_coherent;
239 dma_addr_t dma_coherent_handle, dma_addr;
240 struct CommandControlBlock *ccb_tmp;
241 int i, j;
242 348
243 acb->pmuA = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); 349static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
244 if (!acb->pmuA) { 350{
245 printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", 351 struct MessageUnit_B *reg = acb->pmuB;
246 acb->host->host_no); 352 uint32_t Index;
247 return -ENOMEM; 353 uint8_t Retries = 0x00;
248 } 354 do {
355 for (Index = 0; Index < 100; Index++) {
356 if (readl(reg->iop2drv_doorbell)
357 & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
358 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN
359 , reg->iop2drv_doorbell);
360 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
361 return true;
362 }
363 msleep(10);
364 }/*max 1 seconds*/
249 365
250 dma_coherent = dma_alloc_coherent(&pdev->dev, 366 } while (Retries++ < 20);/*max 20 sec*/
251 ARCMSR_MAX_FREECCB_NUM * 367 return false;
252 sizeof (struct CommandControlBlock) + 0x20, 368}
253 &dma_coherent_handle, GFP_KERNEL);
254 369
255 if (!dma_coherent) { 370static uint8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *pACB)
256 iounmap(acb->pmuA); 371{
257 return -ENOMEM; 372 struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC;
373 unsigned char Retries = 0x00;
374 uint32_t Index;
375 do {
376 for (Index = 0; Index < 100; Index++) {
377 if (readl(&phbcmu->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
378 writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &phbcmu->outbound_doorbell_clear);/*clear interrupt*/
379 return true;
380 }
381 /* one us delay */
382 msleep(10);
383 } /*max 1 seconds*/
384 } while (Retries++ < 20); /*max 20 sec*/
385 return false;
386}
387static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
388{
389 struct MessageUnit_A __iomem *reg = acb->pmuA;
390 int retry_count = 30;
391 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
392 do {
393 if (arcmsr_hba_wait_msgint_ready(acb))
394 break;
395 else {
396 retry_count--;
397 printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
398 timeout, retry count down = %d \n", acb->host->host_no, retry_count);
258 } 399 }
400 } while (retry_count != 0);
401}
259 402
260 acb->dma_coherent = dma_coherent; 403static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
261 acb->dma_coherent_handle = dma_coherent_handle; 404{
262 405 struct MessageUnit_B *reg = acb->pmuB;
263 if (((unsigned long)dma_coherent & 0x1F)) { 406 int retry_count = 30;
264 dma_coherent = dma_coherent + 407 writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell);
265 (0x20 - ((unsigned long)dma_coherent & 0x1F)); 408 do {
266 dma_coherent_handle = dma_coherent_handle + 409 if (arcmsr_hbb_wait_msgint_ready(acb))
267 (0x20 - ((unsigned long)dma_coherent_handle & 0x1F)); 410 break;
411 else {
412 retry_count--;
413 printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
414 timeout,retry count down = %d \n", acb->host->host_no, retry_count);
268 } 415 }
416 } while (retry_count != 0);
417}
269 418
270 dma_addr = dma_coherent_handle; 419static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *pACB)
271 ccb_tmp = (struct CommandControlBlock *)dma_coherent; 420{
272 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { 421 struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;
273 ccb_tmp->cdb_shifted_phyaddr = dma_addr >> 5; 422 int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */
274 ccb_tmp->acb = acb; 423 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
275 acb->pccb_pool[i] = ccb_tmp; 424 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
276 list_add_tail(&ccb_tmp->list, &acb->ccb_free_list); 425 do {
277 dma_addr = dma_addr + sizeof(struct CommandControlBlock); 426 if (arcmsr_hbc_wait_msgint_ready(pACB)) {
278 ccb_tmp++; 427 break;
428 } else {
429 retry_count--;
430 printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
431 timeout,retry count down = %d \n", pACB->host->host_no, retry_count);
279 } 432 }
433 } while (retry_count != 0);
434 return;
435}
436static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
437{
438 switch (acb->adapter_type) {
280 439
281 acb->vir2phy_offset = (unsigned long)ccb_tmp -(unsigned long)dma_addr; 440 case ACB_ADAPTER_TYPE_A: {
282 for (i = 0; i < ARCMSR_MAX_TARGETID; i++) 441 arcmsr_flush_hba_cache(acb);
283 for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
284 acb->devstate[i][j] = ARECA_RAID_GONE;
285 } 442 }
286 break; 443 break;
287 444
288 case ACB_ADAPTER_TYPE_B: { 445 case ACB_ADAPTER_TYPE_B: {
289 446 arcmsr_flush_hbb_cache(acb);
290 struct pci_dev *pdev = acb->pdev;
291 struct MessageUnit_B *reg;
292 void __iomem *mem_base0, *mem_base1;
293 void *dma_coherent;
294 dma_addr_t dma_coherent_handle, dma_addr;
295 struct CommandControlBlock *ccb_tmp;
296 int i, j;
297
298 dma_coherent = dma_alloc_coherent(&pdev->dev,
299 ((ARCMSR_MAX_FREECCB_NUM *
300 sizeof(struct CommandControlBlock) + 0x20) +
301 sizeof(struct MessageUnit_B)),
302 &dma_coherent_handle, GFP_KERNEL);
303 if (!dma_coherent)
304 return -ENOMEM;
305
306 acb->dma_coherent = dma_coherent;
307 acb->dma_coherent_handle = dma_coherent_handle;
308
309 if (((unsigned long)dma_coherent & 0x1F)) {
310 dma_coherent = dma_coherent +
311 (0x20 - ((unsigned long)dma_coherent & 0x1F));
312 dma_coherent_handle = dma_coherent_handle +
313 (0x20 - ((unsigned long)dma_coherent_handle & 0x1F));
314 }
315
316 dma_addr = dma_coherent_handle;
317 ccb_tmp = (struct CommandControlBlock *)dma_coherent;
318 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
319 ccb_tmp->cdb_shifted_phyaddr = dma_addr >> 5;
320 ccb_tmp->acb = acb;
321 acb->pccb_pool[i] = ccb_tmp;
322 list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
323 dma_addr = dma_addr + sizeof(struct CommandControlBlock);
324 ccb_tmp++;
325 }
326
327 reg = (struct MessageUnit_B *)(dma_coherent +
328 ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock));
329 acb->pmuB = reg;
330 mem_base0 = ioremap(pci_resource_start(pdev, 0),
331 pci_resource_len(pdev, 0));
332 if (!mem_base0)
333 goto out;
334
335 mem_base1 = ioremap(pci_resource_start(pdev, 2),
336 pci_resource_len(pdev, 2));
337 if (!mem_base1) {
338 iounmap(mem_base0);
339 goto out;
340 }
341
342 reg->drv2iop_doorbell_reg = mem_base0 + ARCMSR_DRV2IOP_DOORBELL;
343 reg->drv2iop_doorbell_mask_reg = mem_base0 +
344 ARCMSR_DRV2IOP_DOORBELL_MASK;
345 reg->iop2drv_doorbell_reg = mem_base0 + ARCMSR_IOP2DRV_DOORBELL;
346 reg->iop2drv_doorbell_mask_reg = mem_base0 +
347 ARCMSR_IOP2DRV_DOORBELL_MASK;
348 reg->ioctl_wbuffer_reg = mem_base1 + ARCMSR_IOCTL_WBUFFER;
349 reg->ioctl_rbuffer_reg = mem_base1 + ARCMSR_IOCTL_RBUFFER;
350 reg->msgcode_rwbuffer_reg = mem_base1 + ARCMSR_MSGCODE_RWBUFFER;
351
352 acb->vir2phy_offset = (unsigned long)ccb_tmp -(unsigned long)dma_addr;
353 for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
354 for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
355 acb->devstate[i][j] = ARECA_RAID_GOOD;
356 } 447 }
357 break; 448 break;
449 case ACB_ADAPTER_TYPE_C: {
450 arcmsr_flush_hbc_cache(acb);
451 }
358 } 452 }
359 return 0;
360
361out:
362 dma_free_coherent(&acb->pdev->dev,
363 (ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 +
364 sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle);
365 return -ENOMEM;
366} 453}
367static void arcmsr_message_isr_bh_fn(struct work_struct *work) 454
455static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
368{ 456{
369 struct AdapterControlBlock *acb = container_of(work, struct AdapterControlBlock, arcmsr_do_message_isr_bh); 457 struct pci_dev *pdev = acb->pdev;
458 void *dma_coherent;
459 dma_addr_t dma_coherent_handle;
460 struct CommandControlBlock *ccb_tmp;
461 int i = 0, j = 0;
462 dma_addr_t cdb_phyaddr;
463 unsigned long roundup_ccbsize = 0, offset;
464 unsigned long max_xfer_len;
465 unsigned long max_sg_entrys;
466 uint32_t firm_config_version;
467 for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
468 for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
469 acb->devstate[i][j] = ARECA_RAID_GONE;
470
471 max_xfer_len = ARCMSR_MAX_XFER_LEN;
472 max_sg_entrys = ARCMSR_DEFAULT_SG_ENTRIES;
473 firm_config_version = acb->firm_cfg_version;
474 if((firm_config_version & 0xFF) >= 3){
475 max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH << ((firm_config_version >> 8) & 0xFF)) * 1024;/* max 4M byte */
476 max_sg_entrys = (max_xfer_len/4096);
477 }
478 acb->host->max_sectors = max_xfer_len/512;
479 acb->host->sg_tablesize = max_sg_entrys;
480 roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
481 acb->uncache_size = roundup_ccbsize * ARCMSR_MAX_FREECCB_NUM + 32;
482 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL);
483 if(!dma_coherent){
484 printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error \n", acb->host->host_no);
485 return -ENOMEM;
486 }
487 acb->dma_coherent = dma_coherent;
488 acb->dma_coherent_handle = dma_coherent_handle;
489 memset(dma_coherent, 0, acb->uncache_size);
490 offset = roundup((unsigned long)dma_coherent, 32) - (unsigned long)dma_coherent;
491 dma_coherent_handle = dma_coherent_handle + offset;
492 dma_coherent = (struct CommandControlBlock *)dma_coherent + offset;
493 ccb_tmp = dma_coherent;
494 acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
495 for(i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++){
496 cdb_phyaddr = dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
497 ccb_tmp->cdb_phyaddr_pattern = ((acb->adapter_type == ACB_ADAPTER_TYPE_C) ? cdb_phyaddr : (cdb_phyaddr >> 5));
498 acb->pccb_pool[i] = ccb_tmp;
499 ccb_tmp->acb = acb;
500 INIT_LIST_HEAD(&ccb_tmp->list);
501 list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
502 ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize);
503 dma_coherent_handle = dma_coherent_handle + roundup_ccbsize;
504 }
505 return 0;
506}
370 507
508static void arcmsr_message_isr_bh_fn(struct work_struct *work)
509{
510 struct AdapterControlBlock *acb = container_of(work,struct AdapterControlBlock, arcmsr_do_message_isr_bh);
371 switch (acb->adapter_type) { 511 switch (acb->adapter_type) {
372 case ACB_ADAPTER_TYPE_A: { 512 case ACB_ADAPTER_TYPE_A: {
373 513
374 struct MessageUnit_A __iomem *reg = acb->pmuA; 514 struct MessageUnit_A __iomem *reg = acb->pmuA;
375 char *acb_dev_map = (char *)acb->device_map; 515 char *acb_dev_map = (char *)acb->device_map;
376 uint32_t __iomem *signature = (uint32_t __iomem *) (&reg->message_rwbuffer[0]); 516 uint32_t __iomem *signature = (uint32_t __iomem*) (&reg->message_rwbuffer[0]);
377 char __iomem *devicemap = (char __iomem *) (&reg->message_rwbuffer[21]); 517 char __iomem *devicemap = (char __iomem*) (&reg->message_rwbuffer[21]);
378 int target, lun; 518 int target, lun;
379 struct scsi_device *psdev; 519 struct scsi_device *psdev;
380 char diff; 520 char diff;
381 521
382 atomic_inc(&acb->rq_map_token); 522 atomic_inc(&acb->rq_map_token);
383 if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) { 523 if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
384 for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) { 524 for(target = 0; target < ARCMSR_MAX_TARGETID -1; target++) {
385 diff = (*acb_dev_map)^readb(devicemap); 525 diff = (*acb_dev_map)^readb(devicemap);
386 if (diff != 0) { 526 if (diff != 0) {
387 char temp; 527 char temp;
388 *acb_dev_map = readb(devicemap); 528 *acb_dev_map = readb(devicemap);
389 temp = *acb_dev_map; 529 temp =*acb_dev_map;
390 for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) { 530 for(lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
391 if ((temp & 0x01) == 1 && (diff & 0x01) == 1) { 531 if((temp & 0x01)==1 && (diff & 0x01) == 1) {
392 scsi_add_device(acb->host, 0, target, lun); 532 scsi_add_device(acb->host, 0, target, lun);
393 } else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) { 533 }else if((temp & 0x01) == 0 && (diff & 0x01) == 1) {
394 psdev = scsi_device_lookup(acb->host, 0, target, lun); 534 psdev = scsi_device_lookup(acb->host, 0, target, lun);
395 if (psdev != NULL) { 535 if (psdev != NULL ) {
396 scsi_remove_device(psdev); 536 scsi_remove_device(psdev);
397 scsi_device_put(psdev); 537 scsi_device_put(psdev);
398 } 538 }
@@ -411,8 +551,45 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work)
411 case ACB_ADAPTER_TYPE_B: { 551 case ACB_ADAPTER_TYPE_B: {
412 struct MessageUnit_B *reg = acb->pmuB; 552 struct MessageUnit_B *reg = acb->pmuB;
413 char *acb_dev_map = (char *)acb->device_map; 553 char *acb_dev_map = (char *)acb->device_map;
414 uint32_t __iomem *signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer_reg[0]); 554 uint32_t __iomem *signature = (uint32_t __iomem*)(&reg->message_rwbuffer[0]);
415 char __iomem *devicemap = (char __iomem *)(&reg->msgcode_rwbuffer_reg[21]); 555 char __iomem *devicemap = (char __iomem*)(&reg->message_rwbuffer[21]);
556 int target, lun;
557 struct scsi_device *psdev;
558 char diff;
559
560 atomic_inc(&acb->rq_map_token);
561 if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
562 for(target = 0; target < ARCMSR_MAX_TARGETID -1; target++) {
563 diff = (*acb_dev_map)^readb(devicemap);
564 if (diff != 0) {
565 char temp;
566 *acb_dev_map = readb(devicemap);
567 temp =*acb_dev_map;
568 for(lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
569 if((temp & 0x01)==1 && (diff & 0x01) == 1) {
570 scsi_add_device(acb->host, 0, target, lun);
571 }else if((temp & 0x01) == 0 && (diff & 0x01) == 1) {
572 psdev = scsi_device_lookup(acb->host, 0, target, lun);
573 if (psdev != NULL ) {
574 scsi_remove_device(psdev);
575 scsi_device_put(psdev);
576 }
577 }
578 temp >>= 1;
579 diff >>= 1;
580 }
581 }
582 devicemap++;
583 acb_dev_map++;
584 }
585 }
586 }
587 break;
588 case ACB_ADAPTER_TYPE_C: {
589 struct MessageUnit_C *reg = acb->pmuC;
590 char *acb_dev_map = (char *)acb->device_map;
591 uint32_t __iomem *signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
592 char __iomem *devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
416 int target, lun; 593 int target, lun;
417 struct scsi_device *psdev; 594 struct scsi_device *psdev;
418 char diff; 595 char diff;
@@ -447,185 +624,152 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work)
447 } 624 }
448} 625}
449 626
450static int arcmsr_probe(struct pci_dev *pdev, 627static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
451 const struct pci_device_id *id)
452{ 628{
453 struct Scsi_Host *host; 629 struct Scsi_Host *host;
454 struct AdapterControlBlock *acb; 630 struct AdapterControlBlock *acb;
455 uint8_t bus, dev_fun; 631 uint8_t bus,dev_fun;
456 int error; 632 int error;
457
458 error = pci_enable_device(pdev); 633 error = pci_enable_device(pdev);
459 if (error) 634 if(error){
460 goto out; 635 return -ENODEV;
461 pci_set_master(pdev); 636 }
462 637 host = scsi_host_alloc(&arcmsr_scsi_host_template, sizeof(struct AdapterControlBlock));
463 host = scsi_host_alloc(&arcmsr_scsi_host_template, 638 if(!host){
464 sizeof(struct AdapterControlBlock)); 639 goto pci_disable_dev;
465 if (!host) {
466 error = -ENOMEM;
467 goto out_disable_device;
468 } 640 }
469 acb = (struct AdapterControlBlock *)host->hostdata;
470 memset(acb, 0, sizeof (struct AdapterControlBlock));
471
472 error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 641 error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
473 if (error) { 642 if(error){
474 error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 643 error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
475 if (error) { 644 if(error){
476 printk(KERN_WARNING 645 printk(KERN_WARNING
477 "scsi%d: No suitable DMA mask available\n", 646 "scsi%d: No suitable DMA mask available\n",
478 host->host_no); 647 host->host_no);
479 goto out_host_put; 648 goto scsi_host_release;
480 } 649 }
481 } 650 }
651 init_waitqueue_head(&wait_q);
482 bus = pdev->bus->number; 652 bus = pdev->bus->number;
483 dev_fun = pdev->devfn; 653 dev_fun = pdev->devfn;
484 acb->host = host; 654 acb = (struct AdapterControlBlock *) host->hostdata;
655 memset(acb,0,sizeof(struct AdapterControlBlock));
485 acb->pdev = pdev; 656 acb->pdev = pdev;
486 host->max_sectors = ARCMSR_MAX_XFER_SECTORS; 657 acb->host = host;
487 host->max_lun = ARCMSR_MAX_TARGETLUN; 658 host->max_lun = ARCMSR_MAX_TARGETLUN;
488 host->max_id = ARCMSR_MAX_TARGETID;/*16:8*/ 659 host->max_id = ARCMSR_MAX_TARGETID; /*16:8*/
489 host->max_cmd_len = 16; /*this is issue of 64bit LBA, over 2T byte*/ 660 host->max_cmd_len = 16; /*this is issue of 64bit LBA ,over 2T byte*/
490 host->sg_tablesize = ARCMSR_MAX_SG_ENTRIES; 661 host->can_queue = ARCMSR_MAX_FREECCB_NUM; /* max simultaneous cmds */
491 host->can_queue = ARCMSR_MAX_FREECCB_NUM; /* max simultaneous cmds */ 662 host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
492 host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
493 host->this_id = ARCMSR_SCSI_INITIATOR_ID; 663 host->this_id = ARCMSR_SCSI_INITIATOR_ID;
494 host->unique_id = (bus << 8) | dev_fun; 664 host->unique_id = (bus << 8) | dev_fun;
495 host->irq = pdev->irq; 665 pci_set_drvdata(pdev, host);
666 pci_set_master(pdev);
496 error = pci_request_regions(pdev, "arcmsr"); 667 error = pci_request_regions(pdev, "arcmsr");
497 if (error) { 668 if(error){
498 goto out_host_put; 669 goto scsi_host_release;
499 } 670 }
500 arcmsr_define_adapter_type(acb); 671 spin_lock_init(&acb->eh_lock);
501 672 spin_lock_init(&acb->ccblist_lock);
502 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | 673 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
503 ACB_F_MESSAGE_RQBUFFER_CLEARED | 674 ACB_F_MESSAGE_RQBUFFER_CLEARED |
504 ACB_F_MESSAGE_WQBUFFER_READED); 675 ACB_F_MESSAGE_WQBUFFER_READED);
505 acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER; 676 acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
506 INIT_LIST_HEAD(&acb->ccb_free_list); 677 INIT_LIST_HEAD(&acb->ccb_free_list);
507 INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn); 678 arcmsr_define_adapter_type(acb);
679 error = arcmsr_remap_pciregion(acb);
680 if(!error){
681 goto pci_release_regs;
682 }
683 error = arcmsr_get_firmware_spec(acb);
684 if(!error){
685 goto unmap_pci_region;
686 }
508 error = arcmsr_alloc_ccb_pool(acb); 687 error = arcmsr_alloc_ccb_pool(acb);
509 if (error) 688 if(error){
510 goto out_release_regions; 689 goto free_hbb_mu;
511 690 }
512 arcmsr_iop_init(acb); 691 arcmsr_iop_init(acb);
513 error = request_irq(pdev->irq, arcmsr_do_interrupt,
514 IRQF_SHARED, "arcmsr", acb);
515 if (error)
516 goto out_free_ccb_pool;
517
518 pci_set_drvdata(pdev, host);
519 if (strncmp(acb->firm_version, "V1.42", 5) >= 0)
520 host->max_sectors= ARCMSR_MAX_XFER_SECTORS_B;
521
522 error = scsi_add_host(host, &pdev->dev); 692 error = scsi_add_host(host, &pdev->dev);
523 if (error) 693 if(error){
524 goto out_free_irq; 694 goto RAID_controller_stop;
525 695 }
526 error = arcmsr_alloc_sysfs_attr(acb); 696 error = request_irq(pdev->irq, arcmsr_do_interrupt, IRQF_SHARED, "arcmsr", acb);
527 if (error) 697 if(error){
528 goto out_free_sysfs; 698 goto scsi_host_remove;
529 699 }
530 scsi_scan_host(host); 700 host->irq = pdev->irq;
531 #ifdef CONFIG_SCSI_ARCMSR_AER 701 scsi_scan_host(host);
532 pci_enable_pcie_error_reporting(pdev); 702 INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
533 #endif
534 atomic_set(&acb->rq_map_token, 16); 703 atomic_set(&acb->rq_map_token, 16);
535 acb->fw_state = true; 704 atomic_set(&acb->ante_token_value, 16);
705 acb->fw_flag = FW_NORMAL;
536 init_timer(&acb->eternal_timer); 706 init_timer(&acb->eternal_timer);
537 acb->eternal_timer.expires = jiffies + msecs_to_jiffies(10*HZ); 707 acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
538 acb->eternal_timer.data = (unsigned long) acb; 708 acb->eternal_timer.data = (unsigned long) acb;
539 acb->eternal_timer.function = &arcmsr_request_device_map; 709 acb->eternal_timer.function = &arcmsr_request_device_map;
540 add_timer(&acb->eternal_timer); 710 add_timer(&acb->eternal_timer);
541 711 if(arcmsr_alloc_sysfs_attr(acb))
712 goto out_free_sysfs;
542 return 0; 713 return 0;
543 out_free_sysfs: 714out_free_sysfs:
544 out_free_irq: 715scsi_host_remove:
545 free_irq(pdev->irq, acb); 716 scsi_remove_host(host);
546 out_free_ccb_pool: 717RAID_controller_stop:
718 arcmsr_stop_adapter_bgrb(acb);
719 arcmsr_flush_adapter_cache(acb);
547 arcmsr_free_ccb_pool(acb); 720 arcmsr_free_ccb_pool(acb);
548 out_release_regions: 721free_hbb_mu:
722 arcmsr_free_hbb_mu(acb);
723unmap_pci_region:
724 arcmsr_unmap_pciregion(acb);
725pci_release_regs:
549 pci_release_regions(pdev); 726 pci_release_regions(pdev);
550 out_host_put: 727scsi_host_release:
551 scsi_host_put(host); 728 scsi_host_put(host);
552 out_disable_device: 729pci_disable_dev:
553 pci_disable_device(pdev); 730 pci_disable_device(pdev);
554 out: 731 return -ENODEV;
555 return error;
556}
557
558static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
559{
560 struct MessageUnit_A __iomem *reg = acb->pmuA;
561 uint32_t Index;
562 uint8_t Retries = 0x00;
563
564 do {
565 for (Index = 0; Index < 100; Index++) {
566 if (readl(&reg->outbound_intstatus) &
567 ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
568 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
569 &reg->outbound_intstatus);
570 return 0x00;
571 }
572 msleep(10);
573 }/*max 1 seconds*/
574
575 } while (Retries++ < 20);/*max 20 sec*/
576 return 0xff;
577}
578
579static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
580{
581 struct MessageUnit_B *reg = acb->pmuB;
582 uint32_t Index;
583 uint8_t Retries = 0x00;
584
585 do {
586 for (Index = 0; Index < 100; Index++) {
587 if (readl(reg->iop2drv_doorbell_reg)
588 & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
589 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN
590 , reg->iop2drv_doorbell_reg);
591 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
592 return 0x00;
593 }
594 msleep(10);
595 }/*max 1 seconds*/
596
597 } while (Retries++ < 20);/*max 20 sec*/
598 return 0xff;
599} 732}
600 733
601static uint8_t arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb) 734static uint8_t arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
602{ 735{
603 struct MessageUnit_A __iomem *reg = acb->pmuA; 736 struct MessageUnit_A __iomem *reg = acb->pmuA;
604
605 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0); 737 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
606 if (arcmsr_hba_wait_msgint_ready(acb)) { 738 if (!arcmsr_hba_wait_msgint_ready(acb)) {
607 printk(KERN_NOTICE 739 printk(KERN_NOTICE
608 "arcmsr%d: wait 'abort all outstanding command' timeout \n" 740 "arcmsr%d: wait 'abort all outstanding command' timeout \n"
609 , acb->host->host_no); 741 , acb->host->host_no);
610 return 0xff; 742 return false;
611 } 743 }
612 return 0x00; 744 return true;
613} 745}
614 746
615static uint8_t arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb) 747static uint8_t arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
616{ 748{
617 struct MessageUnit_B *reg = acb->pmuB; 749 struct MessageUnit_B *reg = acb->pmuB;
618 750
619 writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell_reg); 751 writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell);
620 if (arcmsr_hbb_wait_msgint_ready(acb)) { 752 if (!arcmsr_hbb_wait_msgint_ready(acb)) {
621 printk(KERN_NOTICE 753 printk(KERN_NOTICE
622 "arcmsr%d: wait 'abort all outstanding command' timeout \n" 754 "arcmsr%d: wait 'abort all outstanding command' timeout \n"
623 , acb->host->host_no); 755 , acb->host->host_no);
624 return 0xff; 756 return false;
625 } 757 }
626 return 0x00; 758 return true;
759}
760static uint8_t arcmsr_abort_hbc_allcmd(struct AdapterControlBlock *pACB)
761{
762 struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;
763 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
764 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
765 if (!arcmsr_hbc_wait_msgint_ready(pACB)) {
766 printk(KERN_NOTICE
767 "arcmsr%d: wait 'abort all outstanding command' timeout \n"
768 , pACB->host->host_no);
769 return false;
770 }
771 return true;
627} 772}
628
629static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb) 773static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
630{ 774{
631 uint8_t rtnval = 0; 775 uint8_t rtnval = 0;
@@ -638,10 +782,26 @@ static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
638 case ACB_ADAPTER_TYPE_B: { 782 case ACB_ADAPTER_TYPE_B: {
639 rtnval = arcmsr_abort_hbb_allcmd(acb); 783 rtnval = arcmsr_abort_hbb_allcmd(acb);
640 } 784 }
785 break;
786
787 case ACB_ADAPTER_TYPE_C: {
788 rtnval = arcmsr_abort_hbc_allcmd(acb);
789 }
641 } 790 }
642 return rtnval; 791 return rtnval;
643} 792}
644 793
794static bool arcmsr_hbb_enable_driver_mode(struct AdapterControlBlock *pacb)
795{
796 struct MessageUnit_B *reg = pacb->pmuB;
797 writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
798 if (!arcmsr_hbb_wait_msgint_ready(pacb)) {
799 printk(KERN_ERR "arcmsr%d: can't set driver mode. \n", pacb->host->host_no);
800 return false;
801 }
802 return true;
803}
804
645static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb) 805static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
646{ 806{
647 struct scsi_cmnd *pcmd = ccb->pcmd; 807 struct scsi_cmnd *pcmd = ccb->pcmd;
@@ -649,75 +809,25 @@ static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
649 scsi_dma_unmap(pcmd); 809 scsi_dma_unmap(pcmd);
650} 810}
651 811
652static void arcmsr_ccb_complete(struct CommandControlBlock *ccb, int stand_flag) 812static void arcmsr_ccb_complete(struct CommandControlBlock *ccb)
653{ 813{
654 struct AdapterControlBlock *acb = ccb->acb; 814 struct AdapterControlBlock *acb = ccb->acb;
655 struct scsi_cmnd *pcmd = ccb->pcmd; 815 struct scsi_cmnd *pcmd = ccb->pcmd;
656 816 unsigned long flags;
817 atomic_dec(&acb->ccboutstandingcount);
657 arcmsr_pci_unmap_dma(ccb); 818 arcmsr_pci_unmap_dma(ccb);
658 if (stand_flag == 1)
659 atomic_dec(&acb->ccboutstandingcount);
660 ccb->startdone = ARCMSR_CCB_DONE; 819 ccb->startdone = ARCMSR_CCB_DONE;
661 ccb->ccb_flags = 0; 820 spin_lock_irqsave(&acb->ccblist_lock, flags);
662 list_add_tail(&ccb->list, &acb->ccb_free_list); 821 list_add_tail(&ccb->list, &acb->ccb_free_list);
822 spin_unlock_irqrestore(&acb->ccblist_lock, flags);
663 pcmd->scsi_done(pcmd); 823 pcmd->scsi_done(pcmd);
664} 824}
665 825
666static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
667{
668 struct MessageUnit_A __iomem *reg = acb->pmuA;
669 int retry_count = 30;
670
671 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
672 do {
673 if (!arcmsr_hba_wait_msgint_ready(acb))
674 break;
675 else {
676 retry_count--;
677 printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
678 timeout, retry count down = %d \n", acb->host->host_no, retry_count);
679 }
680 } while (retry_count != 0);
681}
682
683static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
684{
685 struct MessageUnit_B *reg = acb->pmuB;
686 int retry_count = 30;
687
688 writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell_reg);
689 do {
690 if (!arcmsr_hbb_wait_msgint_ready(acb))
691 break;
692 else {
693 retry_count--;
694 printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
695 timeout,retry count down = %d \n", acb->host->host_no, retry_count);
696 }
697 } while (retry_count != 0);
698}
699
700static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
701{
702 switch (acb->adapter_type) {
703
704 case ACB_ADAPTER_TYPE_A: {
705 arcmsr_flush_hba_cache(acb);
706 }
707 break;
708
709 case ACB_ADAPTER_TYPE_B: {
710 arcmsr_flush_hbb_cache(acb);
711 }
712 }
713}
714
715static void arcmsr_report_sense_info(struct CommandControlBlock *ccb) 826static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
716{ 827{
717 828
718 struct scsi_cmnd *pcmd = ccb->pcmd; 829 struct scsi_cmnd *pcmd = ccb->pcmd;
719 struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer; 830 struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
720
721 pcmd->result = DID_OK << 16; 831 pcmd->result = DID_OK << 16;
722 if (sensebuffer) { 832 if (sensebuffer) {
723 int sense_data_length = 833 int sense_data_length =
@@ -733,8 +843,7 @@ static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
733static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb) 843static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
734{ 844{
735 u32 orig_mask = 0; 845 u32 orig_mask = 0;
736 switch (acb->adapter_type) { 846 switch (acb->adapter_type) {
737
738 case ACB_ADAPTER_TYPE_A : { 847 case ACB_ADAPTER_TYPE_A : {
739 struct MessageUnit_A __iomem *reg = acb->pmuA; 848 struct MessageUnit_A __iomem *reg = acb->pmuA;
740 orig_mask = readl(&reg->outbound_intmask); 849 orig_mask = readl(&reg->outbound_intmask);
@@ -742,35 +851,40 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
742 &reg->outbound_intmask); 851 &reg->outbound_intmask);
743 } 852 }
744 break; 853 break;
745
746 case ACB_ADAPTER_TYPE_B : { 854 case ACB_ADAPTER_TYPE_B : {
747 struct MessageUnit_B *reg = acb->pmuB; 855 struct MessageUnit_B *reg = acb->pmuB;
748 orig_mask = readl(reg->iop2drv_doorbell_mask_reg); 856 orig_mask = readl(reg->iop2drv_doorbell_mask);
749 writel(0, reg->iop2drv_doorbell_mask_reg); 857 writel(0, reg->iop2drv_doorbell_mask);
858 }
859 break;
860 case ACB_ADAPTER_TYPE_C:{
861 struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
862 /* disable all outbound interrupt */
863 orig_mask = readl(&reg->host_int_mask); /* disable outbound message0 int */
864 writel(orig_mask|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
750 } 865 }
751 break; 866 break;
752 } 867 }
753 return orig_mask; 868 return orig_mask;
754} 869}
755 870
756static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb, \ 871static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
757 struct CommandControlBlock *ccb, uint32_t flag_ccb) 872 struct CommandControlBlock *ccb, bool error)
758{ 873{
759
760 uint8_t id, lun; 874 uint8_t id, lun;
761 id = ccb->pcmd->device->id; 875 id = ccb->pcmd->device->id;
762 lun = ccb->pcmd->device->lun; 876 lun = ccb->pcmd->device->lun;
763 if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)) { 877 if (!error) {
764 if (acb->devstate[id][lun] == ARECA_RAID_GONE) 878 if (acb->devstate[id][lun] == ARECA_RAID_GONE)
765 acb->devstate[id][lun] = ARECA_RAID_GOOD; 879 acb->devstate[id][lun] = ARECA_RAID_GOOD;
766 ccb->pcmd->result = DID_OK << 16; 880 ccb->pcmd->result = DID_OK << 16;
767 arcmsr_ccb_complete(ccb, 1); 881 arcmsr_ccb_complete(ccb);
768 } else { 882 }else{
769 switch (ccb->arcmsr_cdb.DeviceStatus) { 883 switch (ccb->arcmsr_cdb.DeviceStatus) {
770 case ARCMSR_DEV_SELECT_TIMEOUT: { 884 case ARCMSR_DEV_SELECT_TIMEOUT: {
771 acb->devstate[id][lun] = ARECA_RAID_GONE; 885 acb->devstate[id][lun] = ARECA_RAID_GONE;
772 ccb->pcmd->result = DID_NO_CONNECT << 16; 886 ccb->pcmd->result = DID_NO_CONNECT << 16;
773 arcmsr_ccb_complete(ccb, 1); 887 arcmsr_ccb_complete(ccb);
774 } 888 }
775 break; 889 break;
776 890
@@ -779,49 +893,49 @@ static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb, \
779 case ARCMSR_DEV_INIT_FAIL: { 893 case ARCMSR_DEV_INIT_FAIL: {
780 acb->devstate[id][lun] = ARECA_RAID_GONE; 894 acb->devstate[id][lun] = ARECA_RAID_GONE;
781 ccb->pcmd->result = DID_BAD_TARGET << 16; 895 ccb->pcmd->result = DID_BAD_TARGET << 16;
782 arcmsr_ccb_complete(ccb, 1); 896 arcmsr_ccb_complete(ccb);
783 } 897 }
784 break; 898 break;
785 899
786 case ARCMSR_DEV_CHECK_CONDITION: { 900 case ARCMSR_DEV_CHECK_CONDITION: {
787 acb->devstate[id][lun] = ARECA_RAID_GOOD; 901 acb->devstate[id][lun] = ARECA_RAID_GOOD;
788 arcmsr_report_sense_info(ccb); 902 arcmsr_report_sense_info(ccb);
789 arcmsr_ccb_complete(ccb, 1); 903 arcmsr_ccb_complete(ccb);
790 } 904 }
791 break; 905 break;
792 906
793 default: 907 default:
794 printk(KERN_NOTICE 908 printk(KERN_NOTICE
795 "arcmsr%d: scsi id = %d lun = %d" 909 "arcmsr%d: scsi id = %d lun = %d isr get command error done, \
796 " isr get command error done, " 910 but got unknown DeviceStatus = 0x%x \n"
797 "but got unknown DeviceStatus = 0x%x \n" 911 , acb->host->host_no
798 , acb->host->host_no 912 , id
799 , id 913 , lun
800 , lun 914 , ccb->arcmsr_cdb.DeviceStatus);
801 , ccb->arcmsr_cdb.DeviceStatus); 915 acb->devstate[id][lun] = ARECA_RAID_GONE;
802 acb->devstate[id][lun] = ARECA_RAID_GONE; 916 ccb->pcmd->result = DID_NO_CONNECT << 16;
803 ccb->pcmd->result = DID_NO_CONNECT << 16; 917 arcmsr_ccb_complete(ccb);
804 arcmsr_ccb_complete(ccb, 1);
805 break; 918 break;
806 } 919 }
807 } 920 }
808} 921}
809 922
810static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, uint32_t flag_ccb) 923static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
811 924
812{ 925{
813 struct CommandControlBlock *ccb; 926 int id, lun;
814 927 if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
815 ccb = (struct CommandControlBlock *)(acb->vir2phy_offset + (flag_ccb << 5)); 928 if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
816 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) { 929 struct scsi_cmnd *abortcmd = pCCB->pcmd;
817 if (ccb->startdone == ARCMSR_CCB_ABORTED) {
818 struct scsi_cmnd *abortcmd = ccb->pcmd;
819 if (abortcmd) { 930 if (abortcmd) {
931 id = abortcmd->device->id;
932 lun = abortcmd->device->lun;
820 abortcmd->result |= DID_ABORT << 16; 933 abortcmd->result |= DID_ABORT << 16;
821 arcmsr_ccb_complete(ccb, 1); 934 arcmsr_ccb_complete(pCCB);
822 printk(KERN_NOTICE "arcmsr%d: ccb ='0x%p' \ 935 printk(KERN_NOTICE "arcmsr%d: pCCB ='0x%p' isr got aborted command \n",
823 isr got aborted command \n", acb->host->host_no, ccb); 936 acb->host->host_no, pCCB);
824 } 937 }
938 return;
825 } 939 }
826 printk(KERN_NOTICE "arcmsr%d: isr get an illegal ccb command \ 940 printk(KERN_NOTICE "arcmsr%d: isr get an illegal ccb command \
827 done acb = '0x%p'" 941 done acb = '0x%p'"
@@ -829,20 +943,22 @@ static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, uint32_t fla
829 " ccboutstandingcount = %d \n" 943 " ccboutstandingcount = %d \n"
830 , acb->host->host_no 944 , acb->host->host_no
831 , acb 945 , acb
832 , ccb 946 , pCCB
833 , ccb->acb 947 , pCCB->acb
834 , ccb->startdone 948 , pCCB->startdone
835 , atomic_read(&acb->ccboutstandingcount)); 949 , atomic_read(&acb->ccboutstandingcount));
950 return;
836 } 951 }
837 else 952 arcmsr_report_ccb_state(acb, pCCB, error);
838 arcmsr_report_ccb_state(acb, ccb, flag_ccb);
839} 953}
840 954
841static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb) 955static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
842{ 956{
843 int i = 0; 957 int i = 0;
844 uint32_t flag_ccb; 958 uint32_t flag_ccb;
845 959 struct ARCMSR_CDB *pARCMSR_CDB;
960 bool error;
961 struct CommandControlBlock *pCCB;
846 switch (acb->adapter_type) { 962 switch (acb->adapter_type) {
847 963
848 case ACB_ADAPTER_TYPE_A: { 964 case ACB_ADAPTER_TYPE_A: {
@@ -852,9 +968,12 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
852 acb->outbound_int_enable; 968 acb->outbound_int_enable;
853 /*clear and abort all outbound posted Q*/ 969 /*clear and abort all outbound posted Q*/
854 writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/ 970 writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
855 while (((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) 971 while(((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF)
856 && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) { 972 && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
857 arcmsr_drain_donequeue(acb, flag_ccb); 973 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
974 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
975 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
976 arcmsr_drain_donequeue(acb, pCCB, error);
858 } 977 }
859 } 978 }
860 break; 979 break;
@@ -862,17 +981,37 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
862 case ACB_ADAPTER_TYPE_B: { 981 case ACB_ADAPTER_TYPE_B: {
863 struct MessageUnit_B *reg = acb->pmuB; 982 struct MessageUnit_B *reg = acb->pmuB;
864 /*clear all outbound posted Q*/ 983 /*clear all outbound posted Q*/
984 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, &reg->iop2drv_doorbell); /* clear doorbell interrupt */
865 for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) { 985 for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
866 if ((flag_ccb = readl(&reg->done_qbuffer[i])) != 0) { 986 if ((flag_ccb = readl(&reg->done_qbuffer[i])) != 0) {
867 writel(0, &reg->done_qbuffer[i]); 987 writel(0, &reg->done_qbuffer[i]);
868 arcmsr_drain_donequeue(acb, flag_ccb); 988 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/
989 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
990 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
991 arcmsr_drain_donequeue(acb, pCCB, error);
869 } 992 }
870 writel(0, &reg->post_qbuffer[i]); 993 reg->post_qbuffer[i] = 0;
871 } 994 }
872 reg->doneq_index = 0; 995 reg->doneq_index = 0;
873 reg->postq_index = 0; 996 reg->postq_index = 0;
874 } 997 }
875 break; 998 break;
999 case ACB_ADAPTER_TYPE_C: {
1000 struct MessageUnit_C *reg = acb->pmuC;
1001 struct ARCMSR_CDB *pARCMSR_CDB;
1002 uint32_t flag_ccb, ccb_cdb_phy;
1003 bool error;
1004 struct CommandControlBlock *pCCB;
1005 while ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
1006 /*need to do*/
1007 flag_ccb = readl(&reg->outbound_queueport_low);
1008 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
1009 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+ccb_cdb_phy);/*frame must be 32 bytes aligned*/
1010 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
1011 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
1012 arcmsr_drain_donequeue(acb, pCCB, error);
1013 }
1014 }
876 } 1015 }
877} 1016}
878static void arcmsr_remove(struct pci_dev *pdev) 1017static void arcmsr_remove(struct pci_dev *pdev)
@@ -887,11 +1026,11 @@ static void arcmsr_remove(struct pci_dev *pdev)
887 del_timer_sync(&acb->eternal_timer); 1026 del_timer_sync(&acb->eternal_timer);
888 arcmsr_disable_outbound_ints(acb); 1027 arcmsr_disable_outbound_ints(acb);
889 arcmsr_stop_adapter_bgrb(acb); 1028 arcmsr_stop_adapter_bgrb(acb);
890 arcmsr_flush_adapter_cache(acb); 1029 arcmsr_flush_adapter_cache(acb);
891 acb->acb_flags |= ACB_F_SCSISTOPADAPTER; 1030 acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
892 acb->acb_flags &= ~ACB_F_IOP_INITED; 1031 acb->acb_flags &= ~ACB_F_IOP_INITED;
893 1032
894 for (poll_count = 0; poll_count < ARCMSR_MAX_OUTSTANDING_CMD; poll_count++) { 1033 for (poll_count = 0; poll_count < ARCMSR_MAX_OUTSTANDING_CMD; poll_count++){
895 if (!atomic_read(&acb->ccboutstandingcount)) 1034 if (!atomic_read(&acb->ccboutstandingcount))
896 break; 1035 break;
897 arcmsr_interrupt(acb);/* FIXME: need spinlock */ 1036 arcmsr_interrupt(acb);/* FIXME: need spinlock */
@@ -908,17 +1047,16 @@ static void arcmsr_remove(struct pci_dev *pdev)
908 if (ccb->startdone == ARCMSR_CCB_START) { 1047 if (ccb->startdone == ARCMSR_CCB_START) {
909 ccb->startdone = ARCMSR_CCB_ABORTED; 1048 ccb->startdone = ARCMSR_CCB_ABORTED;
910 ccb->pcmd->result = DID_ABORT << 16; 1049 ccb->pcmd->result = DID_ABORT << 16;
911 arcmsr_ccb_complete(ccb, 1); 1050 arcmsr_ccb_complete(ccb);
912 } 1051 }
913 } 1052 }
914 } 1053 }
915
916 free_irq(pdev->irq, acb); 1054 free_irq(pdev->irq, acb);
917 arcmsr_free_ccb_pool(acb); 1055 arcmsr_free_ccb_pool(acb);
1056 arcmsr_free_hbb_mu(acb);
1057 arcmsr_unmap_pciregion(acb);
918 pci_release_regions(pdev); 1058 pci_release_regions(pdev);
919
920 scsi_host_put(host); 1059 scsi_host_put(host);
921
922 pci_disable_device(pdev); 1060 pci_disable_device(pdev);
923 pci_set_drvdata(pdev, NULL); 1061 pci_set_drvdata(pdev, NULL);
924} 1062}
@@ -938,7 +1076,6 @@ static void arcmsr_shutdown(struct pci_dev *pdev)
938static int arcmsr_module_init(void) 1076static int arcmsr_module_init(void)
939{ 1077{
940 int error = 0; 1078 int error = 0;
941
942 error = pci_register_driver(&arcmsr_pci_driver); 1079 error = pci_register_driver(&arcmsr_pci_driver);
943 return error; 1080 return error;
944} 1081}
@@ -954,10 +1091,9 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
954 u32 intmask_org) 1091 u32 intmask_org)
955{ 1092{
956 u32 mask; 1093 u32 mask;
957
958 switch (acb->adapter_type) { 1094 switch (acb->adapter_type) {
959 1095
960 case ACB_ADAPTER_TYPE_A : { 1096 case ACB_ADAPTER_TYPE_A: {
961 struct MessageUnit_A __iomem *reg = acb->pmuA; 1097 struct MessageUnit_A __iomem *reg = acb->pmuA;
962 mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE | 1098 mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
963 ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE| 1099 ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|
@@ -967,15 +1103,22 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
967 } 1103 }
968 break; 1104 break;
969 1105
970 case ACB_ADAPTER_TYPE_B : { 1106 case ACB_ADAPTER_TYPE_B: {
971 struct MessageUnit_B *reg = acb->pmuB; 1107 struct MessageUnit_B *reg = acb->pmuB;
972 mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK | 1108 mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK |
973 ARCMSR_IOP2DRV_DATA_READ_OK | 1109 ARCMSR_IOP2DRV_DATA_READ_OK |
974 ARCMSR_IOP2DRV_CDB_DONE | 1110 ARCMSR_IOP2DRV_CDB_DONE |
975 ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); 1111 ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
976 writel(mask, reg->iop2drv_doorbell_mask_reg); 1112 writel(mask, reg->iop2drv_doorbell_mask);
977 acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f; 1113 acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
978 } 1114 }
1115 break;
1116 case ACB_ADAPTER_TYPE_C: {
1117 struct MessageUnit_C *reg = acb->pmuC;
1118 mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK|ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
1119 writel(intmask_org & mask, &reg->host_int_mask);
1120 acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
1121 }
979 } 1122 }
980} 1123}
981 1124
@@ -986,80 +1129,70 @@ static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
986 int8_t *psge = (int8_t *)&arcmsr_cdb->u; 1129 int8_t *psge = (int8_t *)&arcmsr_cdb->u;
987 __le32 address_lo, address_hi; 1130 __le32 address_lo, address_hi;
988 int arccdbsize = 0x30; 1131 int arccdbsize = 0x30;
1132 __le32 length = 0;
1133 int i;
1134 struct scatterlist *sg;
989 int nseg; 1135 int nseg;
990
991 ccb->pcmd = pcmd; 1136 ccb->pcmd = pcmd;
992 memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB)); 1137 memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
993 arcmsr_cdb->Bus = 0;
994 arcmsr_cdb->TargetID = pcmd->device->id; 1138 arcmsr_cdb->TargetID = pcmd->device->id;
995 arcmsr_cdb->LUN = pcmd->device->lun; 1139 arcmsr_cdb->LUN = pcmd->device->lun;
996 arcmsr_cdb->Function = 1; 1140 arcmsr_cdb->Function = 1;
997 arcmsr_cdb->CdbLength = (uint8_t)pcmd->cmd_len; 1141 arcmsr_cdb->Context = 0;
998 arcmsr_cdb->Context = (unsigned long)arcmsr_cdb;
999 memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len); 1142 memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
1000 1143
1001 nseg = scsi_dma_map(pcmd); 1144 nseg = scsi_dma_map(pcmd);
1002 if (nseg > ARCMSR_MAX_SG_ENTRIES) 1145 if (unlikely(nseg > acb->host->sg_tablesize || nseg < 0))
1003 return FAILED; 1146 return FAILED;
1004 BUG_ON(nseg < 0); 1147 scsi_for_each_sg(pcmd, sg, nseg, i) {
1005 1148 /* Get the physical address of the current data pointer */
1006 if (nseg) { 1149 length = cpu_to_le32(sg_dma_len(sg));
1007 __le32 length; 1150 address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
1008 int i, cdb_sgcount = 0; 1151 address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
1009 struct scatterlist *sg; 1152 if (address_hi == 0) {
1010 1153 struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
1011 /* map stor port SG list to our iop SG List. */ 1154
1012 scsi_for_each_sg(pcmd, sg, nseg, i) { 1155 pdma_sg->address = address_lo;
1013 /* Get the physical address of the current data pointer */ 1156 pdma_sg->length = length;
1014 length = cpu_to_le32(sg_dma_len(sg)); 1157 psge += sizeof (struct SG32ENTRY);
1015 address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg))); 1158 arccdbsize += sizeof (struct SG32ENTRY);
1016 address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg))); 1159 } else {
1017 if (address_hi == 0) { 1160 struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
1018 struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
1019
1020 pdma_sg->address = address_lo;
1021 pdma_sg->length = length;
1022 psge += sizeof (struct SG32ENTRY);
1023 arccdbsize += sizeof (struct SG32ENTRY);
1024 } else {
1025 struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
1026 1161
1027 pdma_sg->addresshigh = address_hi; 1162 pdma_sg->addresshigh = address_hi;
1028 pdma_sg->address = address_lo; 1163 pdma_sg->address = address_lo;
1029 pdma_sg->length = length|cpu_to_le32(IS_SG64_ADDR); 1164 pdma_sg->length = length|cpu_to_le32(IS_SG64_ADDR);
1030 psge += sizeof (struct SG64ENTRY); 1165 psge += sizeof (struct SG64ENTRY);
1031 arccdbsize += sizeof (struct SG64ENTRY); 1166 arccdbsize += sizeof (struct SG64ENTRY);
1032 }
1033 cdb_sgcount++;
1034 } 1167 }
1035 arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount;
1036 arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
1037 if ( arccdbsize > 256)
1038 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
1039 } 1168 }
1040 if (pcmd->sc_data_direction == DMA_TO_DEVICE ) { 1169 arcmsr_cdb->sgcount = (uint8_t)nseg;
1170 arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
1171 arcmsr_cdb->msgPages = arccdbsize/0x100 + (arccdbsize % 0x100 ? 1 : 0);
1172 if ( arccdbsize > 256)
1173 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
1174 if (pcmd->cmnd[0]|WRITE_6 || pcmd->cmnd[0]|WRITE_10 || pcmd->cmnd[0]|WRITE_12 ){
1041 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE; 1175 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
1042 ccb->ccb_flags |= CCB_FLAG_WRITE;
1043 } 1176 }
1177 ccb->arc_cdb_size = arccdbsize;
1044 return SUCCESS; 1178 return SUCCESS;
1045} 1179}
1046 1180
1047static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb) 1181static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
1048{ 1182{
1049 uint32_t cdb_shifted_phyaddr = ccb->cdb_shifted_phyaddr; 1183 uint32_t cdb_phyaddr_pattern = ccb->cdb_phyaddr_pattern;
1050 struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb; 1184 struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
1051 atomic_inc(&acb->ccboutstandingcount); 1185 atomic_inc(&acb->ccboutstandingcount);
1052 ccb->startdone = ARCMSR_CCB_START; 1186 ccb->startdone = ARCMSR_CCB_START;
1053
1054 switch (acb->adapter_type) { 1187 switch (acb->adapter_type) {
1055 case ACB_ADAPTER_TYPE_A: { 1188 case ACB_ADAPTER_TYPE_A: {
1056 struct MessageUnit_A __iomem *reg = acb->pmuA; 1189 struct MessageUnit_A __iomem *reg = acb->pmuA;
1057 1190
1058 if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) 1191 if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
1059 writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE, 1192 writel(cdb_phyaddr_pattern | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
1060 &reg->inbound_queueport); 1193 &reg->inbound_queueport);
1061 else { 1194 else {
1062 writel(cdb_shifted_phyaddr, &reg->inbound_queueport); 1195 writel(cdb_phyaddr_pattern, &reg->inbound_queueport);
1063 } 1196 }
1064 } 1197 }
1065 break; 1198 break;
@@ -1071,18 +1204,30 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr
1071 ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE); 1204 ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
1072 writel(0, &reg->post_qbuffer[ending_index]); 1205 writel(0, &reg->post_qbuffer[ending_index]);
1073 if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) { 1206 if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
1074 writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,\ 1207 writel(cdb_phyaddr_pattern | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,\
1075 &reg->post_qbuffer[index]); 1208 &reg->post_qbuffer[index]);
1076 } 1209 } else {
1077 else { 1210 writel(cdb_phyaddr_pattern, &reg->post_qbuffer[index]);
1078 writel(cdb_shifted_phyaddr, &reg->post_qbuffer[index]);
1079 } 1211 }
1080 index++; 1212 index++;
1081 index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */ 1213 index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */
1082 reg->postq_index = index; 1214 reg->postq_index = index;
1083 writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell_reg); 1215 writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell);
1084 } 1216 }
1085 break; 1217 break;
1218 case ACB_ADAPTER_TYPE_C: {
1219 struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)acb->pmuC;
1220 uint32_t ccb_post_stamp, arc_cdb_size;
1221
1222 arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
1223 ccb_post_stamp = (cdb_phyaddr_pattern | ((arc_cdb_size - 1) >> 6) | 1);
1224 if (acb->cdb_phyaddr_hi32) {
1225 writel(acb->cdb_phyaddr_hi32, &phbcmu->inbound_queueport_high);
1226 writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
1227 } else {
1228 writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
1229 }
1230 }
1086 } 1231 }
1087} 1232}
1088 1233
@@ -1091,8 +1236,7 @@ static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
1091 struct MessageUnit_A __iomem *reg = acb->pmuA; 1236 struct MessageUnit_A __iomem *reg = acb->pmuA;
1092 acb->acb_flags &= ~ACB_F_MSG_START_BGRB; 1237 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1093 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0); 1238 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
1094 1239 if (!arcmsr_hba_wait_msgint_ready(acb)) {
1095 if (arcmsr_hba_wait_msgint_ready(acb)) {
1096 printk(KERN_NOTICE 1240 printk(KERN_NOTICE
1097 "arcmsr%d: wait 'stop adapter background rebulid' timeout \n" 1241 "arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
1098 , acb->host->host_no); 1242 , acb->host->host_no);
@@ -1103,15 +1247,28 @@ static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
1103{ 1247{
1104 struct MessageUnit_B *reg = acb->pmuB; 1248 struct MessageUnit_B *reg = acb->pmuB;
1105 acb->acb_flags &= ~ACB_F_MSG_START_BGRB; 1249 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1106 writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell_reg); 1250 writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell);
1107 1251
1108 if (arcmsr_hbb_wait_msgint_ready(acb)) { 1252 if (!arcmsr_hbb_wait_msgint_ready(acb)) {
1109 printk(KERN_NOTICE 1253 printk(KERN_NOTICE
1110 "arcmsr%d: wait 'stop adapter background rebulid' timeout \n" 1254 "arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
1111 , acb->host->host_no); 1255 , acb->host->host_no);
1112 } 1256 }
1113} 1257}
1114 1258
1259static void arcmsr_stop_hbc_bgrb(struct AdapterControlBlock *pACB)
1260{
1261 struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;
1262 pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
1263 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
1264 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
1265 if (!arcmsr_hbc_wait_msgint_ready(pACB)) {
1266 printk(KERN_NOTICE
1267 "arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
1268 , pACB->host->host_no);
1269 }
1270 return;
1271}
1115static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb) 1272static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
1116{ 1273{
1117 switch (acb->adapter_type) { 1274 switch (acb->adapter_type) {
@@ -1124,30 +1281,15 @@ static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
1124 arcmsr_stop_hbb_bgrb(acb); 1281 arcmsr_stop_hbb_bgrb(acb);
1125 } 1282 }
1126 break; 1283 break;
1284 case ACB_ADAPTER_TYPE_C: {
1285 arcmsr_stop_hbc_bgrb(acb);
1286 }
1127 } 1287 }
1128} 1288}
1129 1289
1130static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb) 1290static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
1131{ 1291{
1132 switch (acb->adapter_type) { 1292 dma_free_coherent(&acb->pdev->dev, acb->uncache_size, acb->dma_coherent, acb->dma_coherent_handle);
1133 case ACB_ADAPTER_TYPE_A: {
1134 iounmap(acb->pmuA);
1135 dma_free_coherent(&acb->pdev->dev,
1136 ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20,
1137 acb->dma_coherent,
1138 acb->dma_coherent_handle);
1139 break;
1140 }
1141 case ACB_ADAPTER_TYPE_B: {
1142 struct MessageUnit_B *reg = acb->pmuB;
1143 iounmap((u8 *)reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL);
1144 iounmap((u8 *)reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER);
1145 dma_free_coherent(&acb->pdev->dev,
1146 (ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 +
1147 sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle);
1148 }
1149 }
1150
1151} 1293}
1152 1294
1153void arcmsr_iop_message_read(struct AdapterControlBlock *acb) 1295void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
@@ -1161,9 +1303,13 @@ void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
1161 1303
1162 case ACB_ADAPTER_TYPE_B: { 1304 case ACB_ADAPTER_TYPE_B: {
1163 struct MessageUnit_B *reg = acb->pmuB; 1305 struct MessageUnit_B *reg = acb->pmuB;
1164 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell_reg); 1306 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
1165 } 1307 }
1166 break; 1308 break;
1309 case ACB_ADAPTER_TYPE_C: {
1310 struct MessageUnit_C __iomem *reg = acb->pmuC;
1311 writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
1312 }
1167 } 1313 }
1168} 1314}
1169 1315
@@ -1186,7 +1332,16 @@ static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
1186 ** push inbound doorbell tell iop, driver data write ok 1332 ** push inbound doorbell tell iop, driver data write ok
1187 ** and wait reply on next hwinterrupt for next Qbuffer post 1333 ** and wait reply on next hwinterrupt for next Qbuffer post
1188 */ 1334 */
1189 writel(ARCMSR_DRV2IOP_DATA_WRITE_OK, reg->drv2iop_doorbell_reg); 1335 writel(ARCMSR_DRV2IOP_DATA_WRITE_OK, reg->drv2iop_doorbell);
1336 }
1337 break;
1338 case ACB_ADAPTER_TYPE_C: {
1339 struct MessageUnit_C __iomem *reg = acb->pmuC;
1340 /*
1341 ** push inbound doorbell tell iop, driver data write ok
1342 ** and wait reply on next hwinterrupt for next Qbuffer post
1343 */
1344 writel(ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK, &reg->inbound_doorbell);
1190 } 1345 }
1191 break; 1346 break;
1192 } 1347 }
@@ -1195,7 +1350,6 @@ static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
1195struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb) 1350struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
1196{ 1351{
1197 struct QBUFFER __iomem *qbuffer = NULL; 1352 struct QBUFFER __iomem *qbuffer = NULL;
1198
1199 switch (acb->adapter_type) { 1353 switch (acb->adapter_type) {
1200 1354
1201 case ACB_ADAPTER_TYPE_A: { 1355 case ACB_ADAPTER_TYPE_A: {
@@ -1206,9 +1360,13 @@ struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
1206 1360
1207 case ACB_ADAPTER_TYPE_B: { 1361 case ACB_ADAPTER_TYPE_B: {
1208 struct MessageUnit_B *reg = acb->pmuB; 1362 struct MessageUnit_B *reg = acb->pmuB;
1209 qbuffer = (struct QBUFFER __iomem *)reg->ioctl_rbuffer_reg; 1363 qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
1210 } 1364 }
1211 break; 1365 break;
1366 case ACB_ADAPTER_TYPE_C: {
1367 struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)acb->pmuC;
1368 qbuffer = (struct QBUFFER __iomem *)&phbcmu->message_rbuffer;
1369 }
1212 } 1370 }
1213 return qbuffer; 1371 return qbuffer;
1214} 1372}
@@ -1216,7 +1374,6 @@ struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
1216static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb) 1374static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb)
1217{ 1375{
1218 struct QBUFFER __iomem *pqbuffer = NULL; 1376 struct QBUFFER __iomem *pqbuffer = NULL;
1219
1220 switch (acb->adapter_type) { 1377 switch (acb->adapter_type) {
1221 1378
1222 case ACB_ADAPTER_TYPE_A: { 1379 case ACB_ADAPTER_TYPE_A: {
@@ -1227,9 +1384,14 @@ static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBloc
1227 1384
1228 case ACB_ADAPTER_TYPE_B: { 1385 case ACB_ADAPTER_TYPE_B: {
1229 struct MessageUnit_B *reg = acb->pmuB; 1386 struct MessageUnit_B *reg = acb->pmuB;
1230 pqbuffer = (struct QBUFFER __iomem *)reg->ioctl_wbuffer_reg; 1387 pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
1231 } 1388 }
1232 break; 1389 break;
1390 case ACB_ADAPTER_TYPE_C: {
1391 struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
1392 pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer;
1393 }
1394
1233 } 1395 }
1234 return pqbuffer; 1396 return pqbuffer;
1235} 1397}
@@ -1240,19 +1402,18 @@ static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
1240 struct QBUFFER *pQbuffer; 1402 struct QBUFFER *pQbuffer;
1241 uint8_t __iomem *iop_data; 1403 uint8_t __iomem *iop_data;
1242 int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex; 1404 int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;
1243
1244 rqbuf_lastindex = acb->rqbuf_lastindex; 1405 rqbuf_lastindex = acb->rqbuf_lastindex;
1245 rqbuf_firstindex = acb->rqbuf_firstindex; 1406 rqbuf_firstindex = acb->rqbuf_firstindex;
1246 prbuffer = arcmsr_get_iop_rqbuffer(acb); 1407 prbuffer = arcmsr_get_iop_rqbuffer(acb);
1247 iop_data = (uint8_t __iomem *)prbuffer->data; 1408 iop_data = (uint8_t __iomem *)prbuffer->data;
1248 iop_len = prbuffer->data_len; 1409 iop_len = prbuffer->data_len;
1249 my_empty_len = (rqbuf_firstindex - rqbuf_lastindex -1)&(ARCMSR_MAX_QBUFFER -1); 1410 my_empty_len = (rqbuf_firstindex - rqbuf_lastindex - 1) & (ARCMSR_MAX_QBUFFER - 1);
1250 1411
1251 if (my_empty_len >= iop_len) 1412 if (my_empty_len >= iop_len)
1252 { 1413 {
1253 while (iop_len > 0) { 1414 while (iop_len > 0) {
1254 pQbuffer = (struct QBUFFER *)&acb->rqbuffer[rqbuf_lastindex]; 1415 pQbuffer = (struct QBUFFER *)&acb->rqbuffer[rqbuf_lastindex];
1255 memcpy(pQbuffer, iop_data,1); 1416 memcpy(pQbuffer, iop_data, 1);
1256 rqbuf_lastindex++; 1417 rqbuf_lastindex++;
1257 rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; 1418 rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1258 iop_data++; 1419 iop_data++;
@@ -1303,25 +1464,52 @@ static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
1303{ 1464{
1304 uint32_t outbound_doorbell; 1465 uint32_t outbound_doorbell;
1305 struct MessageUnit_A __iomem *reg = acb->pmuA; 1466 struct MessageUnit_A __iomem *reg = acb->pmuA;
1306
1307 outbound_doorbell = readl(&reg->outbound_doorbell); 1467 outbound_doorbell = readl(&reg->outbound_doorbell);
1308 writel(outbound_doorbell, &reg->outbound_doorbell); 1468 writel(outbound_doorbell, &reg->outbound_doorbell);
1309 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) { 1469 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
1310 arcmsr_iop2drv_data_wrote_handle(acb); 1470 arcmsr_iop2drv_data_wrote_handle(acb);
1311 } 1471 }
1312 1472
1313 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) { 1473 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
1314 arcmsr_iop2drv_data_read_handle(acb); 1474 arcmsr_iop2drv_data_read_handle(acb);
1315 } 1475 }
1316} 1476}
1317 1477static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *pACB)
1478{
1479 uint32_t outbound_doorbell;
1480 struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;
1481 /*
1482 *******************************************************************
1483 ** Maybe here we need to check wrqbuffer_lock is lock or not
1484 ** DOORBELL: din! don!
1485 ** check if there are any mail need to pack from firmware
1486 *******************************************************************
1487 */
1488 outbound_doorbell = readl(&reg->outbound_doorbell);
1489 writel(outbound_doorbell, &reg->outbound_doorbell_clear);/*clear interrupt*/
1490 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
1491 arcmsr_iop2drv_data_wrote_handle(pACB);
1492 }
1493 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) {
1494 arcmsr_iop2drv_data_read_handle(pACB);
1495 }
1496 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
1497 arcmsr_hbc_message_isr(pACB); /* messenger of "driver to iop commands" */
1498 }
1499 return;
1500}
1318static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb) 1501static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
1319{ 1502{
1320 uint32_t flag_ccb; 1503 uint32_t flag_ccb;
1321 struct MessageUnit_A __iomem *reg = acb->pmuA; 1504 struct MessageUnit_A __iomem *reg = acb->pmuA;
1322 1505 struct ARCMSR_CDB *pARCMSR_CDB;
1506 struct CommandControlBlock *pCCB;
1507 bool error;
1323 while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) { 1508 while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) {
1324 arcmsr_drain_donequeue(acb, flag_ccb); 1509 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
1510 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
1511 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
1512 arcmsr_drain_donequeue(acb, pCCB, error);
1325 } 1513 }
1326} 1514}
1327 1515
@@ -1330,29 +1518,62 @@ static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
1330 uint32_t index; 1518 uint32_t index;
1331 uint32_t flag_ccb; 1519 uint32_t flag_ccb;
1332 struct MessageUnit_B *reg = acb->pmuB; 1520 struct MessageUnit_B *reg = acb->pmuB;
1333 1521 struct ARCMSR_CDB *pARCMSR_CDB;
1522 struct CommandControlBlock *pCCB;
1523 bool error;
1334 index = reg->doneq_index; 1524 index = reg->doneq_index;
1335
1336 while ((flag_ccb = readl(&reg->done_qbuffer[index])) != 0) { 1525 while ((flag_ccb = readl(&reg->done_qbuffer[index])) != 0) {
1337 writel(0, &reg->done_qbuffer[index]); 1526 writel(0, &reg->done_qbuffer[index]);
1338 arcmsr_drain_donequeue(acb, flag_ccb); 1527 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/
1528 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
1529 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
1530 arcmsr_drain_donequeue(acb, pCCB, error);
1339 index++; 1531 index++;
1340 index %= ARCMSR_MAX_HBB_POSTQUEUE; 1532 index %= ARCMSR_MAX_HBB_POSTQUEUE;
1341 reg->doneq_index = index; 1533 reg->doneq_index = index;
1342 } 1534 }
1343} 1535}
1536
1537static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb)
1538{
1539 struct MessageUnit_C *phbcmu;
1540 struct ARCMSR_CDB *arcmsr_cdb;
1541 struct CommandControlBlock *ccb;
1542 uint32_t flag_ccb, ccb_cdb_phy, throttling = 0;
1543 int error;
1544
1545 phbcmu = (struct MessageUnit_C *)acb->pmuC;
1546 /* areca cdb command done */
1547 /* Use correct offset and size for syncing */
1548
1549 while (readl(&phbcmu->host_int_status) &
1550 ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR){
1551 /* check if command done with no error*/
1552 flag_ccb = readl(&phbcmu->outbound_queueport_low);
1553 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);/*frame must be 32 bytes aligned*/
1554 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
1555 ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
1556 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
1557 /* check if command done with no error */
1558 arcmsr_drain_donequeue(acb, ccb, error);
1559 if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
1560 writel(ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING, &phbcmu->inbound_doorbell);
1561 break;
1562 }
1563 throttling++;
1564 }
1565}
1344/* 1566/*
1345********************************************************************************** 1567**********************************************************************************
1346** Handle a message interrupt 1568** Handle a message interrupt
1347** 1569**
1348** The only message interrupt we expect is in response to a query for the current adapter config. 1570** The only message interrupt we expect is in response to a query for the current adapter config.
1349** We want this in order to compare the drivemap so that we can detect newly-attached drives. 1571** We want this in order to compare the drivemap so that we can detect newly-attached drives.
1350********************************************************************************** 1572**********************************************************************************
1351*/ 1573*/
1352static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb) 1574static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb)
1353{ 1575{
1354 struct MessageUnit_A *reg = acb->pmuA; 1576 struct MessageUnit_A *reg = acb->pmuA;
1355
1356 /*clear interrupt and message state*/ 1577 /*clear interrupt and message state*/
1357 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus); 1578 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus);
1358 schedule_work(&acb->arcmsr_do_message_isr_bh); 1579 schedule_work(&acb->arcmsr_do_message_isr_bh);
@@ -1362,16 +1583,32 @@ static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb)
1362 struct MessageUnit_B *reg = acb->pmuB; 1583 struct MessageUnit_B *reg = acb->pmuB;
1363 1584
1364 /*clear interrupt and message state*/ 1585 /*clear interrupt and message state*/
1365 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg); 1586 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
1587 schedule_work(&acb->arcmsr_do_message_isr_bh);
1588}
1589/*
1590**********************************************************************************
1591** Handle a message interrupt
1592**
1593** The only message interrupt we expect is in response to a query for the
1594** current adapter config.
1595** We want this in order to compare the drivemap so that we can detect newly-attached drives.
1596**********************************************************************************
1597*/
1598static void arcmsr_hbc_message_isr(struct AdapterControlBlock *acb)
1599{
1600 struct MessageUnit_C *reg = acb->pmuC;
1601 /*clear interrupt and message state*/
1602 writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);
1366 schedule_work(&acb->arcmsr_do_message_isr_bh); 1603 schedule_work(&acb->arcmsr_do_message_isr_bh);
1367} 1604}
1605
1368static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb) 1606static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb)
1369{ 1607{
1370 uint32_t outbound_intstatus; 1608 uint32_t outbound_intstatus;
1371 struct MessageUnit_A __iomem *reg = acb->pmuA; 1609 struct MessageUnit_A __iomem *reg = acb->pmuA;
1372
1373 outbound_intstatus = readl(&reg->outbound_intstatus) & 1610 outbound_intstatus = readl(&reg->outbound_intstatus) &
1374 acb->outbound_int_enable; 1611 acb->outbound_int_enable;
1375 if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) { 1612 if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) {
1376 return 1; 1613 return 1;
1377 } 1614 }
@@ -1382,7 +1619,7 @@ static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb)
1382 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) { 1619 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
1383 arcmsr_hba_postqueue_isr(acb); 1620 arcmsr_hba_postqueue_isr(acb);
1384 } 1621 }
1385 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { 1622 if(outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
1386 /* messenger of "driver to iop commands" */ 1623 /* messenger of "driver to iop commands" */
1387 arcmsr_hba_message_isr(acb); 1624 arcmsr_hba_message_isr(acb);
1388 } 1625 }
@@ -1393,18 +1630,17 @@ static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb)
1393{ 1630{
1394 uint32_t outbound_doorbell; 1631 uint32_t outbound_doorbell;
1395 struct MessageUnit_B *reg = acb->pmuB; 1632 struct MessageUnit_B *reg = acb->pmuB;
1396 1633 outbound_doorbell = readl(reg->iop2drv_doorbell) &
1397 outbound_doorbell = readl(reg->iop2drv_doorbell_reg) & 1634 acb->outbound_int_enable;
1398 acb->outbound_int_enable;
1399 if (!outbound_doorbell) 1635 if (!outbound_doorbell)
1400 return 1; 1636 return 1;
1401 1637
1402 writel(~outbound_doorbell, reg->iop2drv_doorbell_reg); 1638 writel(~outbound_doorbell, reg->iop2drv_doorbell);
1403 /*in case the last action of doorbell interrupt clearance is cached, 1639 /*in case the last action of doorbell interrupt clearance is cached,
1404 this action can push HW to write down the clear bit*/ 1640 this action can push HW to write down the clear bit*/
1405 readl(reg->iop2drv_doorbell_reg); 1641 readl(reg->iop2drv_doorbell);
1406 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg); 1642 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
1407 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) { 1643 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
1408 arcmsr_iop2drv_data_wrote_handle(acb); 1644 arcmsr_iop2drv_data_wrote_handle(acb);
1409 } 1645 }
1410 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) { 1646 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) {
@@ -1413,14 +1649,37 @@ static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb)
1413 if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) { 1649 if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
1414 arcmsr_hbb_postqueue_isr(acb); 1650 arcmsr_hbb_postqueue_isr(acb);
1415 } 1651 }
1416 if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) { 1652 if(outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
1417 /* messenger of "driver to iop commands" */ 1653 /* messenger of "driver to iop commands" */
1418 arcmsr_hbb_message_isr(acb); 1654 arcmsr_hbb_message_isr(acb);
1419 } 1655 }
1420
1421 return 0; 1656 return 0;
1422} 1657}
1423 1658
1659static int arcmsr_handle_hbc_isr(struct AdapterControlBlock *pACB)
1660{
1661 uint32_t host_interrupt_status;
1662 struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC;
1663 /*
1664 *********************************************
1665 ** check outbound intstatus
1666 *********************************************
1667 */
1668 host_interrupt_status = readl(&phbcmu->host_int_status);
1669 if (!host_interrupt_status) {
1670 /*it must be share irq*/
1671 return 1;
1672 }
1673 /* MU ioctl transfer doorbell interrupts*/
1674 if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
1675 arcmsr_hbc_doorbell_isr(pACB); /* messenger of "ioctl message read write" */
1676 }
1677 /* MU post queue interrupts*/
1678 if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
1679 arcmsr_hbc_postqueue_isr(pACB); /* messenger of "scsi commands" */
1680 }
1681 return 0;
1682}
1424static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb) 1683static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
1425{ 1684{
1426 switch (acb->adapter_type) { 1685 switch (acb->adapter_type) {
@@ -1437,6 +1696,11 @@ static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
1437 } 1696 }
1438 } 1697 }
1439 break; 1698 break;
1699 case ACB_ADAPTER_TYPE_C: {
1700 if (arcmsr_handle_hbc_isr(acb)) {
1701 return IRQ_NONE;
1702 }
1703 }
1440 } 1704 }
1441 return IRQ_HANDLED; 1705 return IRQ_HANDLED;
1442} 1706}
@@ -1463,7 +1727,6 @@ void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb)
1463 struct QBUFFER __iomem *pwbuffer; 1727 struct QBUFFER __iomem *pwbuffer;
1464 uint8_t __iomem *iop_data; 1728 uint8_t __iomem *iop_data;
1465 int32_t allxfer_len = 0; 1729 int32_t allxfer_len = 0;
1466
1467 pwbuffer = arcmsr_get_iop_wqbuffer(acb); 1730 pwbuffer = arcmsr_get_iop_wqbuffer(acb);
1468 iop_data = (uint8_t __iomem *)pwbuffer->data; 1731 iop_data = (uint8_t __iomem *)pwbuffer->data;
1469 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) { 1732 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
@@ -1496,7 +1759,6 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
1496 (uint32_t ) cmd->cmnd[7] << 8 | 1759 (uint32_t ) cmd->cmnd[7] << 8 |
1497 (uint32_t ) cmd->cmnd[8]; 1760 (uint32_t ) cmd->cmnd[8];
1498 /* 4 bytes: Areca io control code */ 1761 /* 4 bytes: Areca io control code */
1499
1500 sg = scsi_sglist(cmd); 1762 sg = scsi_sglist(cmd);
1501 buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset; 1763 buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
1502 if (scsi_sg_count(cmd) > 1) { 1764 if (scsi_sg_count(cmd) > 1) {
@@ -1522,13 +1784,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
1522 retvalue = ARCMSR_MESSAGE_FAIL; 1784 retvalue = ARCMSR_MESSAGE_FAIL;
1523 goto message_out; 1785 goto message_out;
1524 } 1786 }
1525 1787
1526 if (!acb->fw_state) {
1527 pcmdmessagefld->cmdmessage.ReturnCode =
1528 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1529 goto message_out;
1530 }
1531
1532 ptmpQbuffer = ver_addr; 1788 ptmpQbuffer = ver_addr;
1533 while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex) 1789 while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
1534 && (allxfer_len < 1031)) { 1790 && (allxfer_len < 1031)) {
@@ -1560,7 +1816,11 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
1560 } 1816 }
1561 memcpy(pcmdmessagefld->messagedatabuffer, ver_addr, allxfer_len); 1817 memcpy(pcmdmessagefld->messagedatabuffer, ver_addr, allxfer_len);
1562 pcmdmessagefld->cmdmessage.Length = allxfer_len; 1818 pcmdmessagefld->cmdmessage.Length = allxfer_len;
1563 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 1819 if(acb->fw_flag == FW_DEADLOCK) {
1820 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1821 }else{
1822 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1823 }
1564 kfree(ver_addr); 1824 kfree(ver_addr);
1565 } 1825 }
1566 break; 1826 break;
@@ -1575,12 +1835,13 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
1575 retvalue = ARCMSR_MESSAGE_FAIL; 1835 retvalue = ARCMSR_MESSAGE_FAIL;
1576 goto message_out; 1836 goto message_out;
1577 } 1837 }
1578 if (!acb->fw_state) { 1838 if(acb->fw_flag == FW_DEADLOCK) {
1579 pcmdmessagefld->cmdmessage.ReturnCode = 1839 pcmdmessagefld->cmdmessage.ReturnCode =
1580 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; 1840 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1581 goto message_out; 1841 }else{
1842 pcmdmessagefld->cmdmessage.ReturnCode =
1843 ARCMSR_MESSAGE_RETURNCODE_OK;
1582 } 1844 }
1583
1584 ptmpuserbuffer = ver_addr; 1845 ptmpuserbuffer = ver_addr;
1585 user_len = pcmdmessagefld->cmdmessage.Length; 1846 user_len = pcmdmessagefld->cmdmessage.Length;
1586 memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len); 1847 memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
@@ -1633,12 +1894,6 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
1633 1894
1634 case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { 1895 case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
1635 uint8_t *pQbuffer = acb->rqbuffer; 1896 uint8_t *pQbuffer = acb->rqbuffer;
1636 if (!acb->fw_state) {
1637 pcmdmessagefld->cmdmessage.ReturnCode =
1638 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1639 goto message_out;
1640 }
1641
1642 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1897 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1643 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 1898 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1644 arcmsr_iop_message_read(acb); 1899 arcmsr_iop_message_read(acb);
@@ -1647,16 +1902,24 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
1647 acb->rqbuf_firstindex = 0; 1902 acb->rqbuf_firstindex = 0;
1648 acb->rqbuf_lastindex = 0; 1903 acb->rqbuf_lastindex = 0;
1649 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); 1904 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1650 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 1905 if(acb->fw_flag == FW_DEADLOCK) {
1906 pcmdmessagefld->cmdmessage.ReturnCode =
1907 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1908 }else{
1909 pcmdmessagefld->cmdmessage.ReturnCode =
1910 ARCMSR_MESSAGE_RETURNCODE_OK;
1911 }
1651 } 1912 }
1652 break; 1913 break;
1653 1914
1654 case ARCMSR_MESSAGE_CLEAR_WQBUFFER: { 1915 case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
1655 uint8_t *pQbuffer = acb->wqbuffer; 1916 uint8_t *pQbuffer = acb->wqbuffer;
1656 if (!acb->fw_state) { 1917 if(acb->fw_flag == FW_DEADLOCK) {
1657 pcmdmessagefld->cmdmessage.ReturnCode = 1918 pcmdmessagefld->cmdmessage.ReturnCode =
1658 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; 1919 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1659 goto message_out; 1920 }else{
1921 pcmdmessagefld->cmdmessage.ReturnCode =
1922 ARCMSR_MESSAGE_RETURNCODE_OK;
1660 } 1923 }
1661 1924
1662 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1925 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
@@ -1669,18 +1932,11 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
1669 acb->wqbuf_firstindex = 0; 1932 acb->wqbuf_firstindex = 0;
1670 acb->wqbuf_lastindex = 0; 1933 acb->wqbuf_lastindex = 0;
1671 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); 1934 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1672 pcmdmessagefld->cmdmessage.ReturnCode =
1673 ARCMSR_MESSAGE_RETURNCODE_OK;
1674 } 1935 }
1675 break; 1936 break;
1676 1937
1677 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { 1938 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
1678 uint8_t *pQbuffer; 1939 uint8_t *pQbuffer;
1679 if (!acb->fw_state) {
1680 pcmdmessagefld->cmdmessage.ReturnCode =
1681 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1682 goto message_out;
1683 }
1684 1940
1685 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1941 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1686 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 1942 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
@@ -1698,47 +1954,52 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
1698 memset(pQbuffer, 0, sizeof(struct QBUFFER)); 1954 memset(pQbuffer, 0, sizeof(struct QBUFFER));
1699 pQbuffer = acb->wqbuffer; 1955 pQbuffer = acb->wqbuffer;
1700 memset(pQbuffer, 0, sizeof(struct QBUFFER)); 1956 memset(pQbuffer, 0, sizeof(struct QBUFFER));
1701 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 1957 if(acb->fw_flag == FW_DEADLOCK) {
1958 pcmdmessagefld->cmdmessage.ReturnCode =
1959 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1960 }else{
1961 pcmdmessagefld->cmdmessage.ReturnCode =
1962 ARCMSR_MESSAGE_RETURNCODE_OK;
1963 }
1702 } 1964 }
1703 break; 1965 break;
1704 1966
1705 case ARCMSR_MESSAGE_RETURN_CODE_3F: { 1967 case ARCMSR_MESSAGE_RETURN_CODE_3F: {
1706 if (!acb->fw_state) { 1968 if(acb->fw_flag == FW_DEADLOCK) {
1707 pcmdmessagefld->cmdmessage.ReturnCode = 1969 pcmdmessagefld->cmdmessage.ReturnCode =
1708 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; 1970 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1709 goto message_out; 1971 }else{
1710 } 1972 pcmdmessagefld->cmdmessage.ReturnCode =
1711 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F; 1973 ARCMSR_MESSAGE_RETURNCODE_3F;
1712 } 1974 }
1713 break; 1975 break;
1714 1976 }
1715 case ARCMSR_MESSAGE_SAY_HELLO: { 1977 case ARCMSR_MESSAGE_SAY_HELLO: {
1716 int8_t *hello_string = "Hello! I am ARCMSR"; 1978 int8_t *hello_string = "Hello! I am ARCMSR";
1717 if (!acb->fw_state) { 1979 if(acb->fw_flag == FW_DEADLOCK) {
1718 pcmdmessagefld->cmdmessage.ReturnCode = 1980 pcmdmessagefld->cmdmessage.ReturnCode =
1719 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; 1981 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1720 goto message_out; 1982 }else{
1983 pcmdmessagefld->cmdmessage.ReturnCode =
1984 ARCMSR_MESSAGE_RETURNCODE_OK;
1721 } 1985 }
1722 memcpy(pcmdmessagefld->messagedatabuffer, hello_string 1986 memcpy(pcmdmessagefld->messagedatabuffer, hello_string
1723 , (int16_t)strlen(hello_string)); 1987 , (int16_t)strlen(hello_string));
1724 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1725 } 1988 }
1726 break; 1989 break;
1727 1990
1728 case ARCMSR_MESSAGE_SAY_GOODBYE: 1991 case ARCMSR_MESSAGE_SAY_GOODBYE:
1729 if (!acb->fw_state) { 1992 if(acb->fw_flag == FW_DEADLOCK) {
1730 pcmdmessagefld->cmdmessage.ReturnCode = 1993 pcmdmessagefld->cmdmessage.ReturnCode =
1731 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; 1994 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1732 goto message_out;
1733 } 1995 }
1734 arcmsr_iop_parking(acb); 1996 arcmsr_iop_parking(acb);
1735 break; 1997 break;
1736 1998
1737 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: 1999 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
1738 if (!acb->fw_state) { 2000 if(acb->fw_flag == FW_DEADLOCK) {
1739 pcmdmessagefld->cmdmessage.ReturnCode = 2001 pcmdmessagefld->cmdmessage.ReturnCode =
1740 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; 2002 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1741 goto message_out;
1742 } 2003 }
1743 arcmsr_flush_adapter_cache(acb); 2004 arcmsr_flush_adapter_cache(acb);
1744 break; 2005 break;
@@ -1756,11 +2017,16 @@ static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock
1756{ 2017{
1757 struct list_head *head = &acb->ccb_free_list; 2018 struct list_head *head = &acb->ccb_free_list;
1758 struct CommandControlBlock *ccb = NULL; 2019 struct CommandControlBlock *ccb = NULL;
1759 2020 unsigned long flags;
2021 spin_lock_irqsave(&acb->ccblist_lock, flags);
1760 if (!list_empty(head)) { 2022 if (!list_empty(head)) {
1761 ccb = list_entry(head->next, struct CommandControlBlock, list); 2023 ccb = list_entry(head->next, struct CommandControlBlock, list);
1762 list_del(head->next); 2024 list_del_init(&ccb->list);
2025 }else{
2026 spin_unlock_irqrestore(&acb->ccblist_lock, flags);
2027 return 0;
1763 } 2028 }
2029 spin_unlock_irqrestore(&acb->ccblist_lock, flags);
1764 return ccb; 2030 return ccb;
1765} 2031}
1766 2032
@@ -1826,83 +2092,29 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd,
1826 cmd->scsi_done = done; 2092 cmd->scsi_done = done;
1827 cmd->host_scribble = NULL; 2093 cmd->host_scribble = NULL;
1828 cmd->result = 0; 2094 cmd->result = 0;
1829 2095 if ((scsicmd == SYNCHRONIZE_CACHE) ||(scsicmd == SEND_DIAGNOSTIC)){
1830 if ((scsicmd == SYNCHRONIZE_CACHE) || (scsicmd == SEND_DIAGNOSTIC)) { 2096 if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
1831 if (acb->devstate[target][lun] == ARECA_RAID_GONE) { 2097 cmd->result = (DID_NO_CONNECT << 16);
1832 cmd->result = (DID_NO_CONNECT << 16);
1833 } 2098 }
1834 cmd->scsi_done(cmd); 2099 cmd->scsi_done(cmd);
1835 return 0; 2100 return 0;
1836 } 2101 }
1837
1838 if (acb->acb_flags & ACB_F_BUS_RESET) {
1839 switch (acb->adapter_type) {
1840 case ACB_ADAPTER_TYPE_A: {
1841 struct MessageUnit_A __iomem *reg = acb->pmuA;
1842 uint32_t intmask_org, outbound_doorbell;
1843
1844 if ((readl(&reg->outbound_msgaddr1) &
1845 ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
1846 printk(KERN_NOTICE "arcmsr%d: bus reset and return busy\n",
1847 acb->host->host_no);
1848 return SCSI_MLQUEUE_HOST_BUSY;
1849 }
1850
1851 acb->acb_flags &= ~ACB_F_FIRMWARE_TRAP;
1852 printk(KERN_NOTICE "arcmsr%d: hardware bus reset and reset ok\n",
1853 acb->host->host_no);
1854 /* disable all outbound interrupt */
1855 intmask_org = arcmsr_disable_outbound_ints(acb);
1856 arcmsr_get_firmware_spec(acb, 1);
1857 /*start background rebuild*/
1858 arcmsr_start_adapter_bgrb(acb);
1859 /* clear Qbuffer if door bell ringed */
1860 outbound_doorbell = readl(&reg->outbound_doorbell);
1861 /*clear interrupt */
1862 writel(outbound_doorbell, &reg->outbound_doorbell);
1863 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK,
1864 &reg->inbound_doorbell);
1865 /* enable outbound Post Queue,outbound doorbell Interrupt */
1866 arcmsr_enable_outbound_ints(acb, intmask_org);
1867 acb->acb_flags |= ACB_F_IOP_INITED;
1868 acb->acb_flags &= ~ACB_F_BUS_RESET;
1869 }
1870 break;
1871 case ACB_ADAPTER_TYPE_B: {
1872 }
1873 }
1874 }
1875
1876 if (target == 16) { 2102 if (target == 16) {
1877 /* virtual device for iop message transfer */ 2103 /* virtual device for iop message transfer */
1878 arcmsr_handle_virtual_command(acb, cmd); 2104 arcmsr_handle_virtual_command(acb, cmd);
1879 return 0; 2105 return 0;
1880 } 2106 }
1881 if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
1882 uint8_t block_cmd;
1883
1884 block_cmd = cmd->cmnd[0] & 0x0f;
1885 if (block_cmd == 0x08 || block_cmd == 0x0a) {
1886 printk(KERN_NOTICE
1887 "arcmsr%d: block 'read/write'"
1888 "command with gone raid volume"
1889 " Cmd = %2x, TargetId = %d, Lun = %d \n"
1890 , acb->host->host_no
1891 , cmd->cmnd[0]
1892 , target, lun);
1893 cmd->result = (DID_NO_CONNECT << 16);
1894 cmd->scsi_done(cmd);
1895 return 0;
1896 }
1897 }
1898 if (atomic_read(&acb->ccboutstandingcount) >= 2107 if (atomic_read(&acb->ccboutstandingcount) >=
1899 ARCMSR_MAX_OUTSTANDING_CMD) 2108 ARCMSR_MAX_OUTSTANDING_CMD)
1900 return SCSI_MLQUEUE_HOST_BUSY; 2109 return SCSI_MLQUEUE_HOST_BUSY;
1901 2110 if ((scsicmd == SCSI_CMD_ARECA_SPECIFIC)) {
2111 printk(KERN_NOTICE "Receiveing SCSI_CMD_ARECA_SPECIFIC command..\n");
2112 return 0;
2113 }
1902 ccb = arcmsr_get_freeccb(acb); 2114 ccb = arcmsr_get_freeccb(acb);
1903 if (!ccb) 2115 if (!ccb)
1904 return SCSI_MLQUEUE_HOST_BUSY; 2116 return SCSI_MLQUEUE_HOST_BUSY;
1905 if ( arcmsr_build_ccb( acb, ccb, cmd ) == FAILED ) { 2117 if (arcmsr_build_ccb( acb, ccb, cmd ) == FAILED) {
1906 cmd->result = (DID_ERROR << 16) | (RESERVATION_CONFLICT << 1); 2118 cmd->result = (DID_ERROR << 16) | (RESERVATION_CONFLICT << 1);
1907 cmd->scsi_done(cmd); 2119 cmd->scsi_done(cmd);
1908 return 0; 2120 return 0;
@@ -1911,7 +2123,7 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd,
1911 return 0; 2123 return 0;
1912} 2124}
1913 2125
1914static void *arcmsr_get_hba_config(struct AdapterControlBlock *acb, int mode) 2126static bool arcmsr_get_hba_config(struct AdapterControlBlock *acb)
1915{ 2127{
1916 struct MessageUnit_A __iomem *reg = acb->pmuA; 2128 struct MessageUnit_A __iomem *reg = acb->pmuA;
1917 char *acb_firm_model = acb->firm_model; 2129 char *acb_firm_model = acb->firm_model;
@@ -1919,19 +2131,16 @@ static void *arcmsr_get_hba_config(struct AdapterControlBlock *acb, int mode)
1919 char *acb_device_map = acb->device_map; 2131 char *acb_device_map = acb->device_map;
1920 char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]); 2132 char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]);
1921 char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]); 2133 char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]);
1922 char __iomem *iop_device_map = (char __iomem *) (&reg->message_rwbuffer[21]); 2134 char __iomem *iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]);
1923 int count; 2135 int count;
1924
1925 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0); 2136 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
1926 if (arcmsr_hba_wait_msgint_ready(acb)) { 2137 if (!arcmsr_hba_wait_msgint_ready(acb)) {
1927 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \ 2138 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
1928 miscellaneous data' timeout \n", acb->host->host_no); 2139 miscellaneous data' timeout \n", acb->host->host_no);
1929 return NULL; 2140 return false;
1930 } 2141 }
1931
1932 if (mode == 1) {
1933 count = 8; 2142 count = 8;
1934 while (count) { 2143 while (count){
1935 *acb_firm_model = readb(iop_firm_model); 2144 *acb_firm_model = readb(iop_firm_model);
1936 acb_firm_model++; 2145 acb_firm_model++;
1937 iop_firm_model++; 2146 iop_firm_model++;
@@ -1939,138 +2148,213 @@ static void *arcmsr_get_hba_config(struct AdapterControlBlock *acb, int mode)
1939 } 2148 }
1940 2149
1941 count = 16; 2150 count = 16;
1942 while (count) { 2151 while (count){
1943 *acb_firm_version = readb(iop_firm_version); 2152 *acb_firm_version = readb(iop_firm_version);
1944 acb_firm_version++; 2153 acb_firm_version++;
1945 iop_firm_version++; 2154 iop_firm_version++;
1946 count--; 2155 count--;
1947 } 2156 }
1948 2157
1949 count = 16; 2158 count=16;
1950 while (count) { 2159 while(count){
1951 *acb_device_map = readb(iop_device_map); 2160 *acb_device_map = readb(iop_device_map);
1952 acb_device_map++; 2161 acb_device_map++;
1953 iop_device_map++; 2162 iop_device_map++;
1954 count--; 2163 count--;
1955 } 2164 }
1956 2165 printk(KERN_NOTICE "Areca RAID Controller%d: F/W %s & Model %s\n",
1957 printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n" 2166 acb->host->host_no,
1958 , acb->host->host_no 2167 acb->firm_version,
1959 , acb->firm_version); 2168 acb->firm_model);
1960 acb->signature = readl(&reg->message_rwbuffer[0]); 2169 acb->signature = readl(&reg->message_rwbuffer[0]);
1961 acb->firm_request_len = readl(&reg->message_rwbuffer[1]); 2170 acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
1962 acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]); 2171 acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
1963 acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]); 2172 acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
1964 acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]); 2173 acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
2174 acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
2175 return true;
1965} 2176}
1966 return reg->message_rwbuffer; 2177static bool arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
1967}
1968static void __iomem *arcmsr_get_hbb_config(struct AdapterControlBlock *acb, int mode)
1969{ 2178{
1970 struct MessageUnit_B *reg = acb->pmuB; 2179 struct MessageUnit_B *reg = acb->pmuB;
1971 uint32_t __iomem *lrwbuffer = reg->msgcode_rwbuffer_reg; 2180 struct pci_dev *pdev = acb->pdev;
2181 void *dma_coherent;
2182 dma_addr_t dma_coherent_handle;
1972 char *acb_firm_model = acb->firm_model; 2183 char *acb_firm_model = acb->firm_model;
1973 char *acb_firm_version = acb->firm_version; 2184 char *acb_firm_version = acb->firm_version;
1974 char *acb_device_map = acb->device_map; 2185 char *acb_device_map = acb->device_map;
1975 char __iomem *iop_firm_model = (char __iomem *)(&lrwbuffer[15]); 2186 char __iomem *iop_firm_model;
1976 /*firm_model,15,60-67*/ 2187 /*firm_model,15,60-67*/
1977 char __iomem *iop_firm_version = (char __iomem *)(&lrwbuffer[17]); 2188 char __iomem *iop_firm_version;
1978 /*firm_version,17,68-83*/ 2189 /*firm_version,17,68-83*/
1979 char __iomem *iop_device_map = (char __iomem *) (&lrwbuffer[21]); 2190 char __iomem *iop_device_map;
1980 /*firm_version,21,84-99*/ 2191 /*firm_version,21,84-99*/
1981 int count; 2192 int count;
1982 2193 dma_coherent = dma_alloc_coherent(&pdev->dev, sizeof(struct MessageUnit_B), &dma_coherent_handle, GFP_KERNEL);
1983 writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell_reg); 2194 if (!dma_coherent){
1984 if (arcmsr_hbb_wait_msgint_ready(acb)) { 2195 printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error for hbb mu\n", acb->host->host_no);
2196 return false;
2197 }
2198 acb->dma_coherent_handle_hbb_mu = dma_coherent_handle;
2199 reg = (struct MessageUnit_B *)dma_coherent;
2200 acb->pmuB = reg;
2201 reg->drv2iop_doorbell= (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_DRV2IOP_DOORBELL);
2202 reg->drv2iop_doorbell_mask = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_DRV2IOP_DOORBELL_MASK);
2203 reg->iop2drv_doorbell = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_IOP2DRV_DOORBELL);
2204 reg->iop2drv_doorbell_mask = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_IOP2DRV_DOORBELL_MASK);
2205 reg->message_wbuffer = (uint32_t __iomem *)((unsigned long)acb->mem_base1 + ARCMSR_MESSAGE_WBUFFER);
2206 reg->message_rbuffer = (uint32_t __iomem *)((unsigned long)acb->mem_base1 + ARCMSR_MESSAGE_RBUFFER);
2207 reg->message_rwbuffer = (uint32_t __iomem *)((unsigned long)acb->mem_base1 + ARCMSR_MESSAGE_RWBUFFER);
2208 iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]); /*firm_model,15,60-67*/
2209 iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]); /*firm_version,17,68-83*/
2210 iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]); /*firm_version,21,84-99*/
2211
2212 writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
2213 if (!arcmsr_hbb_wait_msgint_ready(acb)) {
1985 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \ 2214 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
1986 miscellaneous data' timeout \n", acb->host->host_no); 2215 miscellaneous data' timeout \n", acb->host->host_no);
1987 return NULL; 2216 return false;
1988 } 2217 }
1989
1990 if (mode == 1) {
1991 count = 8; 2218 count = 8;
1992 while (count) 2219 while (count){
1993 {
1994 *acb_firm_model = readb(iop_firm_model); 2220 *acb_firm_model = readb(iop_firm_model);
1995 acb_firm_model++; 2221 acb_firm_model++;
1996 iop_firm_model++; 2222 iop_firm_model++;
1997 count--; 2223 count--;
1998 } 2224 }
1999
2000 count = 16; 2225 count = 16;
2001 while (count) 2226 while (count){
2002 {
2003 *acb_firm_version = readb(iop_firm_version); 2227 *acb_firm_version = readb(iop_firm_version);
2004 acb_firm_version++; 2228 acb_firm_version++;
2005 iop_firm_version++; 2229 iop_firm_version++;
2006 count--; 2230 count--;
2007 } 2231 }
2008 2232
2009 count = 16; 2233 count = 16;
2010 while (count) { 2234 while(count){
2011 *acb_device_map = readb(iop_device_map); 2235 *acb_device_map = readb(iop_device_map);
2012 acb_device_map++; 2236 acb_device_map++;
2013 iop_device_map++; 2237 iop_device_map++;
2014 count--; 2238 count--;
2015 } 2239 }
2016 2240
2017 printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", 2241 printk(KERN_NOTICE "Areca RAID Controller%d: F/W %s & Model %s\n",
2018 acb->host->host_no, 2242 acb->host->host_no,
2019 acb->firm_version); 2243 acb->firm_version,
2244 acb->firm_model);
2020 2245
2021 acb->signature = readl(lrwbuffer++); 2246 acb->signature = readl(&reg->message_rwbuffer[1]);
2022 /*firm_signature,1,00-03*/ 2247 /*firm_signature,1,00-03*/
2023 acb->firm_request_len = readl(lrwbuffer++); 2248 acb->firm_request_len = readl(&reg->message_rwbuffer[2]);
2024 /*firm_request_len,1,04-07*/ 2249 /*firm_request_len,1,04-07*/
2025 acb->firm_numbers_queue = readl(lrwbuffer++); 2250 acb->firm_numbers_queue = readl(&reg->message_rwbuffer[3]);
2026 /*firm_numbers_queue,2,08-11*/ 2251 /*firm_numbers_queue,2,08-11*/
2027 acb->firm_sdram_size = readl(lrwbuffer++); 2252 acb->firm_sdram_size = readl(&reg->message_rwbuffer[4]);
2028 /*firm_sdram_size,3,12-15*/ 2253 /*firm_sdram_size,3,12-15*/
2029 acb->firm_hd_channels = readl(lrwbuffer); 2254 acb->firm_hd_channels = readl(&reg->message_rwbuffer[5]);
2030 /*firm_ide_channels,4,16-19*/ 2255 /*firm_ide_channels,4,16-19*/
2256 acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
2257 /*firm_ide_channels,4,16-19*/
2258 return true;
2031} 2259}
2032 return reg->msgcode_rwbuffer_reg;
2033}
2034static void *arcmsr_get_firmware_spec(struct AdapterControlBlock *acb, int mode)
2035{
2036 void *rtnval = 0;
2037 switch (acb->adapter_type) {
2038 case ACB_ADAPTER_TYPE_A: {
2039 rtnval = arcmsr_get_hba_config(acb, mode);
2040 }
2041 break;
2042 2260
2043 case ACB_ADAPTER_TYPE_B: { 2261static bool arcmsr_get_hbc_config(struct AdapterControlBlock *pACB)
2044 rtnval = arcmsr_get_hbb_config(acb, mode); 2262{
2263 uint32_t intmask_org, Index, firmware_state = 0;
2264 struct MessageUnit_C *reg = pACB->pmuC;
2265 char *acb_firm_model = pACB->firm_model;
2266 char *acb_firm_version = pACB->firm_version;
2267 char *iop_firm_model = (char *)(&reg->msgcode_rwbuffer[15]); /*firm_model,15,60-67*/
2268 char *iop_firm_version = (char *)(&reg->msgcode_rwbuffer[17]); /*firm_version,17,68-83*/
2269 int count;
2270 /* disable all outbound interrupt */
2271 intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
2272 writel(intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
2273 /* wait firmware ready */
2274 do {
2275 firmware_state = readl(&reg->outbound_msgaddr1);
2276 } while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
2277 /* post "get config" instruction */
2278 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
2279 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
2280 /* wait message ready */
2281 for (Index = 0; Index < 2000; Index++) {
2282 if (readl(&reg->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
2283 writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);/*clear interrupt*/
2284 break;
2045 } 2285 }
2046 break; 2286 udelay(10);
2287 } /*max 1 seconds*/
2288 if (Index >= 2000) {
2289 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
2290 miscellaneous data' timeout \n", pACB->host->host_no);
2291 return false;
2047 } 2292 }
2048 return rtnval; 2293 count = 8;
2294 while (count) {
2295 *acb_firm_model = readb(iop_firm_model);
2296 acb_firm_model++;
2297 iop_firm_model++;
2298 count--;
2299 }
2300 count = 16;
2301 while (count) {
2302 *acb_firm_version = readb(iop_firm_version);
2303 acb_firm_version++;
2304 iop_firm_version++;
2305 count--;
2306 }
2307 printk(KERN_NOTICE "Areca RAID Controller%d: F/W %s & Model %s\n",
2308 pACB->host->host_no,
2309 pACB->firm_version,
2310 pACB->firm_model);
2311 pACB->firm_request_len = readl(&reg->msgcode_rwbuffer[1]); /*firm_request_len,1,04-07*/
2312 pACB->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[2]); /*firm_numbers_queue,2,08-11*/
2313 pACB->firm_sdram_size = readl(&reg->msgcode_rwbuffer[3]); /*firm_sdram_size,3,12-15*/
2314 pACB->firm_hd_channels = readl(&reg->msgcode_rwbuffer[4]); /*firm_ide_channels,4,16-19*/
2315 pACB->firm_cfg_version = readl(&reg->msgcode_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
2316 /*all interrupt service will be enable at arcmsr_iop_init*/
2317 return true;
2318}
2319static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
2320{
2321 if (acb->adapter_type == ACB_ADAPTER_TYPE_A)
2322 return arcmsr_get_hba_config(acb);
2323 else if (acb->adapter_type == ACB_ADAPTER_TYPE_B)
2324 return arcmsr_get_hbb_config(acb);
2325 else
2326 return arcmsr_get_hbc_config(acb);
2049} 2327}
2050 2328
2051static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb, 2329static int arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
2052 struct CommandControlBlock *poll_ccb) 2330 struct CommandControlBlock *poll_ccb)
2053{ 2331{
2054 struct MessageUnit_A __iomem *reg = acb->pmuA; 2332 struct MessageUnit_A __iomem *reg = acb->pmuA;
2055 struct CommandControlBlock *ccb; 2333 struct CommandControlBlock *ccb;
2334 struct ARCMSR_CDB *arcmsr_cdb;
2056 uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0; 2335 uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
2057 2336 int rtn;
2337 bool error;
2058 polling_hba_ccb_retry: 2338 polling_hba_ccb_retry:
2059 poll_count++; 2339 poll_count++;
2060 outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable; 2340 outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable;
2061 writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/ 2341 writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
2062 while (1) { 2342 while (1) {
2063 if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF) { 2343 if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF) {
2064 if (poll_ccb_done) 2344 if (poll_ccb_done){
2345 rtn = SUCCESS;
2065 break; 2346 break;
2066 else { 2347 }else {
2067 msleep(25); 2348 msleep(25);
2068 if (poll_count > 100) 2349 if (poll_count > 100){
2350 rtn = FAILED;
2069 break; 2351 break;
2352 }
2070 goto polling_hba_ccb_retry; 2353 goto polling_hba_ccb_retry;
2071 } 2354 }
2072 } 2355 }
2073 ccb = (struct CommandControlBlock *)(acb->vir2phy_offset + (flag_ccb << 5)); 2356 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
2357 ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
2074 poll_ccb_done = (ccb == poll_ccb) ? 1:0; 2358 poll_ccb_done = (ccb == poll_ccb) ? 1:0;
2075 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) { 2359 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
2076 if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) { 2360 if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
@@ -2081,8 +2365,7 @@ static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
2081 , ccb->pcmd->device->lun 2365 , ccb->pcmd->device->lun
2082 , ccb); 2366 , ccb);
2083 ccb->pcmd->result = DID_ABORT << 16; 2367 ccb->pcmd->result = DID_ABORT << 16;
2084 arcmsr_ccb_complete(ccb, 1); 2368 arcmsr_ccb_complete(ccb);
2085 poll_ccb_done = 1;
2086 continue; 2369 continue;
2087 } 2370 }
2088 printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb" 2371 printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
@@ -2093,86 +2376,156 @@ static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
2093 , atomic_read(&acb->ccboutstandingcount)); 2376 , atomic_read(&acb->ccboutstandingcount));
2094 continue; 2377 continue;
2095 } 2378 }
2096 arcmsr_report_ccb_state(acb, ccb, flag_ccb); 2379 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
2380 arcmsr_report_ccb_state(acb, ccb, error);
2097 } 2381 }
2382 return rtn;
2098} 2383}
2099 2384
2100static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb, 2385static int arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
2101 struct CommandControlBlock *poll_ccb) 2386 struct CommandControlBlock *poll_ccb)
2102{ 2387{
2103 struct MessageUnit_B *reg = acb->pmuB; 2388 struct MessageUnit_B *reg = acb->pmuB;
2104 struct CommandControlBlock *ccb; 2389 struct ARCMSR_CDB *arcmsr_cdb;
2105 uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0; 2390 struct CommandControlBlock *ccb;
2106 int index; 2391 uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
2107 2392 int index, rtn;
2393 bool error;
2108 polling_hbb_ccb_retry: 2394 polling_hbb_ccb_retry:
2109 poll_count++; 2395 poll_count++;
2110 /* clear doorbell interrupt */ 2396 /* clear doorbell interrupt */
2111 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg); 2397 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
2112 while (1) { 2398 while(1){
2113 index = reg->doneq_index; 2399 index = reg->doneq_index;
2114 if ((flag_ccb = readl(&reg->done_qbuffer[index])) == 0) { 2400 if ((flag_ccb = readl(&reg->done_qbuffer[index])) == 0) {
2115 if (poll_ccb_done) 2401 if (poll_ccb_done){
2402 rtn = SUCCESS;
2403 break;
2404 }else {
2405 msleep(25);
2406 if (poll_count > 100){
2407 rtn = FAILED;
2116 break; 2408 break;
2117 else {
2118 msleep(25);
2119 if (poll_count > 100)
2120 break;
2121 goto polling_hbb_ccb_retry;
2122 } 2409 }
2410 goto polling_hbb_ccb_retry;
2411 }
2412 }
2413 writel(0, &reg->done_qbuffer[index]);
2414 index++;
2415 /*if last index number set it to 0 */
2416 index %= ARCMSR_MAX_HBB_POSTQUEUE;
2417 reg->doneq_index = index;
2418 /* check if command done with no error*/
2419 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
2420 ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
2421 poll_ccb_done = (ccb == poll_ccb) ? 1:0;
2422 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
2423 if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
2424 printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
2425 " poll command abort successfully \n"
2426 ,acb->host->host_no
2427 ,ccb->pcmd->device->id
2428 ,ccb->pcmd->device->lun
2429 ,ccb);
2430 ccb->pcmd->result = DID_ABORT << 16;
2431 arcmsr_ccb_complete(ccb);
2432 continue;
2123 } 2433 }
2124 writel(0, &reg->done_qbuffer[index]); 2434 printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
2125 index++; 2435 " command done ccb = '0x%p'"
2126 /*if last index number set it to 0 */ 2436 "ccboutstandingcount = %d \n"
2127 index %= ARCMSR_MAX_HBB_POSTQUEUE; 2437 , acb->host->host_no
2128 reg->doneq_index = index; 2438 , ccb
2129 /* check ifcommand done with no error*/ 2439 , atomic_read(&acb->ccboutstandingcount));
2130 ccb = (struct CommandControlBlock *)\ 2440 continue;
2131 (acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/ 2441 }
2132 poll_ccb_done = (ccb == poll_ccb) ? 1:0; 2442 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
2133 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) { 2443 arcmsr_report_ccb_state(acb, ccb, error);
2134 if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) { 2444 }
2135 printk(KERN_NOTICE "arcmsr%d: \ 2445 return rtn;
2136 scsi id = %d lun = %d ccb = '0x%p' poll command abort successfully \n" 2446}
2137 ,acb->host->host_no 2447
2138 ,ccb->pcmd->device->id 2448static int arcmsr_polling_hbc_ccbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_ccb)
2139 ,ccb->pcmd->device->lun 2449{
2140 ,ccb); 2450 struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
2141 ccb->pcmd->result = DID_ABORT << 16; 2451 uint32_t flag_ccb, ccb_cdb_phy;
2142 arcmsr_ccb_complete(ccb, 1); 2452 struct ARCMSR_CDB *arcmsr_cdb;
2143 continue; 2453 bool error;
2454 struct CommandControlBlock *pCCB;
2455 uint32_t poll_ccb_done = 0, poll_count = 0;
2456 int rtn;
2457polling_hbc_ccb_retry:
2458 poll_count++;
2459 while (1) {
2460 if ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) == 0) {
2461 if (poll_ccb_done) {
2462 rtn = SUCCESS;
2463 break;
2464 } else {
2465 msleep(25);
2466 if (poll_count > 100) {
2467 rtn = FAILED;
2468 break;
2144 } 2469 }
2145 printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb" 2470 goto polling_hbc_ccb_retry;
2146 " command done ccb = '0x%p'" 2471 }
2147 "ccboutstandingcount = %d \n" 2472 }
2473 flag_ccb = readl(&reg->outbound_queueport_low);
2474 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
2475 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);/*frame must be 32 bytes aligned*/
2476 pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
2477 poll_ccb_done = (pCCB == poll_ccb) ? 1 : 0;
2478 /* check ifcommand done with no error*/
2479 if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
2480 if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
2481 printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
2482 " poll command abort successfully \n"
2148 , acb->host->host_no 2483 , acb->host->host_no
2149 , ccb 2484 , pCCB->pcmd->device->id
2150 , atomic_read(&acb->ccboutstandingcount)); 2485 , pCCB->pcmd->device->lun
2486 , pCCB);
2487 pCCB->pcmd->result = DID_ABORT << 16;
2488 arcmsr_ccb_complete(pCCB);
2151 continue; 2489 continue;
2152 } 2490 }
2153 arcmsr_report_ccb_state(acb, ccb, flag_ccb); 2491 printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
2154 } /*drain reply FIFO*/ 2492 " command done ccb = '0x%p'"
2493 "ccboutstandingcount = %d \n"
2494 , acb->host->host_no
2495 , pCCB
2496 , atomic_read(&acb->ccboutstandingcount));
2497 continue;
2498 }
2499 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
2500 arcmsr_report_ccb_state(acb, pCCB, error);
2501 }
2502 return rtn;
2155} 2503}
2156 2504static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
2157static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
2158 struct CommandControlBlock *poll_ccb) 2505 struct CommandControlBlock *poll_ccb)
2159{ 2506{
2507 int rtn = 0;
2160 switch (acb->adapter_type) { 2508 switch (acb->adapter_type) {
2161 2509
2162 case ACB_ADAPTER_TYPE_A: { 2510 case ACB_ADAPTER_TYPE_A: {
2163 arcmsr_polling_hba_ccbdone(acb,poll_ccb); 2511 rtn = arcmsr_polling_hba_ccbdone(acb, poll_ccb);
2164 } 2512 }
2165 break; 2513 break;
2166 2514
2167 case ACB_ADAPTER_TYPE_B: { 2515 case ACB_ADAPTER_TYPE_B: {
2168 arcmsr_polling_hbb_ccbdone(acb,poll_ccb); 2516 rtn = arcmsr_polling_hbb_ccbdone(acb, poll_ccb);
2517 }
2518 break;
2519 case ACB_ADAPTER_TYPE_C: {
2520 rtn = arcmsr_polling_hbc_ccbdone(acb, poll_ccb);
2169 } 2521 }
2170 } 2522 }
2523 return rtn;
2171} 2524}
2172 2525
2173static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) 2526static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
2174{ 2527{
2175 uint32_t cdb_phyaddr, ccb_phyaddr_hi32; 2528 uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
2176 dma_addr_t dma_coherent_handle; 2529 dma_addr_t dma_coherent_handle;
2177 /* 2530 /*
2178 ******************************************************************** 2531 ********************************************************************
@@ -2182,7 +2535,8 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
2182 */ 2535 */
2183 dma_coherent_handle = acb->dma_coherent_handle; 2536 dma_coherent_handle = acb->dma_coherent_handle;
2184 cdb_phyaddr = (uint32_t)(dma_coherent_handle); 2537 cdb_phyaddr = (uint32_t)(dma_coherent_handle);
2185 ccb_phyaddr_hi32 = (uint32_t)((cdb_phyaddr >> 16) >> 16); 2538 cdb_phyaddr_hi32 = (uint32_t)((cdb_phyaddr >> 16) >> 16);
2539 acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
2186 /* 2540 /*
2187 *********************************************************************** 2541 ***********************************************************************
2188 ** if adapter type B, set window of "post command Q" 2542 ** if adapter type B, set window of "post command Q"
@@ -2191,16 +2545,16 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
2191 switch (acb->adapter_type) { 2545 switch (acb->adapter_type) {
2192 2546
2193 case ACB_ADAPTER_TYPE_A: { 2547 case ACB_ADAPTER_TYPE_A: {
2194 if (ccb_phyaddr_hi32 != 0) { 2548 if (cdb_phyaddr_hi32 != 0) {
2195 struct MessageUnit_A __iomem *reg = acb->pmuA; 2549 struct MessageUnit_A __iomem *reg = acb->pmuA;
2196 uint32_t intmask_org; 2550 uint32_t intmask_org;
2197 intmask_org = arcmsr_disable_outbound_ints(acb); 2551 intmask_org = arcmsr_disable_outbound_ints(acb);
2198 writel(ARCMSR_SIGNATURE_SET_CONFIG, \ 2552 writel(ARCMSR_SIGNATURE_SET_CONFIG, \
2199 &reg->message_rwbuffer[0]); 2553 &reg->message_rwbuffer[0]);
2200 writel(ccb_phyaddr_hi32, &reg->message_rwbuffer[1]); 2554 writel(cdb_phyaddr_hi32, &reg->message_rwbuffer[1]);
2201 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, \ 2555 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, \
2202 &reg->inbound_msgaddr0); 2556 &reg->inbound_msgaddr0);
2203 if (arcmsr_hba_wait_msgint_ready(acb)) { 2557 if (!arcmsr_hba_wait_msgint_ready(acb)) {
2204 printk(KERN_NOTICE "arcmsr%d: ""set ccb high \ 2558 printk(KERN_NOTICE "arcmsr%d: ""set ccb high \
2205 part physical address timeout\n", 2559 part physical address timeout\n",
2206 acb->host->host_no); 2560 acb->host->host_no);
@@ -2220,19 +2574,18 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
2220 intmask_org = arcmsr_disable_outbound_ints(acb); 2574 intmask_org = arcmsr_disable_outbound_ints(acb);
2221 reg->postq_index = 0; 2575 reg->postq_index = 0;
2222 reg->doneq_index = 0; 2576 reg->doneq_index = 0;
2223 writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell_reg); 2577 writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell);
2224 if (arcmsr_hbb_wait_msgint_ready(acb)) { 2578 if (!arcmsr_hbb_wait_msgint_ready(acb)) {
2225 printk(KERN_NOTICE "arcmsr%d:can not set diver mode\n", \ 2579 printk(KERN_NOTICE "arcmsr%d:can not set diver mode\n", \
2226 acb->host->host_no); 2580 acb->host->host_no);
2227 return 1; 2581 return 1;
2228 } 2582 }
2229 post_queue_phyaddr = cdb_phyaddr + ARCMSR_MAX_FREECCB_NUM * \ 2583 post_queue_phyaddr = acb->dma_coherent_handle_hbb_mu;
2230 sizeof(struct CommandControlBlock) + offsetof(struct MessageUnit_B, post_qbuffer) ; 2584 rwbuffer = reg->message_rwbuffer;
2231 rwbuffer = reg->msgcode_rwbuffer_reg;
2232 /* driver "set config" signature */ 2585 /* driver "set config" signature */
2233 writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++); 2586 writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
2234 /* normal should be zero */ 2587 /* normal should be zero */
2235 writel(ccb_phyaddr_hi32, rwbuffer++); 2588 writel(cdb_phyaddr_hi32, rwbuffer++);
2236 /* postQ size (256 + 8)*4 */ 2589 /* postQ size (256 + 8)*4 */
2237 writel(post_queue_phyaddr, rwbuffer++); 2590 writel(post_queue_phyaddr, rwbuffer++);
2238 /* doneQ size (256 + 8)*4 */ 2591 /* doneQ size (256 + 8)*4 */
@@ -2240,22 +2593,37 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
2240 /* ccb maxQ size must be --> [(256 + 8)*4]*/ 2593 /* ccb maxQ size must be --> [(256 + 8)*4]*/
2241 writel(1056, rwbuffer); 2594 writel(1056, rwbuffer);
2242 2595
2243 writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell_reg); 2596 writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell);
2244 if (arcmsr_hbb_wait_msgint_ready(acb)) { 2597 if (!arcmsr_hbb_wait_msgint_ready(acb)) {
2245 printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \ 2598 printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
2246 timeout \n",acb->host->host_no); 2599 timeout \n",acb->host->host_no);
2247 return 1; 2600 return 1;
2248 } 2601 }
2249 2602 arcmsr_hbb_enable_driver_mode(acb);
2250 writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell_reg);
2251 if (arcmsr_hbb_wait_msgint_ready(acb)) {
2252 printk(KERN_NOTICE "arcmsr%d: 'can not set diver mode \n"\
2253 ,acb->host->host_no);
2254 return 1;
2255 }
2256 arcmsr_enable_outbound_ints(acb, intmask_org); 2603 arcmsr_enable_outbound_ints(acb, intmask_org);
2257 } 2604 }
2258 break; 2605 break;
2606 case ACB_ADAPTER_TYPE_C: {
2607 if (cdb_phyaddr_hi32 != 0) {
2608 struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
2609
2610 if (cdb_phyaddr_hi32 != 0) {
2611 unsigned char Retries = 0x00;
2612 do {
2613 printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x \n", acb->adapter_index, cdb_phyaddr_hi32);
2614 } while (Retries++ < 100);
2615 }
2616 writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->msgcode_rwbuffer[0]);
2617 writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[1]);
2618 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
2619 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
2620 if (!arcmsr_hbc_wait_msgint_ready(acb)) {
2621 printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
2622 timeout \n", acb->host->host_no);
2623 return 1;
2624 }
2625 }
2626 }
2259 } 2627 }
2260 return 0; 2628 return 0;
2261} 2629}
@@ -2263,7 +2631,6 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
2263static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb) 2631static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
2264{ 2632{
2265 uint32_t firmware_state = 0; 2633 uint32_t firmware_state = 0;
2266
2267 switch (acb->adapter_type) { 2634 switch (acb->adapter_type) {
2268 2635
2269 case ACB_ADAPTER_TYPE_A: { 2636 case ACB_ADAPTER_TYPE_A: {
@@ -2277,62 +2644,81 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
2277 case ACB_ADAPTER_TYPE_B: { 2644 case ACB_ADAPTER_TYPE_B: {
2278 struct MessageUnit_B *reg = acb->pmuB; 2645 struct MessageUnit_B *reg = acb->pmuB;
2279 do { 2646 do {
2280 firmware_state = readl(reg->iop2drv_doorbell_reg); 2647 firmware_state = readl(reg->iop2drv_doorbell);
2281 } while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0); 2648 } while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
2282 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg); 2649 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
2283 } 2650 }
2284 break; 2651 break;
2652 case ACB_ADAPTER_TYPE_C: {
2653 struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
2654 do {
2655 firmware_state = readl(&reg->outbound_msgaddr1);
2656 } while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
2657 }
2285 } 2658 }
2286} 2659}
2287 2660
2288static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb) 2661static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb)
2289{ 2662{
2290 struct MessageUnit_A __iomem *reg = acb->pmuA; 2663 struct MessageUnit_A __iomem *reg = acb->pmuA;
2291 2664 if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
2292 if (unlikely(atomic_read(&acb->rq_map_token) == 0)) { 2665 return;
2293 acb->fw_state = false;
2294 } else { 2666 } else {
2295 /*to prevent rq_map_token from changing by other interrupt, then 2667 acb->fw_flag = FW_NORMAL;
2296 avoid the dead-lock*/ 2668 if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)){
2297 acb->fw_state = true;
2298 atomic_dec(&acb->rq_map_token);
2299 if (!(acb->fw_state) ||
2300 (acb->ante_token_value == atomic_read(&acb->rq_map_token))) {
2301 atomic_set(&acb->rq_map_token, 16); 2669 atomic_set(&acb->rq_map_token, 16);
2302 } 2670 }
2303 acb->ante_token_value = atomic_read(&acb->rq_map_token); 2671 atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
2672 if (atomic_dec_and_test(&acb->rq_map_token))
2673 return;
2304 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0); 2674 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
2675 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
2305 } 2676 }
2306 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6000));
2307 return; 2677 return;
2308} 2678}
2309 2679
2310static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb) 2680static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb)
2311{ 2681{
2312 struct MessageUnit_B __iomem *reg = acb->pmuB; 2682 struct MessageUnit_B __iomem *reg = acb->pmuB;
2683 if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
2684 return;
2685 } else {
2686 acb->fw_flag = FW_NORMAL;
2687 if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
2688 atomic_set(&acb->rq_map_token,16);
2689 }
2690 atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
2691 if(atomic_dec_and_test(&acb->rq_map_token))
2692 return;
2693 writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
2694 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
2695 }
2696 return;
2697}
2313 2698
2314 if (unlikely(atomic_read(&acb->rq_map_token) == 0)) { 2699static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb)
2315 acb->fw_state = false; 2700{
2701 struct MessageUnit_C __iomem *reg = acb->pmuC;
2702 if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0) || ((acb->acb_flags & ACB_F_ABORT) != 0)) {
2703 return;
2316 } else { 2704 } else {
2317 /*to prevent rq_map_token from changing by other interrupt, then 2705 acb->fw_flag = FW_NORMAL;
2318 avoid the dead-lock*/ 2706 if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
2319 acb->fw_state = true;
2320 atomic_dec(&acb->rq_map_token);
2321 if (!(acb->fw_state) ||
2322 (acb->ante_token_value == atomic_read(&acb->rq_map_token))) {
2323 atomic_set(&acb->rq_map_token, 16); 2707 atomic_set(&acb->rq_map_token, 16);
2324 } 2708 }
2325 acb->ante_token_value = atomic_read(&acb->rq_map_token); 2709 atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
2326 writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell_reg); 2710 if (atomic_dec_and_test(&acb->rq_map_token))
2711 return;
2712 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
2713 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
2714 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
2327 } 2715 }
2328 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6000));
2329 return; 2716 return;
2330} 2717}
2331 2718
2332static void arcmsr_request_device_map(unsigned long pacb) 2719static void arcmsr_request_device_map(unsigned long pacb)
2333{ 2720{
2334 struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb; 2721 struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb;
2335
2336 switch (acb->adapter_type) { 2722 switch (acb->adapter_type) {
2337 case ACB_ADAPTER_TYPE_A: { 2723 case ACB_ADAPTER_TYPE_A: {
2338 arcmsr_request_hba_device_map(acb); 2724 arcmsr_request_hba_device_map(acb);
@@ -2342,6 +2728,9 @@ static void arcmsr_request_device_map(unsigned long pacb)
2342 arcmsr_request_hbb_device_map(acb); 2728 arcmsr_request_hbb_device_map(acb);
2343 } 2729 }
2344 break; 2730 break;
2731 case ACB_ADAPTER_TYPE_C: {
2732 arcmsr_request_hbc_device_map(acb);
2733 }
2345 } 2734 }
2346} 2735}
2347 2736
@@ -2350,7 +2739,7 @@ static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
2350 struct MessageUnit_A __iomem *reg = acb->pmuA; 2739 struct MessageUnit_A __iomem *reg = acb->pmuA;
2351 acb->acb_flags |= ACB_F_MSG_START_BGRB; 2740 acb->acb_flags |= ACB_F_MSG_START_BGRB;
2352 writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0); 2741 writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
2353 if (arcmsr_hba_wait_msgint_ready(acb)) { 2742 if (!arcmsr_hba_wait_msgint_ready(acb)) {
2354 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \ 2743 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
2355 rebulid' timeout \n", acb->host->host_no); 2744 rebulid' timeout \n", acb->host->host_no);
2356 } 2745 }
@@ -2360,13 +2749,25 @@ static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb)
2360{ 2749{
2361 struct MessageUnit_B *reg = acb->pmuB; 2750 struct MessageUnit_B *reg = acb->pmuB;
2362 acb->acb_flags |= ACB_F_MSG_START_BGRB; 2751 acb->acb_flags |= ACB_F_MSG_START_BGRB;
2363 writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell_reg); 2752 writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell);
2364 if (arcmsr_hbb_wait_msgint_ready(acb)) { 2753 if (!arcmsr_hbb_wait_msgint_ready(acb)) {
2365 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \ 2754 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
2366 rebulid' timeout \n",acb->host->host_no); 2755 rebulid' timeout \n",acb->host->host_no);
2367 } 2756 }
2368} 2757}
2369 2758
2759static void arcmsr_start_hbc_bgrb(struct AdapterControlBlock *pACB)
2760{
2761 struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC;
2762 pACB->acb_flags |= ACB_F_MSG_START_BGRB;
2763 writel(ARCMSR_INBOUND_MESG0_START_BGRB, &phbcmu->inbound_msgaddr0);
2764 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell);
2765 if (!arcmsr_hbc_wait_msgint_ready(pACB)) {
2766 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
2767 rebulid' timeout \n", pACB->host->host_no);
2768 }
2769 return;
2770}
2370static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb) 2771static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
2371{ 2772{
2372 switch (acb->adapter_type) { 2773 switch (acb->adapter_type) {
@@ -2376,6 +2777,8 @@ static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
2376 case ACB_ADAPTER_TYPE_B: 2777 case ACB_ADAPTER_TYPE_B:
2377 arcmsr_start_hbb_bgrb(acb); 2778 arcmsr_start_hbb_bgrb(acb);
2378 break; 2779 break;
2780 case ACB_ADAPTER_TYPE_C:
2781 arcmsr_start_hbc_bgrb(acb);
2379 } 2782 }
2380} 2783}
2381 2784
@@ -2396,11 +2799,19 @@ static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
2396 case ACB_ADAPTER_TYPE_B: { 2799 case ACB_ADAPTER_TYPE_B: {
2397 struct MessageUnit_B *reg = acb->pmuB; 2800 struct MessageUnit_B *reg = acb->pmuB;
2398 /*clear interrupt and message state*/ 2801 /*clear interrupt and message state*/
2399 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg); 2802 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
2400 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell_reg); 2803 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
2401 /* let IOP know data has been read */ 2804 /* let IOP know data has been read */
2402 } 2805 }
2403 break; 2806 break;
2807 case ACB_ADAPTER_TYPE_C: {
2808 struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
2809 uint32_t outbound_doorbell;
2810 /* empty doorbell Qbuffer if door bell ringed */
2811 outbound_doorbell = readl(&reg->outbound_doorbell);
2812 writel(outbound_doorbell, &reg->outbound_doorbell_clear);
2813 writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
2814 }
2404 } 2815 }
2405} 2816}
2406 2817
@@ -2412,13 +2823,15 @@ static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
2412 case ACB_ADAPTER_TYPE_B: 2823 case ACB_ADAPTER_TYPE_B:
2413 { 2824 {
2414 struct MessageUnit_B *reg = acb->pmuB; 2825 struct MessageUnit_B *reg = acb->pmuB;
2415 writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell_reg); 2826 writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell);
2416 if(arcmsr_hbb_wait_msgint_ready(acb)) { 2827 if (!arcmsr_hbb_wait_msgint_ready(acb)) {
2417 printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT"); 2828 printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT");
2418 return; 2829 return;
2419 } 2830 }
2420 } 2831 }
2421 break; 2832 break;
2833 case ACB_ADAPTER_TYPE_C:
2834 return;
2422 } 2835 }
2423 return; 2836 return;
2424} 2837}
@@ -2426,15 +2839,33 @@ static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
2426static void arcmsr_hardware_reset(struct AdapterControlBlock *acb) 2839static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
2427{ 2840{
2428 uint8_t value[64]; 2841 uint8_t value[64];
2429 int i; 2842 int i, count = 0;
2430 2843 struct MessageUnit_A __iomem *pmuA = acb->pmuA;
2844 struct MessageUnit_C __iomem *pmuC = acb->pmuC;
2845 u32 temp = 0;
2431 /* backup pci config data */ 2846 /* backup pci config data */
2847 printk(KERN_NOTICE "arcmsr%d: executing hw bus reset .....\n", acb->host->host_no);
2432 for (i = 0; i < 64; i++) { 2848 for (i = 0; i < 64; i++) {
2433 pci_read_config_byte(acb->pdev, i, &value[i]); 2849 pci_read_config_byte(acb->pdev, i, &value[i]);
2434 } 2850 }
2435 /* hardware reset signal */ 2851 /* hardware reset signal */
2436 pci_write_config_byte(acb->pdev, 0x84, 0x20); 2852 if ((acb->dev_id == 0x1680)) {
2437 msleep(1000); 2853 writel(ARCMSR_ARC1680_BUS_RESET, &pmuA->reserved1[0]);
2854 } else if ((acb->dev_id == 0x1880)) {
2855 do {
2856 count++;
2857 writel(0xF, &pmuC->write_sequence);
2858 writel(0x4, &pmuC->write_sequence);
2859 writel(0xB, &pmuC->write_sequence);
2860 writel(0x2, &pmuC->write_sequence);
2861 writel(0x7, &pmuC->write_sequence);
2862 writel(0xD, &pmuC->write_sequence);
2863 } while ((((temp = readl(&pmuC->host_diagnostic)) | ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5));
2864 writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic);
2865 } else {
2866 pci_write_config_byte(acb->pdev, 0x84, 0x20);
2867 }
2868 msleep(2000);
2438 /* write back pci config data */ 2869 /* write back pci config data */
2439 for (i = 0; i < 64; i++) { 2870 for (i = 0; i < 64; i++) {
2440 pci_write_config_byte(acb->pdev, i, value[i]); 2871 pci_write_config_byte(acb->pdev, i, value[i]);
@@ -2442,50 +2873,13 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
2442 msleep(1000); 2873 msleep(1000);
2443 return; 2874 return;
2444} 2875}
2445/*
2446****************************************************************************
2447****************************************************************************
2448*/
2449#ifdef CONFIG_SCSI_ARCMSR_RESET
2450 int arcmsr_sleep_for_bus_reset(struct scsi_cmnd *cmd)
2451 {
2452 struct Scsi_Host *shost = NULL;
2453 spinlock_t *host_lock = NULL;
2454 int i, isleep;
2455
2456 shost = cmd->device->host;
2457 host_lock = shost->host_lock;
2458
2459 printk(KERN_NOTICE "Host %d bus reset over, sleep %d seconds (busy %d, can queue %d) ...........\n",
2460 shost->host_no, sleeptime, shost->host_busy, shost->can_queue);
2461 isleep = sleeptime / 10;
2462 spin_unlock_irq(host_lock);
2463 if (isleep > 0) {
2464 for (i = 0; i < isleep; i++) {
2465 msleep(10000);
2466 printk(KERN_NOTICE "^%d^\n", i);
2467 }
2468 }
2469
2470 isleep = sleeptime % 10;
2471 if (isleep > 0) {
2472 msleep(isleep * 1000);
2473 printk(KERN_NOTICE "^v^\n");
2474 }
2475 spin_lock_irq(host_lock);
2476 printk(KERN_NOTICE "***** wake up *****\n");
2477 return 0;
2478 }
2479#endif
2480static void arcmsr_iop_init(struct AdapterControlBlock *acb) 2876static void arcmsr_iop_init(struct AdapterControlBlock *acb)
2481{ 2877{
2482 uint32_t intmask_org; 2878 uint32_t intmask_org;
2483 2879 /* disable all outbound interrupt */
2484 /* disable all outbound interrupt */ 2880 intmask_org = arcmsr_disable_outbound_ints(acb);
2485 intmask_org = arcmsr_disable_outbound_ints(acb);
2486 arcmsr_wait_firmware_ready(acb); 2881 arcmsr_wait_firmware_ready(acb);
2487 arcmsr_iop_confirm(acb); 2882 arcmsr_iop_confirm(acb);
2488 arcmsr_get_firmware_spec(acb, 1);
2489 /*start background rebuild*/ 2883 /*start background rebuild*/
2490 arcmsr_start_adapter_bgrb(acb); 2884 arcmsr_start_adapter_bgrb(acb);
2491 /* empty doorbell Qbuffer if door bell ringed */ 2885 /* empty doorbell Qbuffer if door bell ringed */
@@ -2502,20 +2896,17 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
2502 uint32_t intmask_org; 2896 uint32_t intmask_org;
2503 uint8_t rtnval = 0x00; 2897 uint8_t rtnval = 0x00;
2504 int i = 0; 2898 int i = 0;
2505
2506 if (atomic_read(&acb->ccboutstandingcount) != 0) { 2899 if (atomic_read(&acb->ccboutstandingcount) != 0) {
2507 /* disable all outbound interrupt */ 2900 /* disable all outbound interrupt */
2508 intmask_org = arcmsr_disable_outbound_ints(acb); 2901 intmask_org = arcmsr_disable_outbound_ints(acb);
2509 /* talk to iop 331 outstanding command aborted */ 2902 /* talk to iop 331 outstanding command aborted */
2510 rtnval = arcmsr_abort_allcmd(acb); 2903 rtnval = arcmsr_abort_allcmd(acb);
2511 /* wait for 3 sec for all command aborted*/
2512 ssleep(3);
2513 /* clear all outbound posted Q */ 2904 /* clear all outbound posted Q */
2514 arcmsr_done4abort_postqueue(acb); 2905 arcmsr_done4abort_postqueue(acb);
2515 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { 2906 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2516 ccb = acb->pccb_pool[i]; 2907 ccb = acb->pccb_pool[i];
2517 if (ccb->startdone == ARCMSR_CCB_START) { 2908 if (ccb->startdone == ARCMSR_CCB_START) {
2518 arcmsr_ccb_complete(ccb, 1); 2909 arcmsr_ccb_complete(ccb);
2519 } 2910 }
2520 } 2911 }
2521 atomic_set(&acb->ccboutstandingcount, 0); 2912 atomic_set(&acb->ccboutstandingcount, 0);
@@ -2530,94 +2921,190 @@ static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
2530{ 2921{
2531 struct AdapterControlBlock *acb = 2922 struct AdapterControlBlock *acb =
2532 (struct AdapterControlBlock *)cmd->device->host->hostdata; 2923 (struct AdapterControlBlock *)cmd->device->host->hostdata;
2533 int retry = 0; 2924 uint32_t intmask_org, outbound_doorbell;
2534 2925 int retry_count = 0;
2535 if (acb->acb_flags & ACB_F_BUS_RESET) 2926 int rtn = FAILED;
2536 return SUCCESS; 2927 acb = (struct AdapterControlBlock *) cmd->device->host->hostdata;
2537 2928 printk(KERN_ERR "arcmsr: executing bus reset eh.....num_resets = %d, num_aborts = %d \n", acb->num_resets, acb->num_aborts);
2538 printk(KERN_NOTICE "arcmsr%d: bus reset ..... \n", acb->adapter_index);
2539 acb->acb_flags |= ACB_F_BUS_RESET;
2540 acb->num_resets++; 2929 acb->num_resets++;
2541 while (atomic_read(&acb->ccboutstandingcount) != 0 && retry < 4) {
2542 arcmsr_interrupt(acb);
2543 retry++;
2544 }
2545 2930
2546 if (arcmsr_iop_reset(acb)) { 2931 switch(acb->adapter_type){
2547 switch (acb->adapter_type) { 2932 case ACB_ADAPTER_TYPE_A:{
2548 case ACB_ADAPTER_TYPE_A: { 2933 if (acb->acb_flags & ACB_F_BUS_RESET){
2549 printk(KERN_NOTICE "arcmsr%d: do hardware bus reset, num_resets = %d num_aborts = %d \n", 2934 long timeout;
2550 acb->adapter_index, acb->num_resets, acb->num_aborts); 2935 printk(KERN_ERR "arcmsr: there is an bus reset eh proceeding.......\n");
2551 arcmsr_hardware_reset(acb); 2936 timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220*HZ);
2552 acb->acb_flags |= ACB_F_FIRMWARE_TRAP; 2937 if (timeout) {
2553 acb->acb_flags &= ~ACB_F_IOP_INITED; 2938 return SUCCESS;
2554 #ifdef CONFIG_SCSI_ARCMSR_RESET 2939 }
2555 struct MessageUnit_A __iomem *reg = acb->pmuA; 2940 }
2556 uint32_t intmask_org, outbound_doorbell; 2941 acb->acb_flags |= ACB_F_BUS_RESET;
2557 int retry_count = 0; 2942 if (!arcmsr_iop_reset(acb)) {
2943 struct MessageUnit_A __iomem *reg;
2944 reg = acb->pmuA;
2945 arcmsr_hardware_reset(acb);
2946 acb->acb_flags &= ~ACB_F_IOP_INITED;
2558sleep_again: 2947sleep_again:
2559 arcmsr_sleep_for_bus_reset(cmd); 2948 arcmsr_sleep_for_bus_reset(cmd);
2560 if ((readl(&reg->outbound_msgaddr1) & 2949 if ((readl(&reg->outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
2561 ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) { 2950 printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d \n", acb->host->host_no, retry_count);
2562 printk(KERN_NOTICE "arcmsr%d: hardware bus reset and return busy, retry=%d \n", 2951 if (retry_count > retrycount) {
2563 acb->host->host_no, retry_count); 2952 acb->fw_flag = FW_DEADLOCK;
2564 if (retry_count > retrycount) { 2953 printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!! \n", acb->host->host_no);
2565 printk(KERN_NOTICE "arcmsr%d: hardware bus reset and return busy, retry aborted \n", 2954 return FAILED;
2566 acb->host->host_no); 2955 }
2567 return SUCCESS; 2956 retry_count++;
2957 goto sleep_again;
2958 }
2959 acb->acb_flags |= ACB_F_IOP_INITED;
2960 /* disable all outbound interrupt */
2961 intmask_org = arcmsr_disable_outbound_ints(acb);
2962 arcmsr_get_firmware_spec(acb);
2963 arcmsr_start_adapter_bgrb(acb);
2964 /* clear Qbuffer if door bell ringed */
2965 outbound_doorbell = readl(&reg->outbound_doorbell);
2966 writel(outbound_doorbell, &reg->outbound_doorbell); /*clear interrupt */
2967 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
2968 /* enable outbound Post Queue,outbound doorbell Interrupt */
2969 arcmsr_enable_outbound_ints(acb, intmask_org);
2970 atomic_set(&acb->rq_map_token, 16);
2971 atomic_set(&acb->ante_token_value, 16);
2972 acb->fw_flag = FW_NORMAL;
2973 init_timer(&acb->eternal_timer);
2974 acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
2975 acb->eternal_timer.data = (unsigned long) acb;
2976 acb->eternal_timer.function = &arcmsr_request_device_map;
2977 add_timer(&acb->eternal_timer);
2978 acb->acb_flags &= ~ACB_F_BUS_RESET;
2979 rtn = SUCCESS;
2980 printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
2981 } else {
2982 acb->acb_flags &= ~ACB_F_BUS_RESET;
2983 if (atomic_read(&acb->rq_map_token) == 0) {
2984 atomic_set(&acb->rq_map_token, 16);
2985 atomic_set(&acb->ante_token_value, 16);
2986 acb->fw_flag = FW_NORMAL;
2987 init_timer(&acb->eternal_timer);
2988 acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
2989 acb->eternal_timer.data = (unsigned long) acb;
2990 acb->eternal_timer.function = &arcmsr_request_device_map;
2991 add_timer(&acb->eternal_timer);
2992 } else {
2993 atomic_set(&acb->rq_map_token, 16);
2994 atomic_set(&acb->ante_token_value, 16);
2995 acb->fw_flag = FW_NORMAL;
2996 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
2997 }
2998 rtn = SUCCESS;
2568 } 2999 }
2569 retry_count++; 3000 break;
2570 goto sleep_again; 3001 }
3002 case ACB_ADAPTER_TYPE_B:{
3003 acb->acb_flags |= ACB_F_BUS_RESET;
3004 if (!arcmsr_iop_reset(acb)) {
3005 acb->acb_flags &= ~ACB_F_BUS_RESET;
3006 rtn = FAILED;
3007 } else {
3008 acb->acb_flags &= ~ACB_F_BUS_RESET;
3009 if (atomic_read(&acb->rq_map_token) == 0) {
3010 atomic_set(&acb->rq_map_token, 16);
3011 atomic_set(&acb->ante_token_value, 16);
3012 acb->fw_flag = FW_NORMAL;
3013 init_timer(&acb->eternal_timer);
3014 acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
3015 acb->eternal_timer.data = (unsigned long) acb;
3016 acb->eternal_timer.function = &arcmsr_request_device_map;
3017 add_timer(&acb->eternal_timer);
3018 } else {
3019 atomic_set(&acb->rq_map_token, 16);
3020 atomic_set(&acb->ante_token_value, 16);
3021 acb->fw_flag = FW_NORMAL;
3022 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
3023 }
3024 rtn = SUCCESS;
2571 } 3025 }
2572 acb->acb_flags &= ~ACB_F_FIRMWARE_TRAP; 3026 break;
2573 acb->acb_flags |= ACB_F_IOP_INITED;
2574 acb->acb_flags &= ~ACB_F_BUS_RESET;
2575 printk(KERN_NOTICE "arcmsr%d: hardware bus reset and reset ok \n",
2576 acb->host->host_no);
2577 /* disable all outbound interrupt */
2578 intmask_org = arcmsr_disable_outbound_ints(acb);
2579 arcmsr_get_firmware_spec(acb, 1);
2580 /*start background rebuild*/
2581 arcmsr_start_adapter_bgrb(acb);
2582 /* clear Qbuffer if door bell ringed */
2583 outbound_doorbell = readl(&reg->outbound_doorbell);
2584 writel(outbound_doorbell, &reg->outbound_doorbell); /*clear interrupt */
2585 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
2586 /* enable outbound Post Queue,outbound doorbell Interrupt */
2587 arcmsr_enable_outbound_ints(acb, intmask_org);
2588 atomic_set(&acb->rq_map_token, 16);
2589 init_timer(&acb->eternal_timer);
2590 acb->eternal_timer.expires = jiffies + msecs_to_jiffies(20*HZ);
2591 acb->eternal_timer.data = (unsigned long) acb;
2592 acb->eternal_timer.function = &arcmsr_request_device_map;
2593 add_timer(&acb->eternal_timer);
2594 #endif
2595 } 3027 }
3028 case ACB_ADAPTER_TYPE_C:{
3029 if (acb->acb_flags & ACB_F_BUS_RESET) {
3030 long timeout;
3031 printk(KERN_ERR "arcmsr: there is an bus reset eh proceeding.......\n");
3032 timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220*HZ);
3033 if (timeout) {
3034 return SUCCESS;
3035 }
3036 }
3037 acb->acb_flags |= ACB_F_BUS_RESET;
3038 if (!arcmsr_iop_reset(acb)) {
3039 struct MessageUnit_C __iomem *reg;
3040 reg = acb->pmuC;
3041 arcmsr_hardware_reset(acb);
3042 acb->acb_flags &= ~ACB_F_IOP_INITED;
3043sleep:
3044 arcmsr_sleep_for_bus_reset(cmd);
3045 if ((readl(&reg->host_diagnostic) & 0x04) != 0) {
3046 printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d \n", acb->host->host_no, retry_count);
3047 if (retry_count > retrycount) {
3048 acb->fw_flag = FW_DEADLOCK;
3049 printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!! \n", acb->host->host_no);
3050 return FAILED;
3051 }
3052 retry_count++;
3053 goto sleep;
3054 }
3055 acb->acb_flags |= ACB_F_IOP_INITED;
3056 /* disable all outbound interrupt */
3057 intmask_org = arcmsr_disable_outbound_ints(acb);
3058 arcmsr_get_firmware_spec(acb);
3059 arcmsr_start_adapter_bgrb(acb);
3060 /* clear Qbuffer if door bell ringed */
3061 outbound_doorbell = readl(&reg->outbound_doorbell);
3062 writel(outbound_doorbell, &reg->outbound_doorbell_clear); /*clear interrupt */
3063 writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
3064 /* enable outbound Post Queue,outbound doorbell Interrupt */
3065 arcmsr_enable_outbound_ints(acb, intmask_org);
3066 atomic_set(&acb->rq_map_token, 16);
3067 atomic_set(&acb->ante_token_value, 16);
3068 acb->fw_flag = FW_NORMAL;
3069 init_timer(&acb->eternal_timer);
3070 acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
3071 acb->eternal_timer.data = (unsigned long) acb;
3072 acb->eternal_timer.function = &arcmsr_request_device_map;
3073 add_timer(&acb->eternal_timer);
3074 acb->acb_flags &= ~ACB_F_BUS_RESET;
3075 rtn = SUCCESS;
3076 printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
3077 } else {
3078 acb->acb_flags &= ~ACB_F_BUS_RESET;
3079 if (atomic_read(&acb->rq_map_token) == 0) {
3080 atomic_set(&acb->rq_map_token, 16);
3081 atomic_set(&acb->ante_token_value, 16);
3082 acb->fw_flag = FW_NORMAL;
3083 init_timer(&acb->eternal_timer);
3084 acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
3085 acb->eternal_timer.data = (unsigned long) acb;
3086 acb->eternal_timer.function = &arcmsr_request_device_map;
3087 add_timer(&acb->eternal_timer);
3088 } else {
3089 atomic_set(&acb->rq_map_token, 16);
3090 atomic_set(&acb->ante_token_value, 16);
3091 acb->fw_flag = FW_NORMAL;
3092 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
3093 }
3094 rtn = SUCCESS;
3095 }
2596 break; 3096 break;
2597 case ACB_ADAPTER_TYPE_B: {
2598 }
2599 } 3097 }
2600 } else {
2601 acb->acb_flags &= ~ACB_F_BUS_RESET;
2602 } 3098 }
2603 return SUCCESS; 3099 return rtn;
2604} 3100}
2605 3101
2606static void arcmsr_abort_one_cmd(struct AdapterControlBlock *acb, 3102static int arcmsr_abort_one_cmd(struct AdapterControlBlock *acb,
2607 struct CommandControlBlock *ccb) 3103 struct CommandControlBlock *ccb)
2608{ 3104{
2609 u32 intmask; 3105 int rtn;
2610 3106 rtn = arcmsr_polling_ccbdone(acb, ccb);
2611 ccb->startdone = ARCMSR_CCB_ABORTED; 3107 return rtn;
2612
2613 /*
2614 ** Wait for 3 sec for all command done.
2615 */
2616 ssleep(3);
2617
2618 intmask = arcmsr_disable_outbound_ints(acb);
2619 arcmsr_polling_ccbdone(acb, ccb);
2620 arcmsr_enable_outbound_ints(acb, intmask);
2621} 3108}
2622 3109
2623static int arcmsr_abort(struct scsi_cmnd *cmd) 3110static int arcmsr_abort(struct scsi_cmnd *cmd)
@@ -2625,10 +3112,11 @@ static int arcmsr_abort(struct scsi_cmnd *cmd)
2625 struct AdapterControlBlock *acb = 3112 struct AdapterControlBlock *acb =
2626 (struct AdapterControlBlock *)cmd->device->host->hostdata; 3113 (struct AdapterControlBlock *)cmd->device->host->hostdata;
2627 int i = 0; 3114 int i = 0;
2628 3115 int rtn = FAILED;
2629 printk(KERN_NOTICE 3116 printk(KERN_NOTICE
2630 "arcmsr%d: abort device command of scsi id = %d lun = %d \n", 3117 "arcmsr%d: abort device command of scsi id = %d lun = %d \n",
2631 acb->host->host_no, cmd->device->id, cmd->device->lun); 3118 acb->host->host_no, cmd->device->id, cmd->device->lun);
3119 acb->acb_flags |= ACB_F_ABORT;
2632 acb->num_aborts++; 3120 acb->num_aborts++;
2633 /* 3121 /*
2634 ************************************************ 3122 ************************************************
@@ -2637,17 +3125,18 @@ static int arcmsr_abort(struct scsi_cmnd *cmd)
2637 ************************************************ 3125 ************************************************
2638 */ 3126 */
2639 if (!atomic_read(&acb->ccboutstandingcount)) 3127 if (!atomic_read(&acb->ccboutstandingcount))
2640 return SUCCESS; 3128 return rtn;
2641 3129
2642 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { 3130 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2643 struct CommandControlBlock *ccb = acb->pccb_pool[i]; 3131 struct CommandControlBlock *ccb = acb->pccb_pool[i];
2644 if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) { 3132 if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
2645 arcmsr_abort_one_cmd(acb, ccb); 3133 ccb->startdone = ARCMSR_CCB_ABORTED;
3134 rtn = arcmsr_abort_one_cmd(acb, ccb);
2646 break; 3135 break;
2647 } 3136 }
2648 } 3137 }
2649 3138 acb->acb_flags &= ~ACB_F_ABORT;
2650 return SUCCESS; 3139 return rtn;
2651} 3140}
2652 3141
2653static const char *arcmsr_info(struct Scsi_Host *host) 3142static const char *arcmsr_info(struct Scsi_Host *host)
@@ -2657,7 +3146,6 @@ static const char *arcmsr_info(struct Scsi_Host *host)
2657 static char buf[256]; 3146 static char buf[256];
2658 char *type; 3147 char *type;
2659 int raid6 = 1; 3148 int raid6 = 1;
2660
2661 switch (acb->pdev->device) { 3149 switch (acb->pdev->device) {
2662 case PCI_DEVICE_ID_ARECA_1110: 3150 case PCI_DEVICE_ID_ARECA_1110:
2663 case PCI_DEVICE_ID_ARECA_1200: 3151 case PCI_DEVICE_ID_ARECA_1200:
@@ -2681,6 +3169,7 @@ static const char *arcmsr_info(struct Scsi_Host *host)
2681 case PCI_DEVICE_ID_ARECA_1381: 3169 case PCI_DEVICE_ID_ARECA_1381:
2682 case PCI_DEVICE_ID_ARECA_1680: 3170 case PCI_DEVICE_ID_ARECA_1680:
2683 case PCI_DEVICE_ID_ARECA_1681: 3171 case PCI_DEVICE_ID_ARECA_1681:
3172 case PCI_DEVICE_ID_ARECA_1880:
2684 type = "SAS"; 3173 type = "SAS";
2685 break; 3174 break;
2686 default: 3175 default:
diff --git a/drivers/scsi/be2iscsi/Kconfig b/drivers/scsi/be2iscsi/Kconfig
index 2952fcd008ea..84c275fb9f6b 100644
--- a/drivers/scsi/be2iscsi/Kconfig
+++ b/drivers/scsi/be2iscsi/Kconfig
@@ -1,6 +1,6 @@
1config BE2ISCSI 1config BE2ISCSI
2 tristate "ServerEngines' 10Gbps iSCSI - BladeEngine 2" 2 tristate "ServerEngines' 10Gbps iSCSI - BladeEngine 2"
3 depends on PCI && SCSI 3 depends on PCI && SCSI && NET
4 select SCSI_ISCSI_ATTRS 4 select SCSI_ISCSI_ATTRS
5 5
6 help 6 help
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index 136b49cea791..1cb8a5e85c7f 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -128,8 +128,8 @@ struct be_ctrl_info {
128#define mcc_timeout 120000 /* 5s timeout */ 128#define mcc_timeout 120000 /* 5s timeout */
129 129
130/* Returns number of pages spanned by the data starting at the given addr */ 130/* Returns number of pages spanned by the data starting at the given addr */
131#define PAGES_4K_SPANNED(_address, size) \ 131#define PAGES_4K_SPANNED(_address, size) \
132 ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \ 132 ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \
133 (size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K)) 133 (size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))
134 134
135/* Byte offset into the page corresponding to given address */ 135/* Byte offset into the page corresponding to given address */
@@ -137,7 +137,7 @@ struct be_ctrl_info {
137 ((size_t)(addr) & (PAGE_SIZE_4K-1)) 137 ((size_t)(addr) & (PAGE_SIZE_4K-1))
138 138
139/* Returns bit offset within a DWORD of a bitfield */ 139/* Returns bit offset within a DWORD of a bitfield */
140#define AMAP_BIT_OFFSET(_struct, field) \ 140#define AMAP_BIT_OFFSET(_struct, field) \
141 (((size_t)&(((_struct *)0)->field))%32) 141 (((size_t)&(((_struct *)0)->field))%32)
142 142
143/* Returns the bit mask of the field that is NOT shifted into location. */ 143/* Returns the bit mask of the field that is NOT shifted into location. */
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index cda6642c7368..7c7537335c88 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -19,6 +19,86 @@
19#include "be_mgmt.h" 19#include "be_mgmt.h"
20#include "be_main.h" 20#include "be_main.h"
21 21
22int beiscsi_pci_soft_reset(struct beiscsi_hba *phba)
23{
24 u32 sreset;
25 u8 *pci_reset_offset = 0;
26 u8 *pci_online0_offset = 0;
27 u8 *pci_online1_offset = 0;
28 u32 pconline0 = 0;
29 u32 pconline1 = 0;
30 u32 i;
31
32 pci_reset_offset = (u8 *)phba->pci_va + BE2_SOFT_RESET;
33 pci_online0_offset = (u8 *)phba->pci_va + BE2_PCI_ONLINE0;
34 pci_online1_offset = (u8 *)phba->pci_va + BE2_PCI_ONLINE1;
35 sreset = readl((void *)pci_reset_offset);
36 sreset |= BE2_SET_RESET;
37 writel(sreset, (void *)pci_reset_offset);
38
39 i = 0;
40 while (sreset & BE2_SET_RESET) {
41 if (i > 64)
42 break;
43 msleep(100);
44 sreset = readl((void *)pci_reset_offset);
45 i++;
46 }
47
48 if (sreset & BE2_SET_RESET) {
49 printk(KERN_ERR "Soft Reset did not deassert\n");
50 return -EIO;
51 }
52 pconline1 = BE2_MPU_IRAM_ONLINE;
53 writel(pconline0, (void *)pci_online0_offset);
54 writel(pconline1, (void *)pci_online1_offset);
55
56 sreset = BE2_SET_RESET;
57 writel(sreset, (void *)pci_reset_offset);
58
59 i = 0;
60 while (sreset & BE2_SET_RESET) {
61 if (i > 64)
62 break;
63 msleep(1);
64 sreset = readl((void *)pci_reset_offset);
65 i++;
66 }
67 if (sreset & BE2_SET_RESET) {
68 printk(KERN_ERR "MPU Online Soft Reset did not deassert\n");
69 return -EIO;
70 }
71 return 0;
72}
73
74int be_chk_reset_complete(struct beiscsi_hba *phba)
75{
76 unsigned int num_loop;
77 u8 *mpu_sem = 0;
78 u32 status;
79
80 num_loop = 1000;
81 mpu_sem = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
82 msleep(5000);
83
84 while (num_loop) {
85 status = readl((void *)mpu_sem);
86
87 if ((status & 0x80000000) || (status & 0x0000FFFF) == 0xC000)
88 break;
89 msleep(60);
90 num_loop--;
91 }
92
93 if ((status & 0x80000000) || (!num_loop)) {
94 printk(KERN_ERR "Failed in be_chk_reset_complete"
95 "status = 0x%x\n", status);
96 return -EIO;
97 }
98
99 return 0;
100}
101
22void be_mcc_notify(struct beiscsi_hba *phba) 102void be_mcc_notify(struct beiscsi_hba *phba)
23{ 103{
24 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; 104 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
@@ -98,7 +178,7 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
98 dev_err(&ctrl->pdev->dev, 178 dev_err(&ctrl->pdev->dev,
99 "error in cmd completion: status(compl/extd)=%d/%d\n", 179 "error in cmd completion: status(compl/extd)=%d/%d\n",
100 compl_status, extd_status); 180 compl_status, extd_status);
101 return -1; 181 return -EBUSY;
102 } 182 }
103 return 0; 183 return 0;
104} 184}
@@ -151,20 +231,20 @@ void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
151{ 231{
152 switch (evt->port_link_status) { 232 switch (evt->port_link_status) {
153 case ASYNC_EVENT_LINK_DOWN: 233 case ASYNC_EVENT_LINK_DOWN:
154 SE_DEBUG(DBG_LVL_1, "Link Down on Physical Port %d \n", 234 SE_DEBUG(DBG_LVL_1, "Link Down on Physical Port %d\n",
155 evt->physical_port); 235 evt->physical_port);
156 phba->state |= BE_ADAPTER_LINK_DOWN; 236 phba->state |= BE_ADAPTER_LINK_DOWN;
157 iscsi_host_for_each_session(phba->shost, 237 iscsi_host_for_each_session(phba->shost,
158 be2iscsi_fail_session); 238 be2iscsi_fail_session);
159 break; 239 break;
160 case ASYNC_EVENT_LINK_UP: 240 case ASYNC_EVENT_LINK_UP:
161 phba->state = BE_ADAPTER_UP; 241 phba->state = BE_ADAPTER_UP;
162 SE_DEBUG(DBG_LVL_1, "Link UP on Physical Port %d \n", 242 SE_DEBUG(DBG_LVL_1, "Link UP on Physical Port %d\n",
163 evt->physical_port); 243 evt->physical_port);
164 break; 244 break;
165 default: 245 default:
166 SE_DEBUG(DBG_LVL_1, "Unexpected Async Notification %d on" 246 SE_DEBUG(DBG_LVL_1, "Unexpected Async Notification %d on"
167 "Physical Port %d \n", 247 "Physical Port %d\n",
168 evt->port_link_status, 248 evt->port_link_status,
169 evt->physical_port); 249 evt->physical_port);
170 } 250 }
@@ -199,7 +279,7 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba)
199 else 279 else
200 SE_DEBUG(DBG_LVL_1, 280 SE_DEBUG(DBG_LVL_1,
201 " Unsupported Async Event, flags" 281 " Unsupported Async Event, flags"
202 " = 0x%08x \n", compl->flags); 282 " = 0x%08x\n", compl->flags);
203 283
204 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { 284 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
205 status = be_mcc_compl_process(ctrl, compl); 285 status = be_mcc_compl_process(ctrl, compl);
@@ -231,7 +311,7 @@ static int be_mcc_wait_compl(struct beiscsi_hba *phba)
231 } 311 }
232 if (i == mcc_timeout) { 312 if (i == mcc_timeout) {
233 dev_err(&phba->pcidev->dev, "mccq poll timed out\n"); 313 dev_err(&phba->pcidev->dev, "mccq poll timed out\n");
234 return -1; 314 return -EBUSY;
235 } 315 }
236 return 0; 316 return 0;
237} 317}
@@ -257,7 +337,7 @@ static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
257 337
258 if (cnt > 6000000) { 338 if (cnt > 6000000) {
259 dev_err(&ctrl->pdev->dev, "mbox_db poll timed out\n"); 339 dev_err(&ctrl->pdev->dev, "mbox_db poll timed out\n");
260 return -1; 340 return -EBUSY;
261 } 341 }
262 342
263 if (cnt > 50) { 343 if (cnt > 50) {
@@ -286,7 +366,7 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
286 366
287 status = be_mbox_db_ready_wait(ctrl); 367 status = be_mbox_db_ready_wait(ctrl);
288 if (status != 0) { 368 if (status != 0) {
289 SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed 1\n"); 369 SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed\n");
290 return status; 370 return status;
291 } 371 }
292 val = 0; 372 val = 0;
@@ -297,19 +377,19 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
297 377
298 status = be_mbox_db_ready_wait(ctrl); 378 status = be_mbox_db_ready_wait(ctrl);
299 if (status != 0) { 379 if (status != 0) {
300 SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed 2\n"); 380 SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed\n");
301 return status; 381 return status;
302 } 382 }
303 if (be_mcc_compl_is_new(compl)) { 383 if (be_mcc_compl_is_new(compl)) {
304 status = be_mcc_compl_process(ctrl, &mbox->compl); 384 status = be_mcc_compl_process(ctrl, &mbox->compl);
305 be_mcc_compl_use(compl); 385 be_mcc_compl_use(compl);
306 if (status) { 386 if (status) {
307 SE_DEBUG(DBG_LVL_1, "After be_mcc_compl_process \n"); 387 SE_DEBUG(DBG_LVL_1, "After be_mcc_compl_process\n");
308 return status; 388 return status;
309 } 389 }
310 } else { 390 } else {
311 dev_err(&ctrl->pdev->dev, "invalid mailbox completion\n"); 391 dev_err(&ctrl->pdev->dev, "invalid mailbox completion\n");
312 return -1; 392 return -EBUSY;
313 } 393 }
314 return 0; 394 return 0;
315} 395}
@@ -355,7 +435,7 @@ static int be_mbox_notify_wait(struct beiscsi_hba *phba)
355 return status; 435 return status;
356 } else { 436 } else {
357 dev_err(&phba->pcidev->dev, "invalid mailbox completion\n"); 437 dev_err(&phba->pcidev->dev, "invalid mailbox completion\n");
358 return -1; 438 return -EBUSY;
359 } 439 }
360 return 0; 440 return 0;
361} 441}
@@ -500,7 +580,7 @@ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
500 580
501 status = be_mbox_notify(ctrl); 581 status = be_mbox_notify(ctrl);
502 if (status) 582 if (status)
503 SE_DEBUG(DBG_LVL_1, "be_cmd_fw_initialize Failed \n"); 583 SE_DEBUG(DBG_LVL_1, "be_cmd_fw_initialize Failed\n");
504 584
505 spin_unlock(&ctrl->mbox_lock); 585 spin_unlock(&ctrl->mbox_lock);
506 return status; 586 return status;
@@ -517,7 +597,7 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
517 void *ctxt = &req->context; 597 void *ctxt = &req->context;
518 int status; 598 int status;
519 599
520 SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_cq_create \n"); 600 SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_cq_create\n");
521 spin_lock(&ctrl->mbox_lock); 601 spin_lock(&ctrl->mbox_lock);
522 memset(wrb, 0, sizeof(*wrb)); 602 memset(wrb, 0, sizeof(*wrb));
523 603
@@ -550,7 +630,7 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
550 cq->id = le16_to_cpu(resp->cq_id); 630 cq->id = le16_to_cpu(resp->cq_id);
551 cq->created = true; 631 cq->created = true;
552 } else 632 } else
553 SE_DEBUG(DBG_LVL_1, "In be_cmd_cq_create, status=ox%08x \n", 633 SE_DEBUG(DBG_LVL_1, "In be_cmd_cq_create, status=ox%08x\n",
554 status); 634 status);
555 spin_unlock(&ctrl->mbox_lock); 635 spin_unlock(&ctrl->mbox_lock);
556 636
@@ -619,7 +699,7 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
619 u8 subsys = 0, opcode = 0; 699 u8 subsys = 0, opcode = 0;
620 int status; 700 int status;
621 701
622 SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_q_destroy \n"); 702 SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_q_destroy\n");
623 spin_lock(&ctrl->mbox_lock); 703 spin_lock(&ctrl->mbox_lock);
624 memset(wrb, 0, sizeof(*wrb)); 704 memset(wrb, 0, sizeof(*wrb));
625 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 705 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -652,7 +732,7 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
652 default: 732 default:
653 spin_unlock(&ctrl->mbox_lock); 733 spin_unlock(&ctrl->mbox_lock);
654 BUG(); 734 BUG();
655 return -1; 735 return -ENXIO;
656 } 736 }
657 be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req)); 737 be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
658 if (queue_type != QTYPE_SGL) 738 if (queue_type != QTYPE_SGL)
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 49fcc787ee8b..40641d0845f4 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -47,8 +47,8 @@ struct be_mcc_wrb {
47 47
48#define CQE_FLAGS_VALID_MASK (1 << 31) 48#define CQE_FLAGS_VALID_MASK (1 << 31)
49#define CQE_FLAGS_ASYNC_MASK (1 << 30) 49#define CQE_FLAGS_ASYNC_MASK (1 << 30)
50#define CQE_FLAGS_COMPLETED_MASK (1 << 28) 50#define CQE_FLAGS_COMPLETED_MASK (1 << 28)
51#define CQE_FLAGS_CONSUMED_MASK (1 << 27) 51#define CQE_FLAGS_CONSUMED_MASK (1 << 27)
52 52
53/* Completion Status */ 53/* Completion Status */
54#define MCC_STATUS_SUCCESS 0x0 54#define MCC_STATUS_SUCCESS 0x0
@@ -56,7 +56,7 @@ struct be_mcc_wrb {
56#define CQE_STATUS_COMPL_MASK 0xFFFF 56#define CQE_STATUS_COMPL_MASK 0xFFFF
57#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */ 57#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
58#define CQE_STATUS_EXTD_MASK 0xFFFF 58#define CQE_STATUS_EXTD_MASK 0xFFFF
59#define CQE_STATUS_EXTD_SHIFT 0 /* bits 0 - 15 */ 59#define CQE_STATUS_EXTD_SHIFT 16 /* bits 0 - 15 */
60 60
61struct be_mcc_compl { 61struct be_mcc_compl {
62 u32 status; /* dword 0 */ 62 u32 status; /* dword 0 */
@@ -143,14 +143,14 @@ struct be_mcc_mailbox {
143 */ 143 */
144#define OPCODE_COMMON_CQ_CREATE 12 144#define OPCODE_COMMON_CQ_CREATE 12
145#define OPCODE_COMMON_EQ_CREATE 13 145#define OPCODE_COMMON_EQ_CREATE 13
146#define OPCODE_COMMON_MCC_CREATE 21 146#define OPCODE_COMMON_MCC_CREATE 21
147#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32 147#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32
148#define OPCODE_COMMON_GET_FW_VERSION 35 148#define OPCODE_COMMON_GET_FW_VERSION 35
149#define OPCODE_COMMON_MODIFY_EQ_DELAY 41 149#define OPCODE_COMMON_MODIFY_EQ_DELAY 41
150#define OPCODE_COMMON_FIRMWARE_CONFIG 42 150#define OPCODE_COMMON_FIRMWARE_CONFIG 42
151#define OPCODE_COMMON_MCC_DESTROY 53 151#define OPCODE_COMMON_MCC_DESTROY 53
152#define OPCODE_COMMON_CQ_DESTROY 54 152#define OPCODE_COMMON_CQ_DESTROY 54
153#define OPCODE_COMMON_EQ_DESTROY 55 153#define OPCODE_COMMON_EQ_DESTROY 55
154#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58 154#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58
155#define OPCODE_COMMON_FUNCTION_RESET 61 155#define OPCODE_COMMON_FUNCTION_RESET 61
156 156
@@ -164,9 +164,9 @@ struct be_mcc_mailbox {
164#define OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG 7 164#define OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG 7
165#define OPCODE_COMMON_ISCSI_SET_FRAGNUM_BITS_FOR_SGL_CRA 61 165#define OPCODE_COMMON_ISCSI_SET_FRAGNUM_BITS_FOR_SGL_CRA 61
166#define OPCODE_COMMON_ISCSI_DEFQ_CREATE 64 166#define OPCODE_COMMON_ISCSI_DEFQ_CREATE 64
167#define OPCODE_COMMON_ISCSI_DEFQ_DESTROY 65 167#define OPCODE_COMMON_ISCSI_DEFQ_DESTROY 65
168#define OPCODE_COMMON_ISCSI_WRBQ_CREATE 66 168#define OPCODE_COMMON_ISCSI_WRBQ_CREATE 66
169#define OPCODE_COMMON_ISCSI_WRBQ_DESTROY 67 169#define OPCODE_COMMON_ISCSI_WRBQ_DESTROY 67
170 170
171struct be_cmd_req_hdr { 171struct be_cmd_req_hdr {
172 u8 opcode; /* dword 0 */ 172 u8 opcode; /* dword 0 */
@@ -423,7 +423,7 @@ int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
423 struct be_queue_info *cq); 423 struct be_queue_info *cq);
424 424
425int be_poll_mcc(struct be_ctrl_info *ctrl); 425int be_poll_mcc(struct be_ctrl_info *ctrl);
426unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl, 426int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
427 struct beiscsi_hba *phba); 427 struct beiscsi_hba *phba);
428unsigned int be_cmd_get_mac_addr(struct beiscsi_hba *phba); 428unsigned int be_cmd_get_mac_addr(struct beiscsi_hba *phba);
429void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag); 429void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag);
@@ -875,7 +875,7 @@ struct be_fw_cfg {
875 */ 875 */
876#define UNSOL_HDR_NOTIFY 28 /* Unsolicited header notify.*/ 876#define UNSOL_HDR_NOTIFY 28 /* Unsolicited header notify.*/
877#define UNSOL_DATA_NOTIFY 29 /* Unsolicited data notify.*/ 877#define UNSOL_DATA_NOTIFY 29 /* Unsolicited data notify.*/
878#define UNSOL_DATA_DIGEST_ERROR_NOTIFY 30 /* Unsolicited data digest 878#define UNSOL_DATA_DIGEST_ERROR_NOTIFY 30 /* Unsolicited data digest
879 * error notify. 879 * error notify.
880 */ 880 */
881#define DRIVERMSG_NOTIFY 31 /* TCP acknowledge based 881#define DRIVERMSG_NOTIFY 31 /* TCP acknowledge based
@@ -901,6 +901,9 @@ struct be_fw_cfg {
901 * the cxn 901 * the cxn
902 */ 902 */
903 903
904int beiscsi_pci_soft_reset(struct beiscsi_hba *phba);
905int be_chk_reset_complete(struct beiscsi_hba *phba);
906
904void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len, 907void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
905 bool embedded, u8 sge_cnt); 908 bool embedded, u8 sge_cnt);
906 909
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index c3928cb8b042..6d63e7b312cf 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -52,7 +52,7 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
52 SE_DEBUG(DBG_LVL_8, "In beiscsi_session_create\n"); 52 SE_DEBUG(DBG_LVL_8, "In beiscsi_session_create\n");
53 53
54 if (!ep) { 54 if (!ep) {
55 SE_DEBUG(DBG_LVL_1, "beiscsi_session_create: invalid ep \n"); 55 SE_DEBUG(DBG_LVL_1, "beiscsi_session_create: invalid ep\n");
56 return NULL; 56 return NULL;
57 } 57 }
58 beiscsi_ep = ep->dd_data; 58 beiscsi_ep = ep->dd_data;
@@ -157,7 +157,7 @@ static int beiscsi_bindconn_cid(struct beiscsi_hba *phba,
157 "Connection table already occupied. Detected clash\n"); 157 "Connection table already occupied. Detected clash\n");
158 return -EINVAL; 158 return -EINVAL;
159 } else { 159 } else {
160 SE_DEBUG(DBG_LVL_8, "phba->conn_table[%d]=%p(beiscsi_conn) \n", 160 SE_DEBUG(DBG_LVL_8, "phba->conn_table[%d]=%p(beiscsi_conn)\n",
161 cid, beiscsi_conn); 161 cid, beiscsi_conn);
162 phba->conn_table[cid] = beiscsi_conn; 162 phba->conn_table[cid] = beiscsi_conn;
163 } 163 }
@@ -196,7 +196,7 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
196 196
197 if (beiscsi_ep->phba != phba) { 197 if (beiscsi_ep->phba != phba) {
198 SE_DEBUG(DBG_LVL_8, 198 SE_DEBUG(DBG_LVL_8,
199 "beiscsi_ep->hba=%p not equal to phba=%p \n", 199 "beiscsi_ep->hba=%p not equal to phba=%p\n",
200 beiscsi_ep->phba, phba); 200 beiscsi_ep->phba, phba);
201 return -EEXIST; 201 return -EEXIST;
202 } 202 }
@@ -204,7 +204,7 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
204 beiscsi_conn->beiscsi_conn_cid = beiscsi_ep->ep_cid; 204 beiscsi_conn->beiscsi_conn_cid = beiscsi_ep->ep_cid;
205 beiscsi_conn->ep = beiscsi_ep; 205 beiscsi_conn->ep = beiscsi_ep;
206 beiscsi_ep->conn = beiscsi_conn; 206 beiscsi_ep->conn = beiscsi_conn;
207 SE_DEBUG(DBG_LVL_8, "beiscsi_conn=%p conn=%p ep_cid=%d \n", 207 SE_DEBUG(DBG_LVL_8, "beiscsi_conn=%p conn=%p ep_cid=%d\n",
208 beiscsi_conn, conn, beiscsi_ep->ep_cid); 208 beiscsi_conn, conn, beiscsi_ep->ep_cid);
209 return beiscsi_bindconn_cid(phba, beiscsi_conn, beiscsi_ep->ep_cid); 209 return beiscsi_bindconn_cid(phba, beiscsi_conn, beiscsi_ep->ep_cid);
210} 210}
@@ -230,7 +230,7 @@ int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
230 if (!beiscsi_ep) { 230 if (!beiscsi_ep) {
231 SE_DEBUG(DBG_LVL_1, 231 SE_DEBUG(DBG_LVL_1,
232 "In beiscsi_conn_get_param , no beiscsi_ep\n"); 232 "In beiscsi_conn_get_param , no beiscsi_ep\n");
233 return -1; 233 return -ENODEV;
234 } 234 }
235 235
236 switch (param) { 236 switch (param) {
@@ -277,6 +277,10 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
277 if (session->max_burst > 262144) 277 if (session->max_burst > 262144)
278 session->max_burst = 262144; 278 session->max_burst = 262144;
279 break; 279 break;
280 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
281 if ((conn->max_xmit_dlength > 65536) ||
282 (conn->max_xmit_dlength == 0))
283 conn->max_xmit_dlength = 65536;
280 default: 284 default:
281 return 0; 285 return 0;
282 } 286 }
@@ -308,8 +312,8 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
308 case ISCSI_HOST_PARAM_HWADDRESS: 312 case ISCSI_HOST_PARAM_HWADDRESS:
309 tag = be_cmd_get_mac_addr(phba); 313 tag = be_cmd_get_mac_addr(phba);
310 if (!tag) { 314 if (!tag) {
311 SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed \n"); 315 SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed\n");
312 return -1; 316 return -EAGAIN;
313 } else 317 } else
314 wait_event_interruptible(phba->ctrl.mcc_wait[tag], 318 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
315 phba->ctrl.mcc_numtag[tag]); 319 phba->ctrl.mcc_numtag[tag]);
@@ -319,10 +323,10 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
319 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF; 323 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
320 if (status || extd_status) { 324 if (status || extd_status) {
321 SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed" 325 SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed"
322 " status = %d extd_status = %d \n", 326 " status = %d extd_status = %d\n",
323 status, extd_status); 327 status, extd_status);
324 free_mcc_tag(&phba->ctrl, tag); 328 free_mcc_tag(&phba->ctrl, tag);
325 return -1; 329 return -EAGAIN;
326 } else { 330 } else {
327 wrb = queue_get_wrb(mccq, wrb_num); 331 wrb = queue_get_wrb(mccq, wrb_num);
328 free_mcc_tag(&phba->ctrl, tag); 332 free_mcc_tag(&phba->ctrl, tag);
@@ -442,6 +446,31 @@ static int beiscsi_get_cid(struct beiscsi_hba *phba)
442} 446}
443 447
444/** 448/**
449 * beiscsi_put_cid - Free the cid
450 * @phba: The phba for which the cid is being freed
451 * @cid: The cid to free
452 */
453static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid)
454{
455 phba->avlbl_cids++;
456 phba->cid_array[phba->cid_free++] = cid;
457 if (phba->cid_free == phba->params.cxns_per_ctrl)
458 phba->cid_free = 0;
459}
460
461/**
462 * beiscsi_free_ep - free endpoint
463 * @ep: pointer to iscsi endpoint structure
464 */
465static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep)
466{
467 struct beiscsi_hba *phba = beiscsi_ep->phba;
468
469 beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
470 beiscsi_ep->phba = NULL;
471}
472
473/**
445 * beiscsi_open_conn - Ask FW to open a TCP connection 474 * beiscsi_open_conn - Ask FW to open a TCP connection
446 * @ep: endpoint to be used 475 * @ep: endpoint to be used
447 * @src_addr: The source IP address 476 * @src_addr: The source IP address
@@ -459,8 +488,9 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
459 struct be_mcc_wrb *wrb; 488 struct be_mcc_wrb *wrb;
460 struct tcp_connect_and_offload_out *ptcpcnct_out; 489 struct tcp_connect_and_offload_out *ptcpcnct_out;
461 unsigned short status, extd_status; 490 unsigned short status, extd_status;
491 struct be_dma_mem nonemb_cmd;
462 unsigned int tag, wrb_num; 492 unsigned int tag, wrb_num;
463 int ret = -1; 493 int ret = -ENOMEM;
464 494
465 SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn\n"); 495 SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn\n");
466 beiscsi_ep->ep_cid = beiscsi_get_cid(phba); 496 beiscsi_ep->ep_cid = beiscsi_get_cid(phba);
@@ -468,22 +498,39 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
468 SE_DEBUG(DBG_LVL_1, "No free cid available\n"); 498 SE_DEBUG(DBG_LVL_1, "No free cid available\n");
469 return ret; 499 return ret;
470 } 500 }
471 SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn, ep_cid=%d ", 501 SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn, ep_cid=%d\n",
472 beiscsi_ep->ep_cid); 502 beiscsi_ep->ep_cid);
473 phba->ep_array[beiscsi_ep->ep_cid - 503 phba->ep_array[beiscsi_ep->ep_cid -
474 phba->fw_config.iscsi_cid_start] = ep; 504 phba->fw_config.iscsi_cid_start] = ep;
475 if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start + 505 if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start +
476 phba->params.cxns_per_ctrl * 2)) { 506 phba->params.cxns_per_ctrl * 2)) {
477 SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n"); 507 SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n");
478 return ret; 508 beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
509 goto free_ep;
479 } 510 }
480 511
481 beiscsi_ep->cid_vld = 0; 512 beiscsi_ep->cid_vld = 0;
482 tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep); 513 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
514 sizeof(struct tcp_connect_and_offload_in),
515 &nonemb_cmd.dma);
516 if (nonemb_cmd.va == NULL) {
517 SE_DEBUG(DBG_LVL_1,
518 "Failed to allocate memory for mgmt_open_connection"
519 "\n");
520 beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
521 return -ENOMEM;
522 }
523 nonemb_cmd.size = sizeof(struct tcp_connect_and_offload_in);
524 memset(nonemb_cmd.va, 0, nonemb_cmd.size);
525 tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd);
483 if (!tag) { 526 if (!tag) {
484 SE_DEBUG(DBG_LVL_1, 527 SE_DEBUG(DBG_LVL_1,
485 "mgmt_open_connection Failed for cid=%d \n", 528 "mgmt_open_connection Failed for cid=%d\n",
486 beiscsi_ep->ep_cid); 529 beiscsi_ep->ep_cid);
530 beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
531 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
532 nonemb_cmd.va, nonemb_cmd.dma);
533 return -EAGAIN;
487 } else { 534 } else {
488 wait_event_interruptible(phba->ctrl.mcc_wait[tag], 535 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
489 phba->ctrl.mcc_numtag[tag]); 536 phba->ctrl.mcc_numtag[tag]);
@@ -493,46 +540,31 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
493 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF; 540 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
494 if (status || extd_status) { 541 if (status || extd_status) {
495 SE_DEBUG(DBG_LVL_1, "mgmt_open_connection Failed" 542 SE_DEBUG(DBG_LVL_1, "mgmt_open_connection Failed"
496 " status = %d extd_status = %d \n", 543 " status = %d extd_status = %d\n",
497 status, extd_status); 544 status, extd_status);
545 beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
498 free_mcc_tag(&phba->ctrl, tag); 546 free_mcc_tag(&phba->ctrl, tag);
499 return -1; 547 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
548 nonemb_cmd.va, nonemb_cmd.dma);
549 goto free_ep;
500 } else { 550 } else {
501 wrb = queue_get_wrb(mccq, wrb_num); 551 wrb = queue_get_wrb(mccq, wrb_num);
502 free_mcc_tag(&phba->ctrl, tag); 552 free_mcc_tag(&phba->ctrl, tag);
503 553
504 ptcpcnct_out = embedded_payload(wrb); 554 ptcpcnct_out = embedded_payload(wrb);
505 beiscsi_ep = ep->dd_data; 555 beiscsi_ep = ep->dd_data;
506 beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle; 556 beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle;
507 beiscsi_ep->cid_vld = 1; 557 beiscsi_ep->cid_vld = 1;
508 SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n"); 558 SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n");
509 } 559 }
560 beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
561 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
562 nonemb_cmd.va, nonemb_cmd.dma);
510 return 0; 563 return 0;
511}
512
513/**
514 * beiscsi_put_cid - Free the cid
515 * @phba: The phba for which the cid is being freed
516 * @cid: The cid to free
517 */
518static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid)
519{
520 phba->avlbl_cids++;
521 phba->cid_array[phba->cid_free++] = cid;
522 if (phba->cid_free == phba->params.cxns_per_ctrl)
523 phba->cid_free = 0;
524}
525
526/**
527 * beiscsi_free_ep - free endpoint
528 * @ep: pointer to iscsi endpoint structure
529 */
530static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep)
531{
532 struct beiscsi_hba *phba = beiscsi_ep->phba;
533 564
534 beiscsi_put_cid(phba, beiscsi_ep->ep_cid); 565free_ep:
535 beiscsi_ep->phba = NULL; 566 beiscsi_free_ep(beiscsi_ep);
567 return -EBUSY;
536} 568}
537 569
538/** 570/**
@@ -552,18 +584,18 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
552 struct iscsi_endpoint *ep; 584 struct iscsi_endpoint *ep;
553 int ret; 585 int ret;
554 586
555 SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_connect \n"); 587 SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_connect\n");
556 if (shost) 588 if (shost)
557 phba = iscsi_host_priv(shost); 589 phba = iscsi_host_priv(shost);
558 else { 590 else {
559 ret = -ENXIO; 591 ret = -ENXIO;
560 SE_DEBUG(DBG_LVL_1, "shost is NULL \n"); 592 SE_DEBUG(DBG_LVL_1, "shost is NULL\n");
561 return ERR_PTR(ret); 593 return ERR_PTR(ret);
562 } 594 }
563 595
564 if (phba->state != BE_ADAPTER_UP) { 596 if (phba->state != BE_ADAPTER_UP) {
565 ret = -EBUSY; 597 ret = -EBUSY;
566 SE_DEBUG(DBG_LVL_1, "The Adapter state is Not UP \n"); 598 SE_DEBUG(DBG_LVL_1, "The Adapter state is Not UP\n");
567 return ERR_PTR(ret); 599 return ERR_PTR(ret);
568 } 600 }
569 601
@@ -576,16 +608,16 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
576 beiscsi_ep = ep->dd_data; 608 beiscsi_ep = ep->dd_data;
577 beiscsi_ep->phba = phba; 609 beiscsi_ep->phba = phba;
578 beiscsi_ep->openiscsi_ep = ep; 610 beiscsi_ep->openiscsi_ep = ep;
579 if (beiscsi_open_conn(ep, NULL, dst_addr, non_blocking)) { 611 ret = beiscsi_open_conn(ep, NULL, dst_addr, non_blocking);
580 SE_DEBUG(DBG_LVL_1, "Failed in beiscsi_open_conn \n"); 612 if (ret) {
581 ret = -ENOMEM; 613 SE_DEBUG(DBG_LVL_1, "Failed in beiscsi_open_conn\n");
582 goto free_ep; 614 goto free_ep;
583 } 615 }
584 616
585 return ep; 617 return ep;
586 618
587free_ep: 619free_ep:
588 beiscsi_free_ep(beiscsi_ep); 620 iscsi_destroy_endpoint(ep);
589 return ERR_PTR(ret); 621 return ERR_PTR(ret);
590} 622}
591 623
@@ -620,9 +652,9 @@ static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag)
620 652
621 tag = mgmt_upload_connection(phba, beiscsi_ep->ep_cid, flag); 653 tag = mgmt_upload_connection(phba, beiscsi_ep->ep_cid, flag);
622 if (!tag) { 654 if (!tag) {
623 SE_DEBUG(DBG_LVL_8, "upload failed for cid 0x%x", 655 SE_DEBUG(DBG_LVL_8, "upload failed for cid 0x%x\n",
624 beiscsi_ep->ep_cid); 656 beiscsi_ep->ep_cid);
625 ret = -1; 657 ret = -EAGAIN;
626 } else { 658 } else {
627 wait_event_interruptible(phba->ctrl.mcc_wait[tag], 659 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
628 phba->ctrl.mcc_numtag[tag]); 660 phba->ctrl.mcc_numtag[tag]);
@@ -632,30 +664,6 @@ static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag)
632} 664}
633 665
634/** 666/**
635 * beiscsi_ep_disconnect - Tears down the TCP connection
636 * @ep: endpoint to be used
637 *
638 * Tears down the TCP connection
639 */
640void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
641{
642 struct beiscsi_conn *beiscsi_conn;
643 struct beiscsi_endpoint *beiscsi_ep;
644 struct beiscsi_hba *phba;
645
646 beiscsi_ep = ep->dd_data;
647 phba = beiscsi_ep->phba;
648 SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect for ep_cid = %d\n",
649 beiscsi_ep->ep_cid);
650
651 if (beiscsi_ep->conn) {
652 beiscsi_conn = beiscsi_ep->conn;
653 iscsi_suspend_queue(beiscsi_conn->conn);
654 }
655
656}
657
658/**
659 * beiscsi_unbind_conn_to_cid - Unbind the beiscsi_conn from phba conn table 667 * beiscsi_unbind_conn_to_cid - Unbind the beiscsi_conn from phba conn table
660 * @phba: The phba instance 668 * @phba: The phba instance
661 * @cid: The cid to free 669 * @cid: The cid to free
@@ -666,50 +674,57 @@ static int beiscsi_unbind_conn_to_cid(struct beiscsi_hba *phba,
666 if (phba->conn_table[cid]) 674 if (phba->conn_table[cid])
667 phba->conn_table[cid] = NULL; 675 phba->conn_table[cid] = NULL;
668 else { 676 else {
669 SE_DEBUG(DBG_LVL_8, "Connection table Not occupied. \n"); 677 SE_DEBUG(DBG_LVL_8, "Connection table Not occupied.\n");
670 return -EINVAL; 678 return -EINVAL;
671 } 679 }
672 return 0; 680 return 0;
673} 681}
674 682
675/** 683/**
676 * beiscsi_conn_stop - Invalidate and stop the connection 684 * beiscsi_ep_disconnect - Tears down the TCP connection
677 * @cls_conn: pointer to get iscsi_conn 685 * @ep: endpoint to be used
678 * @flag: The type of connection closure 686 *
687 * Tears down the TCP connection
679 */ 688 */
680void beiscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) 689void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
681{ 690{
682 struct iscsi_conn *conn = cls_conn->dd_data; 691 struct beiscsi_conn *beiscsi_conn;
683 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
684 struct beiscsi_endpoint *beiscsi_ep; 692 struct beiscsi_endpoint *beiscsi_ep;
685 struct iscsi_session *session = conn->session; 693 struct beiscsi_hba *phba;
686 struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
687 struct beiscsi_hba *phba = iscsi_host_priv(shost);
688 unsigned int tag; 694 unsigned int tag;
689 unsigned short savecfg_flag = CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH; 695 unsigned short savecfg_flag = CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH;
690 696
691 beiscsi_ep = beiscsi_conn->ep; 697 beiscsi_ep = ep->dd_data;
692 if (!beiscsi_ep) { 698 phba = beiscsi_ep->phba;
693 SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_stop , no beiscsi_ep\n"); 699 SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect for ep_cid = %d\n",
700 beiscsi_ep->ep_cid);
701
702 if (!beiscsi_ep->conn) {
703 SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect, no "
704 "beiscsi_ep\n");
694 return; 705 return;
695 } 706 }
696 SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_stop ep_cid = %d\n", 707 beiscsi_conn = beiscsi_ep->conn;
697 beiscsi_ep->ep_cid); 708 iscsi_suspend_queue(beiscsi_conn->conn);
709
710 SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect ep_cid = %d\n",
711 beiscsi_ep->ep_cid);
712
698 tag = mgmt_invalidate_connection(phba, beiscsi_ep, 713 tag = mgmt_invalidate_connection(phba, beiscsi_ep,
699 beiscsi_ep->ep_cid, 1, 714 beiscsi_ep->ep_cid, 1,
700 savecfg_flag); 715 savecfg_flag);
701 if (!tag) { 716 if (!tag) {
702 SE_DEBUG(DBG_LVL_1, 717 SE_DEBUG(DBG_LVL_1,
703 "mgmt_invalidate_connection Failed for cid=%d \n", 718 "mgmt_invalidate_connection Failed for cid=%d\n",
704 beiscsi_ep->ep_cid); 719 beiscsi_ep->ep_cid);
705 } else { 720 } else {
706 wait_event_interruptible(phba->ctrl.mcc_wait[tag], 721 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
707 phba->ctrl.mcc_numtag[tag]); 722 phba->ctrl.mcc_numtag[tag]);
708 free_mcc_tag(&phba->ctrl, tag); 723 free_mcc_tag(&phba->ctrl, tag);
709 } 724 }
725
710 beiscsi_close_conn(beiscsi_ep, CONNECTION_UPLOAD_GRACEFUL); 726 beiscsi_close_conn(beiscsi_ep, CONNECTION_UPLOAD_GRACEFUL);
711 beiscsi_free_ep(beiscsi_ep); 727 beiscsi_free_ep(beiscsi_ep);
712 iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
713 beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid); 728 beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
714 iscsi_conn_stop(cls_conn, flag); 729 iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
715} 730}
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index 1f512c28cbf9..870cdb2a73e4 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -59,8 +59,6 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
59 59
60int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn); 60int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn);
61 61
62void beiscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag);
63
64struct iscsi_endpoint *beiscsi_ep_connect(struct Scsi_Host *shost, 62struct iscsi_endpoint *beiscsi_ep_connect(struct Scsi_Host *shost,
65 struct sockaddr *dst_addr, 63 struct sockaddr *dst_addr,
66 int non_blocking); 64 int non_blocking);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index dd5b105f8f47..7436c5ad5697 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -41,6 +41,8 @@
41static unsigned int be_iopoll_budget = 10; 41static unsigned int be_iopoll_budget = 10;
42static unsigned int be_max_phys_size = 64; 42static unsigned int be_max_phys_size = 64;
43static unsigned int enable_msix = 1; 43static unsigned int enable_msix = 1;
44static unsigned int gcrashmode = 0;
45static unsigned int num_hba = 0;
44 46
45MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table); 47MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
46MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR); 48MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
@@ -69,6 +71,7 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
69 struct beiscsi_hba *phba; 71 struct beiscsi_hba *phba;
70 struct iscsi_session *session; 72 struct iscsi_session *session;
71 struct invalidate_command_table *inv_tbl; 73 struct invalidate_command_table *inv_tbl;
74 struct be_dma_mem nonemb_cmd;
72 unsigned int cid, tag, num_invalidate; 75 unsigned int cid, tag, num_invalidate;
73 76
74 cls_session = starget_to_session(scsi_target(sc->device)); 77 cls_session = starget_to_session(scsi_target(sc->device));
@@ -99,18 +102,34 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
99 inv_tbl->cid = cid; 102 inv_tbl->cid = cid;
100 inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index; 103 inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
101 num_invalidate = 1; 104 num_invalidate = 1;
102 tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, cid); 105 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
106 sizeof(struct invalidate_commands_params_in),
107 &nonemb_cmd.dma);
108 if (nonemb_cmd.va == NULL) {
109 SE_DEBUG(DBG_LVL_1,
110 "Failed to allocate memory for"
111 "mgmt_invalidate_icds\n");
112 return FAILED;
113 }
114 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
115
116 tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
117 cid, &nonemb_cmd);
103 if (!tag) { 118 if (!tag) {
104 shost_printk(KERN_WARNING, phba->shost, 119 shost_printk(KERN_WARNING, phba->shost,
105 "mgmt_invalidate_icds could not be" 120 "mgmt_invalidate_icds could not be"
106 " submitted\n"); 121 " submitted\n");
122 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
123 nonemb_cmd.va, nonemb_cmd.dma);
124
107 return FAILED; 125 return FAILED;
108 } else { 126 } else {
109 wait_event_interruptible(phba->ctrl.mcc_wait[tag], 127 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
110 phba->ctrl.mcc_numtag[tag]); 128 phba->ctrl.mcc_numtag[tag]);
111 free_mcc_tag(&phba->ctrl, tag); 129 free_mcc_tag(&phba->ctrl, tag);
112 } 130 }
113 131 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
132 nonemb_cmd.va, nonemb_cmd.dma);
114 return iscsi_eh_abort(sc); 133 return iscsi_eh_abort(sc);
115} 134}
116 135
@@ -124,6 +143,7 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
124 struct iscsi_session *session; 143 struct iscsi_session *session;
125 struct iscsi_cls_session *cls_session; 144 struct iscsi_cls_session *cls_session;
126 struct invalidate_command_table *inv_tbl; 145 struct invalidate_command_table *inv_tbl;
146 struct be_dma_mem nonemb_cmd;
127 unsigned int cid, tag, i, num_invalidate; 147 unsigned int cid, tag, i, num_invalidate;
128 int rc = FAILED; 148 int rc = FAILED;
129 149
@@ -158,18 +178,33 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
158 spin_unlock_bh(&session->lock); 178 spin_unlock_bh(&session->lock);
159 inv_tbl = phba->inv_tbl; 179 inv_tbl = phba->inv_tbl;
160 180
161 tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, cid); 181 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
182 sizeof(struct invalidate_commands_params_in),
183 &nonemb_cmd.dma);
184 if (nonemb_cmd.va == NULL) {
185 SE_DEBUG(DBG_LVL_1,
186 "Failed to allocate memory for"
187 "mgmt_invalidate_icds\n");
188 return FAILED;
189 }
190 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
191 memset(nonemb_cmd.va, 0, nonemb_cmd.size);
192 tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
193 cid, &nonemb_cmd);
162 if (!tag) { 194 if (!tag) {
163 shost_printk(KERN_WARNING, phba->shost, 195 shost_printk(KERN_WARNING, phba->shost,
164 "mgmt_invalidate_icds could not be" 196 "mgmt_invalidate_icds could not be"
165 " submitted\n"); 197 " submitted\n");
198 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
199 nonemb_cmd.va, nonemb_cmd.dma);
166 return FAILED; 200 return FAILED;
167 } else { 201 } else {
168 wait_event_interruptible(phba->ctrl.mcc_wait[tag], 202 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
169 phba->ctrl.mcc_numtag[tag]); 203 phba->ctrl.mcc_numtag[tag]);
170 free_mcc_tag(&phba->ctrl, tag); 204 free_mcc_tag(&phba->ctrl, tag);
171 } 205 }
172 206 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
207 nonemb_cmd.va, nonemb_cmd.dma);
173 return iscsi_eh_device_reset(sc); 208 return iscsi_eh_device_reset(sc);
174unlock: 209unlock:
175 spin_unlock_bh(&session->lock); 210 spin_unlock_bh(&session->lock);
@@ -216,7 +251,7 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
216 shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0); 251 shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
217 if (!shost) { 252 if (!shost) {
218 dev_err(&pcidev->dev, "beiscsi_hba_alloc -" 253 dev_err(&pcidev->dev, "beiscsi_hba_alloc -"
219 "iscsi_host_alloc failed \n"); 254 "iscsi_host_alloc failed\n");
220 return NULL; 255 return NULL;
221 } 256 }
222 shost->dma_boundary = pcidev->dma_mask; 257 shost->dma_boundary = pcidev->dma_mask;
@@ -371,7 +406,7 @@ static void beiscsi_get_params(struct beiscsi_hba *phba)
371 + BE2_TMFS) / 512) + 1) * 512; 406 + BE2_TMFS) / 512) + 1) * 512;
372 phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024) 407 phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
373 ? 1024 : phba->params.num_eq_entries; 408 ? 1024 : phba->params.num_eq_entries;
374 SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d \n", 409 SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d\n",
375 phba->params.num_eq_entries); 410 phba->params.num_eq_entries);
376 phba->params.num_cq_entries = 411 phba->params.num_cq_entries =
377 (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2 412 (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
@@ -616,7 +651,7 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
616 struct pci_dev *pcidev = phba->pcidev; 651 struct pci_dev *pcidev = phba->pcidev;
617 struct hwi_controller *phwi_ctrlr; 652 struct hwi_controller *phwi_ctrlr;
618 struct hwi_context_memory *phwi_context; 653 struct hwi_context_memory *phwi_context;
619 int ret, msix_vec, i = 0; 654 int ret, msix_vec, i, j;
620 char desc[32]; 655 char desc[32];
621 656
622 phwi_ctrlr = phba->phwi_ctrlr; 657 phwi_ctrlr = phba->phwi_ctrlr;
@@ -628,10 +663,25 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
628 msix_vec = phba->msix_entries[i].vector; 663 msix_vec = phba->msix_entries[i].vector;
629 ret = request_irq(msix_vec, be_isr_msix, 0, desc, 664 ret = request_irq(msix_vec, be_isr_msix, 0, desc,
630 &phwi_context->be_eq[i]); 665 &phwi_context->be_eq[i]);
666 if (ret) {
667 shost_printk(KERN_ERR, phba->shost,
668 "beiscsi_init_irqs-Failed to"
669 "register msix for i = %d\n", i);
670 if (!i)
671 return ret;
672 goto free_msix_irqs;
673 }
631 } 674 }
632 msix_vec = phba->msix_entries[i].vector; 675 msix_vec = phba->msix_entries[i].vector;
633 ret = request_irq(msix_vec, be_isr_mcc, 0, "beiscsi_msix_mcc", 676 ret = request_irq(msix_vec, be_isr_mcc, 0, "beiscsi_msix_mcc",
634 &phwi_context->be_eq[i]); 677 &phwi_context->be_eq[i]);
678 if (ret) {
679 shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
680 "Failed to register beiscsi_msix_mcc\n");
681 i++;
682 goto free_msix_irqs;
683 }
684
635 } else { 685 } else {
636 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED, 686 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
637 "beiscsi", phba); 687 "beiscsi", phba);
@@ -642,6 +692,10 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
642 } 692 }
643 } 693 }
644 return 0; 694 return 0;
695free_msix_irqs:
696 for (j = i - 1; j == 0; j++)
697 free_irq(msix_vec, &phwi_context->be_eq[j]);
698 return ret;
645} 699}
646 700
647static void hwi_ring_cq_db(struct beiscsi_hba *phba, 701static void hwi_ring_cq_db(struct beiscsi_hba *phba,
@@ -692,7 +746,7 @@ beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
692 break; 746 break;
693 default: 747 default:
694 shost_printk(KERN_WARNING, phba->shost, 748 shost_printk(KERN_WARNING, phba->shost,
695 "Unrecognized opcode 0x%x in async msg \n", 749 "Unrecognized opcode 0x%x in async msg\n",
696 (ppdu-> 750 (ppdu->
697 dw[offsetof(struct amap_pdu_base, opcode) / 32] 751 dw[offsetof(struct amap_pdu_base, opcode) / 32]
698 & PDUBASE_OPCODE_MASK)); 752 & PDUBASE_OPCODE_MASK));
@@ -711,7 +765,7 @@ static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
711 765
712 if (phba->io_sgl_hndl_avbl) { 766 if (phba->io_sgl_hndl_avbl) {
713 SE_DEBUG(DBG_LVL_8, 767 SE_DEBUG(DBG_LVL_8,
714 "In alloc_io_sgl_handle,io_sgl_alloc_index=%d \n", 768 "In alloc_io_sgl_handle,io_sgl_alloc_index=%d\n",
715 phba->io_sgl_alloc_index); 769 phba->io_sgl_alloc_index);
716 psgl_handle = phba->io_sgl_hndl_base[phba-> 770 psgl_handle = phba->io_sgl_hndl_base[phba->
717 io_sgl_alloc_index]; 771 io_sgl_alloc_index];
@@ -730,7 +784,7 @@ static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
730static void 784static void
731free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) 785free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
732{ 786{
733 SE_DEBUG(DBG_LVL_8, "In free_,io_sgl_free_index=%d \n", 787 SE_DEBUG(DBG_LVL_8, "In free_,io_sgl_free_index=%d\n",
734 phba->io_sgl_free_index); 788 phba->io_sgl_free_index);
735 if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) { 789 if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
736 /* 790 /*
@@ -739,7 +793,7 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
739 */ 793 */
740 SE_DEBUG(DBG_LVL_8, 794 SE_DEBUG(DBG_LVL_8,
741 "Double Free in IO SGL io_sgl_free_index=%d," 795 "Double Free in IO SGL io_sgl_free_index=%d,"
742 "value there=%p \n", phba->io_sgl_free_index, 796 "value there=%p\n", phba->io_sgl_free_index,
743 phba->io_sgl_hndl_base[phba->io_sgl_free_index]); 797 phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
744 return; 798 return;
745 } 799 }
@@ -804,7 +858,7 @@ free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
804 858
805 SE_DEBUG(DBG_LVL_8, 859 SE_DEBUG(DBG_LVL_8,
806 "FREE WRB: pwrb_handle=%p free_index=0x%x" 860 "FREE WRB: pwrb_handle=%p free_index=0x%x"
807 "wrb_handles_available=%d \n", 861 "wrb_handles_available=%d\n",
808 pwrb_handle, pwrb_context->free_index, 862 pwrb_handle, pwrb_context->free_index,
809 pwrb_context->wrb_handles_available); 863 pwrb_context->wrb_handles_available);
810} 864}
@@ -816,7 +870,7 @@ static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
816 if (phba->eh_sgl_hndl_avbl) { 870 if (phba->eh_sgl_hndl_avbl) {
817 psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index]; 871 psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
818 phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL; 872 phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
819 SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x \n", 873 SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x\n",
820 phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index); 874 phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index);
821 phba->eh_sgl_hndl_avbl--; 875 phba->eh_sgl_hndl_avbl--;
822 if (phba->eh_sgl_alloc_index == 876 if (phba->eh_sgl_alloc_index ==
@@ -834,7 +888,7 @@ void
834free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) 888free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
835{ 889{
836 890
837 SE_DEBUG(DBG_LVL_8, "In free_mgmt_sgl_handle,eh_sgl_free_index=%d \n", 891 SE_DEBUG(DBG_LVL_8, "In free_mgmt_sgl_handle,eh_sgl_free_index=%d\n",
838 phba->eh_sgl_free_index); 892 phba->eh_sgl_free_index);
839 if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) { 893 if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
840 /* 894 /*
@@ -842,7 +896,7 @@ free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
842 * failed in xmit_task or alloc_pdu. 896 * failed in xmit_task or alloc_pdu.
843 */ 897 */
844 SE_DEBUG(DBG_LVL_8, 898 SE_DEBUG(DBG_LVL_8,
845 "Double Free in eh SGL ,eh_sgl_free_index=%d \n", 899 "Double Free in eh SGL ,eh_sgl_free_index=%d\n",
846 phba->eh_sgl_free_index); 900 phba->eh_sgl_free_index);
847 return; 901 return;
848 } 902 }
@@ -1081,7 +1135,7 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
1081 case HWH_TYPE_LOGIN: 1135 case HWH_TYPE_LOGIN:
1082 SE_DEBUG(DBG_LVL_1, 1136 SE_DEBUG(DBG_LVL_1,
1083 "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd" 1137 "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
1084 "- Solicited path \n"); 1138 "- Solicited path\n");
1085 break; 1139 break;
1086 1140
1087 case HWH_TYPE_NOP: 1141 case HWH_TYPE_NOP:
@@ -1164,7 +1218,7 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
1164 default: 1218 default:
1165 pbusy_list = NULL; 1219 pbusy_list = NULL;
1166 shost_printk(KERN_WARNING, phba->shost, 1220 shost_printk(KERN_WARNING, phba->shost,
1167 "Unexpected code=%d \n", 1221 "Unexpected code=%d\n",
1168 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, 1222 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1169 code) / 32] & PDUCQE_CODE_MASK); 1223 code) / 32] & PDUCQE_CODE_MASK);
1170 return NULL; 1224 return NULL;
@@ -1552,7 +1606,7 @@ static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
1552 else 1606 else
1553 SE_DEBUG(DBG_LVL_1, 1607 SE_DEBUG(DBG_LVL_1,
1554 " Unsupported Async Event, flags" 1608 " Unsupported Async Event, flags"
1555 " = 0x%08x \n", mcc_compl->flags); 1609 " = 0x%08x\n", mcc_compl->flags);
1556 } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) { 1610 } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
1557 be_mcc_compl_process_isr(&phba->ctrl, mcc_compl); 1611 be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
1558 atomic_dec(&phba->ctrl.mcc_obj.q.used); 1612 atomic_dec(&phba->ctrl.mcc_obj.q.used);
@@ -1611,7 +1665,7 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
1611 hwi_complete_cmd(beiscsi_conn, phba, sol); 1665 hwi_complete_cmd(beiscsi_conn, phba, sol);
1612 break; 1666 break;
1613 case DRIVERMSG_NOTIFY: 1667 case DRIVERMSG_NOTIFY:
1614 SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY \n"); 1668 SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY\n");
1615 dmsg = (struct dmsg_cqe *)sol; 1669 dmsg = (struct dmsg_cqe *)sol;
1616 hwi_complete_drvr_msgs(beiscsi_conn, phba, sol); 1670 hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
1617 break; 1671 break;
@@ -1782,9 +1836,9 @@ hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
1782 sg_len = sg_dma_len(sg); 1836 sg_len = sg_dma_len(sg);
1783 addr = (u64) sg_dma_address(sg); 1837 addr = (u64) sg_dma_address(sg);
1784 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb, 1838 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1785 (addr & 0xFFFFFFFF)); 1839 ((u32)(addr & 0xFFFFFFFF)));
1786 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb, 1840 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1787 (addr >> 32)); 1841 ((u32)(addr >> 32)));
1788 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb, 1842 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1789 sg_len); 1843 sg_len);
1790 sge_len = sg_len; 1844 sge_len = sg_len;
@@ -1794,9 +1848,9 @@ hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
1794 sg_len = sg_dma_len(sg); 1848 sg_len = sg_dma_len(sg);
1795 addr = (u64) sg_dma_address(sg); 1849 addr = (u64) sg_dma_address(sg);
1796 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb, 1850 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
1797 (addr & 0xFFFFFFFF)); 1851 ((u32)(addr & 0xFFFFFFFF)));
1798 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb, 1852 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
1799 (addr >> 32)); 1853 ((u32)(addr >> 32)));
1800 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb, 1854 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
1801 sg_len); 1855 sg_len);
1802 } 1856 }
@@ -1872,9 +1926,9 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
1872 addr = 0; 1926 addr = 0;
1873 } 1927 }
1874 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb, 1928 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1875 (addr & 0xFFFFFFFF)); 1929 ((u32)(addr & 0xFFFFFFFF)));
1876 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb, 1930 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1877 (addr >> 32)); 1931 ((u32)(addr >> 32)));
1878 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb, 1932 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1879 task->data_count); 1933 task->data_count);
1880 1934
@@ -1904,9 +1958,9 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
1904 psgl++; 1958 psgl++;
1905 if (task->data) { 1959 if (task->data) {
1906 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 1960 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1907 (addr & 0xFFFFFFFF)); 1961 ((u32)(addr & 0xFFFFFFFF)));
1908 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 1962 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1909 (addr >> 32)); 1963 ((u32)(addr >> 32)));
1910 } 1964 }
1911 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106); 1965 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
1912 } 1966 }
@@ -2054,7 +2108,8 @@ free_mem:
2054 mem_descr->mem_array[j - 1].size, 2108 mem_descr->mem_array[j - 1].size,
2055 mem_descr->mem_array[j - 1]. 2109 mem_descr->mem_array[j - 1].
2056 virtual_address, 2110 virtual_address,
2057 mem_descr->mem_array[j - 1]. 2111 (unsigned long)mem_descr->
2112 mem_array[j - 1].
2058 bus_address.u.a64.address); 2113 bus_address.u.a64.address);
2059 } 2114 }
2060 if (i) { 2115 if (i) {
@@ -2223,10 +2278,10 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2223 if (mem_descr->mem_array[0].virtual_address) { 2278 if (mem_descr->mem_array[0].virtual_address) {
2224 SE_DEBUG(DBG_LVL_8, 2279 SE_DEBUG(DBG_LVL_8,
2225 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF" 2280 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF"
2226 "va=%p \n", mem_descr->mem_array[0].virtual_address); 2281 "va=%p\n", mem_descr->mem_array[0].virtual_address);
2227 } else 2282 } else
2228 shost_printk(KERN_WARNING, phba->shost, 2283 shost_printk(KERN_WARNING, phba->shost,
2229 "No Virtual address \n"); 2284 "No Virtual address\n");
2230 2285
2231 pasync_ctx->async_header.va_base = 2286 pasync_ctx->async_header.va_base =
2232 mem_descr->mem_array[0].virtual_address; 2287 mem_descr->mem_array[0].virtual_address;
@@ -2239,10 +2294,10 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2239 if (mem_descr->mem_array[0].virtual_address) { 2294 if (mem_descr->mem_array[0].virtual_address) {
2240 SE_DEBUG(DBG_LVL_8, 2295 SE_DEBUG(DBG_LVL_8,
2241 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING" 2296 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING"
2242 "va=%p \n", mem_descr->mem_array[0].virtual_address); 2297 "va=%p\n", mem_descr->mem_array[0].virtual_address);
2243 } else 2298 } else
2244 shost_printk(KERN_WARNING, phba->shost, 2299 shost_printk(KERN_WARNING, phba->shost,
2245 "No Virtual address \n"); 2300 "No Virtual address\n");
2246 pasync_ctx->async_header.ring_base = 2301 pasync_ctx->async_header.ring_base =
2247 mem_descr->mem_array[0].virtual_address; 2302 mem_descr->mem_array[0].virtual_address;
2248 2303
@@ -2251,10 +2306,10 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2251 if (mem_descr->mem_array[0].virtual_address) { 2306 if (mem_descr->mem_array[0].virtual_address) {
2252 SE_DEBUG(DBG_LVL_8, 2307 SE_DEBUG(DBG_LVL_8,
2253 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE" 2308 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE"
2254 "va=%p \n", mem_descr->mem_array[0].virtual_address); 2309 "va=%p\n", mem_descr->mem_array[0].virtual_address);
2255 } else 2310 } else
2256 shost_printk(KERN_WARNING, phba->shost, 2311 shost_printk(KERN_WARNING, phba->shost,
2257 "No Virtual address \n"); 2312 "No Virtual address\n");
2258 2313
2259 pasync_ctx->async_header.handle_base = 2314 pasync_ctx->async_header.handle_base =
2260 mem_descr->mem_array[0].virtual_address; 2315 mem_descr->mem_array[0].virtual_address;
@@ -2266,10 +2321,10 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2266 if (mem_descr->mem_array[0].virtual_address) { 2321 if (mem_descr->mem_array[0].virtual_address) {
2267 SE_DEBUG(DBG_LVL_8, 2322 SE_DEBUG(DBG_LVL_8,
2268 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF" 2323 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
2269 "va=%p \n", mem_descr->mem_array[0].virtual_address); 2324 "va=%p\n", mem_descr->mem_array[0].virtual_address);
2270 } else 2325 } else
2271 shost_printk(KERN_WARNING, phba->shost, 2326 shost_printk(KERN_WARNING, phba->shost,
2272 "No Virtual address \n"); 2327 "No Virtual address\n");
2273 pasync_ctx->async_data.va_base = 2328 pasync_ctx->async_data.va_base =
2274 mem_descr->mem_array[0].virtual_address; 2329 mem_descr->mem_array[0].virtual_address;
2275 pasync_ctx->async_data.pa_base.u.a64.address = 2330 pasync_ctx->async_data.pa_base.u.a64.address =
@@ -2280,10 +2335,10 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2280 if (mem_descr->mem_array[0].virtual_address) { 2335 if (mem_descr->mem_array[0].virtual_address) {
2281 SE_DEBUG(DBG_LVL_8, 2336 SE_DEBUG(DBG_LVL_8,
2282 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING" 2337 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING"
2283 "va=%p \n", mem_descr->mem_array[0].virtual_address); 2338 "va=%p\n", mem_descr->mem_array[0].virtual_address);
2284 } else 2339 } else
2285 shost_printk(KERN_WARNING, phba->shost, 2340 shost_printk(KERN_WARNING, phba->shost,
2286 "No Virtual address \n"); 2341 "No Virtual address\n");
2287 2342
2288 pasync_ctx->async_data.ring_base = 2343 pasync_ctx->async_data.ring_base =
2289 mem_descr->mem_array[0].virtual_address; 2344 mem_descr->mem_array[0].virtual_address;
@@ -2292,7 +2347,7 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2292 mem_descr += HWI_MEM_ASYNC_DATA_HANDLE; 2347 mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
2293 if (!mem_descr->mem_array[0].virtual_address) 2348 if (!mem_descr->mem_array[0].virtual_address)
2294 shost_printk(KERN_WARNING, phba->shost, 2349 shost_printk(KERN_WARNING, phba->shost,
2295 "No Virtual address \n"); 2350 "No Virtual address\n");
2296 2351
2297 pasync_ctx->async_data.handle_base = 2352 pasync_ctx->async_data.handle_base =
2298 mem_descr->mem_array[0].virtual_address; 2353 mem_descr->mem_array[0].virtual_address;
@@ -2364,7 +2419,7 @@ be_sgl_create_contiguous(void *virtual_address,
2364 WARN_ON(!sgl); 2419 WARN_ON(!sgl);
2365 2420
2366 sgl->va = virtual_address; 2421 sgl->va = virtual_address;
2367 sgl->dma = physical_address; 2422 sgl->dma = (unsigned long)physical_address;
2368 sgl->size = length; 2423 sgl->size = length;
2369 2424
2370 return 0; 2425 return 0;
@@ -2447,7 +2502,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
2447 sizeof(struct be_eq_entry), eq_vaddress); 2502 sizeof(struct be_eq_entry), eq_vaddress);
2448 if (ret) { 2503 if (ret) {
2449 shost_printk(KERN_ERR, phba->shost, 2504 shost_printk(KERN_ERR, phba->shost,
2450 "be_fill_queue Failed for EQ \n"); 2505 "be_fill_queue Failed for EQ\n");
2451 goto create_eq_error; 2506 goto create_eq_error;
2452 } 2507 }
2453 2508
@@ -2457,7 +2512,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
2457 if (ret) { 2512 if (ret) {
2458 shost_printk(KERN_ERR, phba->shost, 2513 shost_printk(KERN_ERR, phba->shost,
2459 "beiscsi_cmd_eq_create" 2514 "beiscsi_cmd_eq_create"
2460 "Failedfor EQ \n"); 2515 "Failedfor EQ\n");
2461 goto create_eq_error; 2516 goto create_eq_error;
2462 } 2517 }
2463 SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id); 2518 SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id);
@@ -2505,7 +2560,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
2505 sizeof(struct sol_cqe), cq_vaddress); 2560 sizeof(struct sol_cqe), cq_vaddress);
2506 if (ret) { 2561 if (ret) {
2507 shost_printk(KERN_ERR, phba->shost, 2562 shost_printk(KERN_ERR, phba->shost,
2508 "be_fill_queue Failed for ISCSI CQ \n"); 2563 "be_fill_queue Failed for ISCSI CQ\n");
2509 goto create_cq_error; 2564 goto create_cq_error;
2510 } 2565 }
2511 2566
@@ -2515,7 +2570,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
2515 if (ret) { 2570 if (ret) {
2516 shost_printk(KERN_ERR, phba->shost, 2571 shost_printk(KERN_ERR, phba->shost,
2517 "beiscsi_cmd_eq_create" 2572 "beiscsi_cmd_eq_create"
2518 "Failed for ISCSI CQ \n"); 2573 "Failed for ISCSI CQ\n");
2519 goto create_cq_error; 2574 goto create_cq_error;
2520 } 2575 }
2521 SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n", 2576 SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n",
@@ -2565,7 +2620,8 @@ beiscsi_create_def_hdr(struct beiscsi_hba *phba,
2565 "be_fill_queue Failed for DEF PDU HDR\n"); 2620 "be_fill_queue Failed for DEF PDU HDR\n");
2566 return ret; 2621 return ret;
2567 } 2622 }
2568 mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address; 2623 mem->dma = (unsigned long)mem_descr->mem_array[idx].
2624 bus_address.u.a64.address;
2569 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq, 2625 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
2570 def_pdu_ring_sz, 2626 def_pdu_ring_sz,
2571 phba->params.defpdu_hdr_sz); 2627 phba->params.defpdu_hdr_sz);
@@ -2609,7 +2665,8 @@ beiscsi_create_def_data(struct beiscsi_hba *phba,
2609 "be_fill_queue Failed for DEF PDU DATA\n"); 2665 "be_fill_queue Failed for DEF PDU DATA\n");
2610 return ret; 2666 return ret;
2611 } 2667 }
2612 mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address; 2668 mem->dma = (unsigned long)mem_descr->mem_array[idx].
2669 bus_address.u.a64.address;
2613 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq, 2670 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
2614 def_pdu_ring_sz, 2671 def_pdu_ring_sz,
2615 phba->params.defpdu_data_sz); 2672 phba->params.defpdu_data_sz);
@@ -2623,7 +2680,7 @@ beiscsi_create_def_data(struct beiscsi_hba *phba,
2623 SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n", 2680 SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n",
2624 phwi_context->be_def_dataq.id); 2681 phwi_context->be_def_dataq.id);
2625 hwi_post_async_buffers(phba, 0); 2682 hwi_post_async_buffers(phba, 0);
2626 SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED \n"); 2683 SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED\n");
2627 return 0; 2684 return 0;
2628} 2685}
2629 2686
@@ -2655,7 +2712,7 @@ beiscsi_post_pages(struct beiscsi_hba *phba)
2655 } 2712 }
2656 pm_arr++; 2713 pm_arr++;
2657 } 2714 }
2658 SE_DEBUG(DBG_LVL_8, "POSTED PAGES \n"); 2715 SE_DEBUG(DBG_LVL_8, "POSTED PAGES\n");
2659 return 0; 2716 return 0;
2660} 2717}
2661 2718
@@ -2678,7 +2735,7 @@ static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
2678 mem->size = len * entry_size; 2735 mem->size = len * entry_size;
2679 mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma); 2736 mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
2680 if (!mem->va) 2737 if (!mem->va)
2681 return -1; 2738 return -ENOMEM;
2682 memset(mem->va, 0, mem->size); 2739 memset(mem->va, 0, mem->size);
2683 return 0; 2740 return 0;
2684} 2741}
@@ -2750,6 +2807,7 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
2750 if (status != 0) { 2807 if (status != 0) {
2751 shost_printk(KERN_ERR, phba->shost, 2808 shost_printk(KERN_ERR, phba->shost,
2752 "wrbq create failed."); 2809 "wrbq create failed.");
2810 kfree(pwrb_arr);
2753 return status; 2811 return status;
2754 } 2812 }
2755 phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i]. 2813 phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i].
@@ -2873,7 +2931,7 @@ mcc_cq_destroy:
2873mcc_cq_free: 2931mcc_cq_free:
2874 be_queue_free(phba, cq); 2932 be_queue_free(phba, cq);
2875err: 2933err:
2876 return -1; 2934 return -ENOMEM;
2877} 2935}
2878 2936
2879static int find_num_cpus(void) 2937static int find_num_cpus(void)
@@ -2884,7 +2942,7 @@ static int find_num_cpus(void)
2884 if (num_cpus >= MAX_CPUS) 2942 if (num_cpus >= MAX_CPUS)
2885 num_cpus = MAX_CPUS - 1; 2943 num_cpus = MAX_CPUS - 1;
2886 2944
2887 SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", num_cpus); 2945 SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", num_cpus);
2888 return num_cpus; 2946 return num_cpus;
2889} 2947}
2890 2948
@@ -2907,7 +2965,7 @@ static int hwi_init_port(struct beiscsi_hba *phba)
2907 2965
2908 status = beiscsi_create_eqs(phba, phwi_context); 2966 status = beiscsi_create_eqs(phba, phwi_context);
2909 if (status != 0) { 2967 if (status != 0) {
2910 shost_printk(KERN_ERR, phba->shost, "EQ not created \n"); 2968 shost_printk(KERN_ERR, phba->shost, "EQ not created\n");
2911 goto error; 2969 goto error;
2912 } 2970 }
2913 2971
@@ -2918,7 +2976,7 @@ static int hwi_init_port(struct beiscsi_hba *phba)
2918 status = mgmt_check_supported_fw(ctrl, phba); 2976 status = mgmt_check_supported_fw(ctrl, phba);
2919 if (status != 0) { 2977 if (status != 0) {
2920 shost_printk(KERN_ERR, phba->shost, 2978 shost_printk(KERN_ERR, phba->shost,
2921 "Unsupported fw version \n"); 2979 "Unsupported fw version\n");
2922 goto error; 2980 goto error;
2923 } 2981 }
2924 2982
@@ -2974,7 +3032,7 @@ static int hwi_init_controller(struct beiscsi_hba *phba)
2974 if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) { 3032 if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
2975 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba-> 3033 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
2976 init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address; 3034 init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
2977 SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p \n", 3035 SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p\n",
2978 phwi_ctrlr->phwi_ctxt); 3036 phwi_ctrlr->phwi_ctxt);
2979 } else { 3037 } else {
2980 shost_printk(KERN_ERR, phba->shost, 3038 shost_printk(KERN_ERR, phba->shost,
@@ -3007,8 +3065,8 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba)
3007 pci_free_consistent(phba->pcidev, 3065 pci_free_consistent(phba->pcidev,
3008 mem_descr->mem_array[j - 1].size, 3066 mem_descr->mem_array[j - 1].size,
3009 mem_descr->mem_array[j - 1].virtual_address, 3067 mem_descr->mem_array[j - 1].virtual_address,
3010 mem_descr->mem_array[j - 1].bus_address. 3068 (unsigned long)mem_descr->mem_array[j - 1].
3011 u.a64.address); 3069 bus_address.u.a64.address);
3012 } 3070 }
3013 kfree(mem_descr->mem_array); 3071 kfree(mem_descr->mem_array);
3014 mem_descr++; 3072 mem_descr++;
@@ -3024,7 +3082,7 @@ static int beiscsi_init_controller(struct beiscsi_hba *phba)
3024 ret = beiscsi_get_memory(phba); 3082 ret = beiscsi_get_memory(phba);
3025 if (ret < 0) { 3083 if (ret < 0) {
3026 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe -" 3084 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe -"
3027 "Failed in beiscsi_alloc_memory \n"); 3085 "Failed in beiscsi_alloc_memory\n");
3028 return ret; 3086 return ret;
3029 } 3087 }
3030 3088
@@ -3101,12 +3159,12 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
3101 } 3159 }
3102 SE_DEBUG(DBG_LVL_8, 3160 SE_DEBUG(DBG_LVL_8,
3103 "phba->io_sgl_hndl_avbl=%d" 3161 "phba->io_sgl_hndl_avbl=%d"
3104 "phba->eh_sgl_hndl_avbl=%d \n", 3162 "phba->eh_sgl_hndl_avbl=%d\n",
3105 phba->io_sgl_hndl_avbl, 3163 phba->io_sgl_hndl_avbl,
3106 phba->eh_sgl_hndl_avbl); 3164 phba->eh_sgl_hndl_avbl);
3107 mem_descr_sg = phba->init_mem; 3165 mem_descr_sg = phba->init_mem;
3108 mem_descr_sg += HWI_MEM_SGE; 3166 mem_descr_sg += HWI_MEM_SGE;
3109 SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d \n", 3167 SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d\n",
3110 mem_descr_sg->num_elements); 3168 mem_descr_sg->num_elements);
3111 arr_index = 0; 3169 arr_index = 0;
3112 idx = 0; 3170 idx = 0;
@@ -3155,7 +3213,7 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3155 if (!phba->ep_array) { 3213 if (!phba->ep_array) {
3156 shost_printk(KERN_ERR, phba->shost, 3214 shost_printk(KERN_ERR, phba->shost,
3157 "Failed to allocate memory in " 3215 "Failed to allocate memory in "
3158 "hba_setup_cid_tbls \n"); 3216 "hba_setup_cid_tbls\n");
3159 kfree(phba->cid_array); 3217 kfree(phba->cid_array);
3160 return -ENOMEM; 3218 return -ENOMEM;
3161 } 3219 }
@@ -3168,7 +3226,7 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3168 return 0; 3226 return 0;
3169} 3227}
3170 3228
3171static unsigned char hwi_enable_intr(struct beiscsi_hba *phba) 3229static void hwi_enable_intr(struct beiscsi_hba *phba)
3172{ 3230{
3173 struct be_ctrl_info *ctrl = &phba->ctrl; 3231 struct be_ctrl_info *ctrl = &phba->ctrl;
3174 struct hwi_controller *phwi_ctrlr; 3232 struct hwi_controller *phwi_ctrlr;
@@ -3184,26 +3242,25 @@ static unsigned char hwi_enable_intr(struct beiscsi_hba *phba)
3184 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg + 3242 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
3185 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET); 3243 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
3186 reg = ioread32(addr); 3244 reg = ioread32(addr);
3187 SE_DEBUG(DBG_LVL_8, "reg =x%08x \n", reg); 3245 SE_DEBUG(DBG_LVL_8, "reg =x%08x\n", reg);
3188 3246
3189 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 3247 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3190 if (!enabled) { 3248 if (!enabled) {
3191 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 3249 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3192 SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p \n", reg, addr); 3250 SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p\n", reg, addr);
3193 iowrite32(reg, addr); 3251 iowrite32(reg, addr);
3194 if (!phba->msix_enabled) { 3252 if (!phba->msix_enabled) {
3195 eq = &phwi_context->be_eq[0].q; 3253 eq = &phwi_context->be_eq[0].q;
3196 SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id); 3254 SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
3197 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); 3255 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3198 } else { 3256 } else {
3199 for (i = 0; i <= phba->num_cpus; i++) { 3257 for (i = 0; i <= phba->num_cpus; i++) {
3200 eq = &phwi_context->be_eq[i].q; 3258 eq = &phwi_context->be_eq[i].q;
3201 SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id); 3259 SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
3202 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); 3260 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3203 } 3261 }
3204 } 3262 }
3205 } 3263 }
3206 return true;
3207} 3264}
3208 3265
3209static void hwi_disable_intr(struct beiscsi_hba *phba) 3266static void hwi_disable_intr(struct beiscsi_hba *phba)
@@ -3219,7 +3276,7 @@ static void hwi_disable_intr(struct beiscsi_hba *phba)
3219 iowrite32(reg, addr); 3276 iowrite32(reg, addr);
3220 } else 3277 } else
3221 shost_printk(KERN_WARNING, phba->shost, 3278 shost_printk(KERN_WARNING, phba->shost,
3222 "In hwi_disable_intr, Already Disabled \n"); 3279 "In hwi_disable_intr, Already Disabled\n");
3223} 3280}
3224 3281
3225static int beiscsi_init_port(struct beiscsi_hba *phba) 3282static int beiscsi_init_port(struct beiscsi_hba *phba)
@@ -3230,14 +3287,14 @@ static int beiscsi_init_port(struct beiscsi_hba *phba)
3230 if (ret < 0) { 3287 if (ret < 0) {
3231 shost_printk(KERN_ERR, phba->shost, 3288 shost_printk(KERN_ERR, phba->shost,
3232 "beiscsi_dev_probe - Failed in" 3289 "beiscsi_dev_probe - Failed in"
3233 "beiscsi_init_controller \n"); 3290 "beiscsi_init_controller\n");
3234 return ret; 3291 return ret;
3235 } 3292 }
3236 ret = beiscsi_init_sgl_handle(phba); 3293 ret = beiscsi_init_sgl_handle(phba);
3237 if (ret < 0) { 3294 if (ret < 0) {
3238 shost_printk(KERN_ERR, phba->shost, 3295 shost_printk(KERN_ERR, phba->shost,
3239 "beiscsi_dev_probe - Failed in" 3296 "beiscsi_dev_probe - Failed in"
3240 "beiscsi_init_sgl_handle \n"); 3297 "beiscsi_init_sgl_handle\n");
3241 goto do_cleanup_ctrlr; 3298 goto do_cleanup_ctrlr;
3242 } 3299 }
3243 3300
@@ -3291,12 +3348,12 @@ static void hwi_purge_eq(struct beiscsi_hba *phba)
3291 3348
3292static void beiscsi_clean_port(struct beiscsi_hba *phba) 3349static void beiscsi_clean_port(struct beiscsi_hba *phba)
3293{ 3350{
3294 unsigned char mgmt_status; 3351 int mgmt_status;
3295 3352
3296 mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0); 3353 mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
3297 if (mgmt_status) 3354 if (mgmt_status)
3298 shost_printk(KERN_WARNING, phba->shost, 3355 shost_printk(KERN_WARNING, phba->shost,
3299 "mgmt_epfw_cleanup FAILED \n"); 3356 "mgmt_epfw_cleanup FAILED\n");
3300 3357
3301 hwi_purge_eq(phba); 3358 hwi_purge_eq(phba);
3302 hwi_cleanup(phba); 3359 hwi_cleanup(phba);
@@ -3428,14 +3485,12 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
3428 return -ENOMEM; 3485 return -ENOMEM;
3429 io_task->bhs_pa.u.a64.address = paddr; 3486 io_task->bhs_pa.u.a64.address = paddr;
3430 io_task->libiscsi_itt = (itt_t)task->itt; 3487 io_task->libiscsi_itt = (itt_t)task->itt;
3431 io_task->pwrb_handle = alloc_wrb_handle(phba,
3432 beiscsi_conn->beiscsi_conn_cid -
3433 phba->fw_config.iscsi_cid_start
3434 );
3435 io_task->conn = beiscsi_conn; 3488 io_task->conn = beiscsi_conn;
3436 3489
3437 task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr; 3490 task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
3438 task->hdr_max = sizeof(struct be_cmd_bhs); 3491 task->hdr_max = sizeof(struct be_cmd_bhs);
3492 io_task->psgl_handle = NULL;
3493 io_task->psgl_handle = NULL;
3439 3494
3440 if (task->sc) { 3495 if (task->sc) {
3441 spin_lock(&phba->io_sgl_lock); 3496 spin_lock(&phba->io_sgl_lock);
@@ -3443,6 +3498,11 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
3443 spin_unlock(&phba->io_sgl_lock); 3498 spin_unlock(&phba->io_sgl_lock);
3444 if (!io_task->psgl_handle) 3499 if (!io_task->psgl_handle)
3445 goto free_hndls; 3500 goto free_hndls;
3501 io_task->pwrb_handle = alloc_wrb_handle(phba,
3502 beiscsi_conn->beiscsi_conn_cid -
3503 phba->fw_config.iscsi_cid_start);
3504 if (!io_task->pwrb_handle)
3505 goto free_io_hndls;
3446 } else { 3506 } else {
3447 io_task->scsi_cmnd = NULL; 3507 io_task->scsi_cmnd = NULL;
3448 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) { 3508 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
@@ -3457,9 +3517,20 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
3457 beiscsi_conn->login_in_progress = 1; 3517 beiscsi_conn->login_in_progress = 1;
3458 beiscsi_conn->plogin_sgl_handle = 3518 beiscsi_conn->plogin_sgl_handle =
3459 io_task->psgl_handle; 3519 io_task->psgl_handle;
3520 io_task->pwrb_handle =
3521 alloc_wrb_handle(phba,
3522 beiscsi_conn->beiscsi_conn_cid -
3523 phba->fw_config.iscsi_cid_start);
3524 if (!io_task->pwrb_handle)
3525 goto free_io_hndls;
3526 beiscsi_conn->plogin_wrb_handle =
3527 io_task->pwrb_handle;
3528
3460 } else { 3529 } else {
3461 io_task->psgl_handle = 3530 io_task->psgl_handle =
3462 beiscsi_conn->plogin_sgl_handle; 3531 beiscsi_conn->plogin_sgl_handle;
3532 io_task->pwrb_handle =
3533 beiscsi_conn->plogin_wrb_handle;
3463 } 3534 }
3464 } else { 3535 } else {
3465 spin_lock(&phba->mgmt_sgl_lock); 3536 spin_lock(&phba->mgmt_sgl_lock);
@@ -3467,6 +3538,13 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
3467 spin_unlock(&phba->mgmt_sgl_lock); 3538 spin_unlock(&phba->mgmt_sgl_lock);
3468 if (!io_task->psgl_handle) 3539 if (!io_task->psgl_handle)
3469 goto free_hndls; 3540 goto free_hndls;
3541 io_task->pwrb_handle =
3542 alloc_wrb_handle(phba,
3543 beiscsi_conn->beiscsi_conn_cid -
3544 phba->fw_config.iscsi_cid_start);
3545 if (!io_task->pwrb_handle)
3546 goto free_mgmt_hndls;
3547
3470 } 3548 }
3471 } 3549 }
3472 itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle-> 3550 itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
@@ -3477,16 +3555,26 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
3477 io_task->cmd_bhs->iscsi_hdr.itt = itt; 3555 io_task->cmd_bhs->iscsi_hdr.itt = itt;
3478 return 0; 3556 return 0;
3479 3557
3558free_io_hndls:
3559 spin_lock(&phba->io_sgl_lock);
3560 free_io_sgl_handle(phba, io_task->psgl_handle);
3561 spin_unlock(&phba->io_sgl_lock);
3562 goto free_hndls;
3563free_mgmt_hndls:
3564 spin_lock(&phba->mgmt_sgl_lock);
3565 free_mgmt_sgl_handle(phba, io_task->psgl_handle);
3566 spin_unlock(&phba->mgmt_sgl_lock);
3480free_hndls: 3567free_hndls:
3481 phwi_ctrlr = phba->phwi_ctrlr; 3568 phwi_ctrlr = phba->phwi_ctrlr;
3482 pwrb_context = &phwi_ctrlr->wrb_context[ 3569 pwrb_context = &phwi_ctrlr->wrb_context[
3483 beiscsi_conn->beiscsi_conn_cid - 3570 beiscsi_conn->beiscsi_conn_cid -
3484 phba->fw_config.iscsi_cid_start]; 3571 phba->fw_config.iscsi_cid_start];
3485 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); 3572 if (io_task->pwrb_handle)
3573 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3486 io_task->pwrb_handle = NULL; 3574 io_task->pwrb_handle = NULL;
3487 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, 3575 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3488 io_task->bhs_pa.u.a64.address); 3576 io_task->bhs_pa.u.a64.address);
3489 SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed \n"); 3577 SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed\n");
3490 return -ENOMEM; 3578 return -ENOMEM;
3491} 3579}
3492 3580
@@ -3653,7 +3741,7 @@ static int beiscsi_mtask(struct iscsi_task *task)
3653 break; 3741 break;
3654 3742
3655 default: 3743 default:
3656 SE_DEBUG(DBG_LVL_1, "opcode =%d Not supported \n", 3744 SE_DEBUG(DBG_LVL_1, "opcode =%d Not supported\n",
3657 task->hdr->opcode & ISCSI_OPCODE_MASK); 3745 task->hdr->opcode & ISCSI_OPCODE_MASK);
3658 return -EINVAL; 3746 return -EINVAL;
3659 } 3747 }
@@ -3689,13 +3777,11 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
3689 SE_DEBUG(DBG_LVL_1, " scsi_dma_map Failed\n") 3777 SE_DEBUG(DBG_LVL_1, " scsi_dma_map Failed\n")
3690 return num_sg; 3778 return num_sg;
3691 } 3779 }
3692 SE_DEBUG(DBG_LVL_4, "xferlen=0x%08x scmd=%p num_sg=%d sernum=%lu\n",
3693 (scsi_bufflen(sc)), sc, num_sg, sc->serial_number);
3694 xferlen = scsi_bufflen(sc); 3780 xferlen = scsi_bufflen(sc);
3695 sg = scsi_sglist(sc); 3781 sg = scsi_sglist(sc);
3696 if (sc->sc_data_direction == DMA_TO_DEVICE) { 3782 if (sc->sc_data_direction == DMA_TO_DEVICE) {
3697 writedir = 1; 3783 writedir = 1;
3698 SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x \n", 3784 SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x\n",
3699 task->imm_count); 3785 task->imm_count);
3700 } else 3786 } else
3701 writedir = 0; 3787 writedir = 0;
@@ -3709,10 +3795,12 @@ static void beiscsi_remove(struct pci_dev *pcidev)
3709 struct hwi_context_memory *phwi_context; 3795 struct hwi_context_memory *phwi_context;
3710 struct be_eq_obj *pbe_eq; 3796 struct be_eq_obj *pbe_eq;
3711 unsigned int i, msix_vec; 3797 unsigned int i, msix_vec;
3798 u8 *real_offset = 0;
3799 u32 value = 0;
3712 3800
3713 phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev); 3801 phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
3714 if (!phba) { 3802 if (!phba) {
3715 dev_err(&pcidev->dev, "beiscsi_remove called with no phba \n"); 3803 dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
3716 return; 3804 return;
3717 } 3805 }
3718 3806
@@ -3737,6 +3825,14 @@ static void beiscsi_remove(struct pci_dev *pcidev)
3737 3825
3738 beiscsi_clean_port(phba); 3826 beiscsi_clean_port(phba);
3739 beiscsi_free_mem(phba); 3827 beiscsi_free_mem(phba);
3828 real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
3829
3830 value = readl((void *)real_offset);
3831
3832 if (value & 0x00010000) {
3833 value &= 0xfffeffff;
3834 writel(value, (void *)real_offset);
3835 }
3740 beiscsi_unmap_pci_function(phba); 3836 beiscsi_unmap_pci_function(phba);
3741 pci_free_consistent(phba->pcidev, 3837 pci_free_consistent(phba->pcidev,
3742 phba->ctrl.mbox_mem_alloced.size, 3838 phba->ctrl.mbox_mem_alloced.size,
@@ -3769,19 +3865,21 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3769 struct hwi_controller *phwi_ctrlr; 3865 struct hwi_controller *phwi_ctrlr;
3770 struct hwi_context_memory *phwi_context; 3866 struct hwi_context_memory *phwi_context;
3771 struct be_eq_obj *pbe_eq; 3867 struct be_eq_obj *pbe_eq;
3772 int ret, msix_vec, num_cpus, i; 3868 int ret, num_cpus, i;
3869 u8 *real_offset = 0;
3870 u32 value = 0;
3773 3871
3774 ret = beiscsi_enable_pci(pcidev); 3872 ret = beiscsi_enable_pci(pcidev);
3775 if (ret < 0) { 3873 if (ret < 0) {
3776 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" 3874 dev_err(&pcidev->dev, "beiscsi_dev_probe-"
3777 "Failed to enable pci device \n"); 3875 " Failed to enable pci device\n");
3778 return ret; 3876 return ret;
3779 } 3877 }
3780 3878
3781 phba = beiscsi_hba_alloc(pcidev); 3879 phba = beiscsi_hba_alloc(pcidev);
3782 if (!phba) { 3880 if (!phba) {
3783 dev_err(&pcidev->dev, "beiscsi_dev_probe-" 3881 dev_err(&pcidev->dev, "beiscsi_dev_probe-"
3784 " Failed in beiscsi_hba_alloc \n"); 3882 " Failed in beiscsi_hba_alloc\n");
3785 goto disable_pci; 3883 goto disable_pci;
3786 } 3884 }
3787 3885
@@ -3804,7 +3902,7 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3804 else 3902 else
3805 num_cpus = 1; 3903 num_cpus = 1;
3806 phba->num_cpus = num_cpus; 3904 phba->num_cpus = num_cpus;
3807 SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", phba->num_cpus); 3905 SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", phba->num_cpus);
3808 3906
3809 if (enable_msix) 3907 if (enable_msix)
3810 beiscsi_msix_enable(phba); 3908 beiscsi_msix_enable(phba);
@@ -3815,6 +3913,33 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3815 goto hba_free; 3913 goto hba_free;
3816 } 3914 }
3817 3915
3916 if (!num_hba) {
3917 real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
3918 value = readl((void *)real_offset);
3919 if (value & 0x00010000) {
3920 gcrashmode++;
3921 shost_printk(KERN_ERR, phba->shost,
3922 "Loading Driver in crashdump mode\n");
3923 ret = beiscsi_pci_soft_reset(phba);
3924 if (ret) {
3925 shost_printk(KERN_ERR, phba->shost,
3926 "Reset Failed. Aborting Crashdump\n");
3927 goto hba_free;
3928 }
3929 ret = be_chk_reset_complete(phba);
3930 if (ret) {
3931 shost_printk(KERN_ERR, phba->shost,
3932 "Failed to get out of reset."
3933 "Aborting Crashdump\n");
3934 goto hba_free;
3935 }
3936 } else {
3937 value |= 0x00010000;
3938 writel(value, (void *)real_offset);
3939 num_hba++;
3940 }
3941 }
3942
3818 spin_lock_init(&phba->io_sgl_lock); 3943 spin_lock_init(&phba->io_sgl_lock);
3819 spin_lock_init(&phba->mgmt_sgl_lock); 3944 spin_lock_init(&phba->mgmt_sgl_lock);
3820 spin_lock_init(&phba->isr_lock); 3945 spin_lock_init(&phba->isr_lock);
@@ -3870,25 +3995,10 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3870 "Failed to beiscsi_init_irqs\n"); 3995 "Failed to beiscsi_init_irqs\n");
3871 goto free_blkenbld; 3996 goto free_blkenbld;
3872 } 3997 }
3873 ret = hwi_enable_intr(phba); 3998 hwi_enable_intr(phba);
3874 if (ret < 0) { 3999 SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED\n\n\n");
3875 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3876 "Failed to hwi_enable_intr\n");
3877 goto free_ctrlr;
3878 }
3879 SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED \n\n\n");
3880 return 0; 4000 return 0;
3881 4001
3882free_ctrlr:
3883 if (phba->msix_enabled) {
3884 for (i = 0; i <= phba->num_cpus; i++) {
3885 msix_vec = phba->msix_entries[i].vector;
3886 free_irq(msix_vec, &phwi_context->be_eq[i]);
3887 }
3888 } else
3889 if (phba->pcidev->irq)
3890 free_irq(phba->pcidev->irq, phba);
3891 pci_disable_msix(phba->pcidev);
3892free_blkenbld: 4002free_blkenbld:
3893 destroy_workqueue(phba->wq); 4003 destroy_workqueue(phba->wq);
3894 if (blk_iopoll_enabled) 4004 if (blk_iopoll_enabled)
@@ -3900,12 +4010,23 @@ free_twq:
3900 beiscsi_clean_port(phba); 4010 beiscsi_clean_port(phba);
3901 beiscsi_free_mem(phba); 4011 beiscsi_free_mem(phba);
3902free_port: 4012free_port:
4013 real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
4014
4015 value = readl((void *)real_offset);
4016
4017 if (value & 0x00010000) {
4018 value &= 0xfffeffff;
4019 writel(value, (void *)real_offset);
4020 }
4021
3903 pci_free_consistent(phba->pcidev, 4022 pci_free_consistent(phba->pcidev,
3904 phba->ctrl.mbox_mem_alloced.size, 4023 phba->ctrl.mbox_mem_alloced.size,
3905 phba->ctrl.mbox_mem_alloced.va, 4024 phba->ctrl.mbox_mem_alloced.va,
3906 phba->ctrl.mbox_mem_alloced.dma); 4025 phba->ctrl.mbox_mem_alloced.dma);
3907 beiscsi_unmap_pci_function(phba); 4026 beiscsi_unmap_pci_function(phba);
3908hba_free: 4027hba_free:
4028 if (phba->msix_enabled)
4029 pci_disable_msix(phba->pcidev);
3909 iscsi_host_remove(phba->shost); 4030 iscsi_host_remove(phba->shost);
3910 pci_dev_put(phba->pcidev); 4031 pci_dev_put(phba->pcidev);
3911 iscsi_host_free(phba->shost); 4032 iscsi_host_free(phba->shost);
@@ -3955,7 +4076,7 @@ struct iscsi_transport beiscsi_iscsi_transport = {
3955 .get_session_param = iscsi_session_get_param, 4076 .get_session_param = iscsi_session_get_param,
3956 .get_host_param = beiscsi_get_host_param, 4077 .get_host_param = beiscsi_get_host_param,
3957 .start_conn = beiscsi_conn_start, 4078 .start_conn = beiscsi_conn_start,
3958 .stop_conn = beiscsi_conn_stop, 4079 .stop_conn = iscsi_conn_stop,
3959 .send_pdu = iscsi_conn_send_pdu, 4080 .send_pdu = iscsi_conn_send_pdu,
3960 .xmit_task = beiscsi_task_xmit, 4081 .xmit_task = beiscsi_task_xmit,
3961 .cleanup_task = beiscsi_cleanup_task, 4082 .cleanup_task = beiscsi_cleanup_task,
@@ -3988,7 +4109,7 @@ static int __init beiscsi_module_init(void)
3988 "transport.\n"); 4109 "transport.\n");
3989 return -ENOMEM; 4110 return -ENOMEM;
3990 } 4111 }
3991 SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p \n", 4112 SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p\n",
3992 &beiscsi_iscsi_transport); 4113 &beiscsi_iscsi_transport);
3993 4114
3994 ret = pci_register_driver(&beiscsi_pci_driver); 4115 ret = pci_register_driver(&beiscsi_pci_driver);
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 87ec21280a37..c643bb3736fc 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -23,6 +23,7 @@
23 23
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/pci.h> 25#include <linux/pci.h>
26#include <linux/if_ether.h>
26#include <linux/in.h> 27#include <linux/in.h>
27#include <scsi/scsi.h> 28#include <scsi/scsi.h>
28#include <scsi/scsi_cmnd.h> 29#include <scsi/scsi_cmnd.h>
@@ -39,7 +40,7 @@
39 "Linux iSCSI Driver version" BUILD_STR 40 "Linux iSCSI Driver version" BUILD_STR
40#define DRV_DESC BE_NAME " " "Driver" 41#define DRV_DESC BE_NAME " " "Driver"
41 42
42#define BE_VENDOR_ID 0x19A2 43#define BE_VENDOR_ID 0x19A2
43/* DEVICE ID's for BE2 */ 44/* DEVICE ID's for BE2 */
44#define BE_DEVICE_ID1 0x212 45#define BE_DEVICE_ID1 0x212
45#define OC_DEVICE_ID1 0x702 46#define OC_DEVICE_ID1 0x702
@@ -68,8 +69,15 @@
68#define BEISCSI_NUM_MAX_LUN 256 /* scsi_host->max_lun */ 69#define BEISCSI_NUM_MAX_LUN 256 /* scsi_host->max_lun */
69#define BEISCSI_NUM_DEVICES_SUPPORTED 0x01 70#define BEISCSI_NUM_DEVICES_SUPPORTED 0x01
70#define BEISCSI_MAX_FRAGS_INIT 192 71#define BEISCSI_MAX_FRAGS_INIT 192
71#define BE_NUM_MSIX_ENTRIES 1 72#define BE_NUM_MSIX_ENTRIES 1
72#define MPU_EP_SEMAPHORE 0xac 73
74#define MPU_EP_CONTROL 0
75#define MPU_EP_SEMAPHORE 0xac
76#define BE2_SOFT_RESET 0x5c
77#define BE2_PCI_ONLINE0 0xb0
78#define BE2_PCI_ONLINE1 0xb4
79#define BE2_SET_RESET 0x80
80#define BE2_MPU_IRAM_ONLINE 0x00000080
73 81
74#define BE_SENSE_INFO_SIZE 258 82#define BE_SENSE_INFO_SIZE 258
75#define BE_ISCSI_PDU_HEADER_SIZE 64 83#define BE_ISCSI_PDU_HEADER_SIZE 64
@@ -105,7 +113,7 @@ do { \
105#define HWI_GET_ASYNC_PDU_CTX(phwi) (phwi->phwi_ctxt->pasync_ctx) 113#define HWI_GET_ASYNC_PDU_CTX(phwi) (phwi->phwi_ctxt->pasync_ctx)
106 114
107/********* Memory BAR register ************/ 115/********* Memory BAR register ************/
108#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc 116#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
109/** 117/**
110 * Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt 118 * Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt
111 * Disable" may still globally block interrupts in addition to individual 119 * Disable" may still globally block interrupts in addition to individual
@@ -116,7 +124,7 @@ do { \
116#define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK (1 << 29) /* bit 29 */ 124#define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK (1 << 29) /* bit 29 */
117 125
118/********* ISR0 Register offset **********/ 126/********* ISR0 Register offset **********/
119#define CEV_ISR0_OFFSET 0xC18 127#define CEV_ISR0_OFFSET 0xC18
120#define CEV_ISR_SIZE 4 128#define CEV_ISR_SIZE 4
121 129
122/** 130/**
@@ -139,12 +147,12 @@ do { \
139#define DB_EQ_REARM_SHIFT (29) /* bit 29 */ 147#define DB_EQ_REARM_SHIFT (29) /* bit 29 */
140 148
141/********* Compl Q door bell *************/ 149/********* Compl Q door bell *************/
142#define DB_CQ_OFFSET 0x120 150#define DB_CQ_OFFSET 0x120
143#define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */ 151#define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
144/* Number of event entries processed */ 152/* Number of event entries processed */
145#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */ 153#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
146/* Rearm bit */ 154/* Rearm bit */
147#define DB_CQ_REARM_SHIFT (29) /* bit 29 */ 155#define DB_CQ_REARM_SHIFT (29) /* bit 29 */
148 156
149#define GET_HWI_CONTROLLER_WS(pc) (pc->phwi_ctrlr) 157#define GET_HWI_CONTROLLER_WS(pc) (pc->phwi_ctrlr)
150#define HWI_GET_DEF_BUFQ_ID(pc) (((struct hwi_controller *)\ 158#define HWI_GET_DEF_BUFQ_ID(pc) (((struct hwi_controller *)\
@@ -161,12 +169,12 @@ enum be_mem_enum {
161 HWI_MEM_WRBH, 169 HWI_MEM_WRBH,
162 HWI_MEM_SGLH, 170 HWI_MEM_SGLH,
163 HWI_MEM_SGE, 171 HWI_MEM_SGE,
164 HWI_MEM_ASYNC_HEADER_BUF, /* 5 */ 172 HWI_MEM_ASYNC_HEADER_BUF, /* 5 */
165 HWI_MEM_ASYNC_DATA_BUF, 173 HWI_MEM_ASYNC_DATA_BUF,
166 HWI_MEM_ASYNC_HEADER_RING, 174 HWI_MEM_ASYNC_HEADER_RING,
167 HWI_MEM_ASYNC_DATA_RING, 175 HWI_MEM_ASYNC_DATA_RING,
168 HWI_MEM_ASYNC_HEADER_HANDLE, 176 HWI_MEM_ASYNC_HEADER_HANDLE,
169 HWI_MEM_ASYNC_DATA_HANDLE, /* 10 */ 177 HWI_MEM_ASYNC_DATA_HANDLE, /* 10 */
170 HWI_MEM_ASYNC_PDU_CONTEXT, 178 HWI_MEM_ASYNC_PDU_CONTEXT,
171 ISCSI_MEM_GLOBAL_HEADER, 179 ISCSI_MEM_GLOBAL_HEADER,
172 SE_MEM_MAX 180 SE_MEM_MAX
@@ -352,6 +360,7 @@ struct beiscsi_conn {
352 u32 beiscsi_conn_cid; 360 u32 beiscsi_conn_cid;
353 struct beiscsi_endpoint *ep; 361 struct beiscsi_endpoint *ep;
354 unsigned short login_in_progress; 362 unsigned short login_in_progress;
363 struct wrb_handle *plogin_wrb_handle;
355 struct sgl_handle *plogin_sgl_handle; 364 struct sgl_handle *plogin_sgl_handle;
356 struct beiscsi_session *beiscsi_sess; 365 struct beiscsi_session *beiscsi_sess;
357 struct iscsi_task *task; 366 struct iscsi_task *task;
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 350cbeaae160..3f3fab91a7d1 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -21,7 +21,7 @@
21#include "be_mgmt.h" 21#include "be_mgmt.h"
22#include "be_iscsi.h" 22#include "be_iscsi.h"
23 23
24unsigned char mgmt_get_fw_config(struct be_ctrl_info *ctrl, 24int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
25 struct beiscsi_hba *phba) 25 struct beiscsi_hba *phba)
26{ 26{
27 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); 27 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
@@ -50,7 +50,7 @@ unsigned char mgmt_get_fw_config(struct be_ctrl_info *ctrl,
50 pfw_cfg->ulp[0].sq_count; 50 pfw_cfg->ulp[0].sq_count;
51 if (phba->fw_config.iscsi_cid_count > (BE2_MAX_SESSIONS / 2)) { 51 if (phba->fw_config.iscsi_cid_count > (BE2_MAX_SESSIONS / 2)) {
52 SE_DEBUG(DBG_LVL_8, 52 SE_DEBUG(DBG_LVL_8,
53 "FW reported MAX CXNS as %d \t" 53 "FW reported MAX CXNS as %d\t"
54 "Max Supported = %d.\n", 54 "Max Supported = %d.\n",
55 phba->fw_config.iscsi_cid_count, 55 phba->fw_config.iscsi_cid_count,
56 BE2_MAX_SESSIONS); 56 BE2_MAX_SESSIONS);
@@ -58,14 +58,14 @@ unsigned char mgmt_get_fw_config(struct be_ctrl_info *ctrl,
58 } 58 }
59 } else { 59 } else {
60 shost_printk(KERN_WARNING, phba->shost, 60 shost_printk(KERN_WARNING, phba->shost,
61 "Failed in mgmt_get_fw_config \n"); 61 "Failed in mgmt_get_fw_config\n");
62 } 62 }
63 63
64 spin_unlock(&ctrl->mbox_lock); 64 spin_unlock(&ctrl->mbox_lock);
65 return status; 65 return status;
66} 66}
67 67
68unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl, 68int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
69 struct beiscsi_hba *phba) 69 struct beiscsi_hba *phba)
70{ 70{
71 struct be_dma_mem nonemb_cmd; 71 struct be_dma_mem nonemb_cmd;
@@ -81,7 +81,7 @@ unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
81 SE_DEBUG(DBG_LVL_1, 81 SE_DEBUG(DBG_LVL_1,
82 "Failed to allocate memory for mgmt_check_supported_fw" 82 "Failed to allocate memory for mgmt_check_supported_fw"
83 "\n"); 83 "\n");
84 return -1; 84 return -ENOMEM;
85 } 85 }
86 nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes); 86 nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
87 req = nonemb_cmd.va; 87 req = nonemb_cmd.va;
@@ -117,8 +117,7 @@ unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
117 return status; 117 return status;
118} 118}
119 119
120 120int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
121unsigned char mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
122{ 121{
123 struct be_ctrl_info *ctrl = &phba->ctrl; 122 struct be_ctrl_info *ctrl = &phba->ctrl;
124 struct be_mcc_wrb *wrb = wrb_from_mccq(phba); 123 struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
@@ -144,11 +143,12 @@ unsigned char mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
144 return status; 143 return status;
145} 144}
146 145
147unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba, 146unsigned int mgmt_invalidate_icds(struct beiscsi_hba *phba,
148 struct invalidate_command_table *inv_tbl, 147 struct invalidate_command_table *inv_tbl,
149 unsigned int num_invalidate, unsigned int cid) 148 unsigned int num_invalidate, unsigned int cid,
149 struct be_dma_mem *nonemb_cmd)
150
150{ 151{
151 struct be_dma_mem nonemb_cmd;
152 struct be_ctrl_info *ctrl = &phba->ctrl; 152 struct be_ctrl_info *ctrl = &phba->ctrl;
153 struct be_mcc_wrb *wrb; 153 struct be_mcc_wrb *wrb;
154 struct be_sge *sge; 154 struct be_sge *sge;
@@ -162,17 +162,7 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
162 return tag; 162 return tag;
163 } 163 }
164 164
165 nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev, 165 req = nonemb_cmd->va;
166 sizeof(struct invalidate_commands_params_in),
167 &nonemb_cmd.dma);
168 if (nonemb_cmd.va == NULL) {
169 SE_DEBUG(DBG_LVL_1,
170 "Failed to allocate memory for mgmt_invalidate_icds\n");
171 spin_unlock(&ctrl->mbox_lock);
172 return 0;
173 }
174 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
175 req = nonemb_cmd.va;
176 memset(req, 0, sizeof(*req)); 166 memset(req, 0, sizeof(*req));
177 wrb = wrb_from_mccq(phba); 167 wrb = wrb_from_mccq(phba);
178 sge = nonembedded_sgl(wrb); 168 sge = nonembedded_sgl(wrb);
@@ -190,19 +180,16 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
190 req->icd_count++; 180 req->icd_count++;
191 inv_tbl++; 181 inv_tbl++;
192 } 182 }
193 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma)); 183 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
194 sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF); 184 sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
195 sge->len = cpu_to_le32(nonemb_cmd.size); 185 sge->len = cpu_to_le32(nonemb_cmd->size);
196 186
197 be_mcc_notify(phba); 187 be_mcc_notify(phba);
198 spin_unlock(&ctrl->mbox_lock); 188 spin_unlock(&ctrl->mbox_lock);
199 if (nonemb_cmd.va)
200 pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
201 nonemb_cmd.va, nonemb_cmd.dma);
202 return tag; 189 return tag;
203} 190}
204 191
205unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba, 192unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba,
206 struct beiscsi_endpoint *beiscsi_ep, 193 struct beiscsi_endpoint *beiscsi_ep,
207 unsigned short cid, 194 unsigned short cid,
208 unsigned short issue_reset, 195 unsigned short issue_reset,
@@ -239,7 +226,7 @@ unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba,
239 return tag; 226 return tag;
240} 227}
241 228
242unsigned char mgmt_upload_connection(struct beiscsi_hba *phba, 229unsigned int mgmt_upload_connection(struct beiscsi_hba *phba,
243 unsigned short cid, unsigned int upload_flag) 230 unsigned short cid, unsigned int upload_flag)
244{ 231{
245 struct be_ctrl_info *ctrl = &phba->ctrl; 232 struct be_ctrl_info *ctrl = &phba->ctrl;
@@ -269,7 +256,9 @@ unsigned char mgmt_upload_connection(struct beiscsi_hba *phba,
269 256
270int mgmt_open_connection(struct beiscsi_hba *phba, 257int mgmt_open_connection(struct beiscsi_hba *phba,
271 struct sockaddr *dst_addr, 258 struct sockaddr *dst_addr,
272 struct beiscsi_endpoint *beiscsi_ep) 259 struct beiscsi_endpoint *beiscsi_ep,
260 struct be_dma_mem *nonemb_cmd)
261
273{ 262{
274 struct hwi_controller *phwi_ctrlr; 263 struct hwi_controller *phwi_ctrlr;
275 struct hwi_context_memory *phwi_context; 264 struct hwi_context_memory *phwi_context;
@@ -285,6 +274,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
285 unsigned int tag = 0; 274 unsigned int tag = 0;
286 unsigned int i; 275 unsigned int i;
287 unsigned short cid = beiscsi_ep->ep_cid; 276 unsigned short cid = beiscsi_ep->ep_cid;
277 struct be_sge *sge;
288 278
289 phwi_ctrlr = phba->phwi_ctrlr; 279 phwi_ctrlr = phba->phwi_ctrlr;
290 phwi_context = phwi_ctrlr->phwi_ctxt; 280 phwi_context = phwi_ctrlr->phwi_ctxt;
@@ -300,10 +290,14 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
300 return tag; 290 return tag;
301 } 291 }
302 wrb = wrb_from_mccq(phba); 292 wrb = wrb_from_mccq(phba);
303 req = embedded_payload(wrb); 293 memset(wrb, 0, sizeof(*wrb));
294 sge = nonembedded_sgl(wrb);
295
296 req = nonemb_cmd->va;
297 memset(req, 0, sizeof(*req));
304 wrb->tag0 |= tag; 298 wrb->tag0 |= tag;
305 299
306 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 300 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 1);
307 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, 301 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
308 OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD, 302 OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD,
309 sizeof(*req)); 303 sizeof(*req));
@@ -331,6 +325,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
331 shost_printk(KERN_ERR, phba->shost, "unknown addr family %d\n", 325 shost_printk(KERN_ERR, phba->shost, "unknown addr family %d\n",
332 dst_addr->sa_family); 326 dst_addr->sa_family);
333 spin_unlock(&ctrl->mbox_lock); 327 spin_unlock(&ctrl->mbox_lock);
328 free_mcc_tag(&phba->ctrl, tag);
334 return -EINVAL; 329 return -EINVAL;
335 330
336 } 331 }
@@ -339,13 +334,16 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
339 if (phba->nxt_cqid == phba->num_cpus) 334 if (phba->nxt_cqid == phba->num_cpus)
340 phba->nxt_cqid = 0; 335 phba->nxt_cqid = 0;
341 req->cq_id = phwi_context->be_cq[i].id; 336 req->cq_id = phwi_context->be_cq[i].id;
342 SE_DEBUG(DBG_LVL_8, "i=%d cq_id=%d \n", i, req->cq_id); 337 SE_DEBUG(DBG_LVL_8, "i=%d cq_id=%d\n", i, req->cq_id);
343 req->defq_id = def_hdr_id; 338 req->defq_id = def_hdr_id;
344 req->hdr_ring_id = def_hdr_id; 339 req->hdr_ring_id = def_hdr_id;
345 req->data_ring_id = def_data_id; 340 req->data_ring_id = def_data_id;
346 req->do_offload = 1; 341 req->do_offload = 1;
347 req->dataout_template_pa.lo = ptemplate_address->lo; 342 req->dataout_template_pa.lo = ptemplate_address->lo;
348 req->dataout_template_pa.hi = ptemplate_address->hi; 343 req->dataout_template_pa.hi = ptemplate_address->hi;
344 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
345 sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
346 sge->len = cpu_to_le32(nonemb_cmd->size);
349 be_mcc_notify(phba); 347 be_mcc_notify(phba);
350 spin_unlock(&ctrl->mbox_lock); 348 spin_unlock(&ctrl->mbox_lock);
351 return tag; 349 return tag;
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index 3d316b82feb1..b9acedf78653 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -86,16 +86,19 @@ struct mcc_wrb {
86 struct mcc_wrb_payload payload; 86 struct mcc_wrb_payload payload;
87}; 87};
88 88
89unsigned char mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute); 89int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute);
90int mgmt_open_connection(struct beiscsi_hba *phba, struct sockaddr *dst_addr, 90int mgmt_open_connection(struct beiscsi_hba *phba,
91 struct beiscsi_endpoint *beiscsi_ep); 91 struct sockaddr *dst_addr,
92 struct beiscsi_endpoint *beiscsi_ep,
93 struct be_dma_mem *nonemb_cmd);
92 94
93unsigned char mgmt_upload_connection(struct beiscsi_hba *phba, 95unsigned int mgmt_upload_connection(struct beiscsi_hba *phba,
94 unsigned short cid, 96 unsigned short cid,
95 unsigned int upload_flag); 97 unsigned int upload_flag);
96unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba, 98unsigned int mgmt_invalidate_icds(struct beiscsi_hba *phba,
97 struct invalidate_command_table *inv_tbl, 99 struct invalidate_command_table *inv_tbl,
98 unsigned int num_invalidate, unsigned int cid); 100 unsigned int num_invalidate, unsigned int cid,
101 struct be_dma_mem *nonemb_cmd);
99 102
100struct iscsi_invalidate_connection_params_in { 103struct iscsi_invalidate_connection_params_in {
101 struct be_cmd_req_hdr hdr; 104 struct be_cmd_req_hdr hdr;
@@ -237,10 +240,10 @@ struct beiscsi_endpoint {
237 u16 cid_vld; 240 u16 cid_vld;
238}; 241};
239 242
240unsigned char mgmt_get_fw_config(struct be_ctrl_info *ctrl, 243int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
241 struct beiscsi_hba *phba); 244 struct beiscsi_hba *phba);
242 245
243unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba, 246unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba,
244 struct beiscsi_endpoint *beiscsi_ep, 247 struct beiscsi_endpoint *beiscsi_ep,
245 unsigned short cid, 248 unsigned short cid,
246 unsigned short issue_reset, 249 unsigned short issue_reset,
diff --git a/drivers/scsi/bfa/Makefile b/drivers/scsi/bfa/Makefile
index 17e06cae71b2..ac3fdf02d5f6 100644
--- a/drivers/scsi/bfa/Makefile
+++ b/drivers/scsi/bfa/Makefile
@@ -1,7 +1,7 @@
1obj-$(CONFIG_SCSI_BFA_FC) := bfa.o 1obj-$(CONFIG_SCSI_BFA_FC) := bfa.o
2 2
3bfa-y := bfad.o bfad_intr.o bfad_os.o bfad_im.o bfad_attr.o bfad_fwimg.o 3bfa-y := bfad.o bfad_intr.o bfad_os.o bfad_im.o bfad_attr.o bfad_fwimg.o
4 4bfa-y += bfad_debugfs.o
5bfa-y += bfa_core.o bfa_ioc.o bfa_ioc_ct.o bfa_ioc_cb.o bfa_iocfc.o bfa_fcxp.o 5bfa-y += bfa_core.o bfa_ioc.o bfa_ioc_ct.o bfa_ioc_cb.o bfa_iocfc.o bfa_fcxp.o
6bfa-y += bfa_lps.o bfa_hw_cb.o bfa_hw_ct.o bfa_intr.o bfa_timer.o bfa_rport.o 6bfa-y += bfa_lps.o bfa_hw_cb.o bfa_hw_ct.o bfa_intr.o bfa_timer.o bfa_rport.o
7bfa-y += bfa_fcport.o bfa_port.o bfa_uf.o bfa_sgpg.o bfa_module.o bfa_ioim.o 7bfa-y += bfa_fcport.o bfa_port.o bfa_uf.o bfa_sgpg.o bfa_module.o bfa_ioim.o
diff --git a/drivers/scsi/bfa/bfa_cb_ioim_macros.h b/drivers/scsi/bfa/bfa_cb_ioim_macros.h
index 53a616f5f50d..3906ed926966 100644
--- a/drivers/scsi/bfa/bfa_cb_ioim_macros.h
+++ b/drivers/scsi/bfa/bfa_cb_ioim_macros.h
@@ -171,6 +171,11 @@ bfa_cb_ioim_get_cdblen(struct bfad_ioim_s *dio)
171 return cmnd->cmd_len; 171 return cmnd->cmd_len;
172} 172}
173 173
174 174/**
175 * Assign queue to be used for the I/O request. This value depends on whether
176 * the driver wants to use the queues via any specific algorithm. Currently,
177 * this is not supported.
178 */
179#define bfa_cb_ioim_get_reqq(__dio) BFA_FALSE
175 180
176#endif /* __BFA_HCB_IOIM_MACROS_H__ */ 181#endif /* __BFA_HCB_IOIM_MACROS_H__ */
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 3a7b3f88932f..bef70924d5c8 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -333,6 +333,7 @@ bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
333 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P}, 333 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P},
334 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P}, 334 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P},
335 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT}, 335 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT},
336 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
336 }; 337 };
337 338
338 *npciids = sizeof(__pciids) / sizeof(__pciids[0]); 339 *npciids = sizeof(__pciids) / sizeof(__pciids[0]);
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 790c945aeae6..8c703d8dc94b 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -80,11 +80,6 @@ bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
80} 80}
81 81
82static void 82static void
83bfa_fcpim_initdone(struct bfa_s *bfa)
84{
85}
86
87static void
88bfa_fcpim_detach(struct bfa_s *bfa) 83bfa_fcpim_detach(struct bfa_s *bfa)
89{ 84{
90 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 85 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
@@ -172,4 +167,28 @@ bfa_fcpim_qdepth_get(struct bfa_s *bfa)
172 return fcpim->q_depth; 167 return fcpim->q_depth;
173} 168}
174 169
170void
171bfa_fcpim_update_ioredirect(struct bfa_s *bfa)
172{
173 bfa_boolean_t ioredirect;
174
175 /*
176 * IO redirection is turned off when QoS is enabled and vice versa
177 */
178 ioredirect = bfa_fcport_is_qos_enabled(bfa) ? BFA_FALSE : BFA_TRUE;
175 179
180 /*
181 * Notify the bfad module of a possible state change in
182 * IO redirection capability, due to a QoS state change. bfad will
183 * check on the support for io redirection and update the
184 * fcpim's ioredirect state accordingly.
185 */
186 bfa_cb_ioredirect_state_change((void *)(bfa->bfad), ioredirect);
187}
188
189void
190bfa_fcpim_set_ioredirect(struct bfa_s *bfa, bfa_boolean_t state)
191{
192 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
193 fcpim->ioredirect = state;
194}
diff --git a/drivers/scsi/bfa/bfa_fcpim_priv.h b/drivers/scsi/bfa/bfa_fcpim_priv.h
index 5cf418460f75..762516cb5cb2 100644
--- a/drivers/scsi/bfa/bfa_fcpim_priv.h
+++ b/drivers/scsi/bfa/bfa_fcpim_priv.h
@@ -49,7 +49,8 @@ struct bfa_fcpim_mod_s {
49 int num_tskim_reqs; 49 int num_tskim_reqs;
50 u32 path_tov; 50 u32 path_tov;
51 u16 q_depth; 51 u16 q_depth;
52 u16 rsvd; 52 u8 reqq; /* Request queue to be used */
53 u8 rsvd;
53 struct list_head itnim_q; /* queue of active itnim */ 54 struct list_head itnim_q; /* queue of active itnim */
54 struct list_head ioim_free_q; /* free IO resources */ 55 struct list_head ioim_free_q; /* free IO resources */
55 struct list_head ioim_resfree_q; /* IOs waiting for f/w */ 56 struct list_head ioim_resfree_q; /* IOs waiting for f/w */
@@ -58,6 +59,7 @@ struct bfa_fcpim_mod_s {
58 u32 ios_active; /* current active IOs */ 59 u32 ios_active; /* current active IOs */
59 u32 delay_comp; 60 u32 delay_comp;
60 struct bfa_fcpim_stats_s stats; 61 struct bfa_fcpim_stats_s stats;
62 bfa_boolean_t ioredirect;
61}; 63};
62 64
63struct bfa_ioim_s; 65struct bfa_ioim_s;
@@ -82,6 +84,7 @@ struct bfa_ioim_s {
82 struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */ 84 struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */
83 bfa_cb_cbfn_t io_cbfn; /* IO completion handler */ 85 bfa_cb_cbfn_t io_cbfn; /* IO completion handler */
84 struct bfa_ioim_sp_s *iosp; /* slow-path IO handling */ 86 struct bfa_ioim_sp_s *iosp; /* slow-path IO handling */
87 u8 reqq; /* Request queue for I/O */
85}; 88};
86 89
87struct bfa_ioim_sp_s { 90struct bfa_ioim_sp_s {
@@ -141,6 +144,7 @@ struct bfa_itnim_s {
141 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ 144 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
142 struct bfa_fcpim_mod_s *fcpim; /* fcpim module */ 145 struct bfa_fcpim_mod_s *fcpim; /* fcpim module */
143 struct bfa_itnim_hal_stats_s stats; 146 struct bfa_itnim_hal_stats_s stats;
147 struct bfa_itnim_latency_s io_latency;
144}; 148};
145 149
146#define bfa_itnim_is_online(_itnim) ((_itnim)->is_online) 150#define bfa_itnim_is_online(_itnim) ((_itnim)->is_online)
diff --git a/drivers/scsi/bfa/bfa_fcport.c b/drivers/scsi/bfa/bfa_fcport.c
index c589488db0c1..f0933d8d1eda 100644
--- a/drivers/scsi/bfa/bfa_fcport.c
+++ b/drivers/scsi/bfa/bfa_fcport.c
@@ -18,6 +18,7 @@
18#include <bfa.h> 18#include <bfa.h>
19#include <bfa_svc.h> 19#include <bfa_svc.h>
20#include <bfi/bfi_pport.h> 20#include <bfi/bfi_pport.h>
21#include <bfi/bfi_pbc.h>
21#include <cs/bfa_debug.h> 22#include <cs/bfa_debug.h>
22#include <aen/bfa_aen.h> 23#include <aen/bfa_aen.h>
23#include <cs/bfa_plog.h> 24#include <cs/bfa_plog.h>
@@ -310,10 +311,12 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
310 311
311 if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) { 312 if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
312 313
313 bfa_trc(fcport->bfa, pevent->link_state.fcf.fipenabled); 314 bfa_trc(fcport->bfa,
314 bfa_trc(fcport->bfa, pevent->link_state.fcf.fipfailed); 315 pevent->link_state.vc_fcf.fcf.fipenabled);
316 bfa_trc(fcport->bfa,
317 pevent->link_state.vc_fcf.fcf.fipfailed);
315 318
316 if (pevent->link_state.fcf.fipfailed) 319 if (pevent->link_state.vc_fcf.fcf.fipfailed)
317 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, 320 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
318 BFA_PL_EID_FIP_FCF_DISC, 0, 321 BFA_PL_EID_FIP_FCF_DISC, 0,
319 "FIP FCF Discovery Failed"); 322 "FIP FCF Discovery Failed");
@@ -888,6 +891,7 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
888 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 891 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
889 struct bfa_pport_cfg_s *port_cfg = &fcport->cfg; 892 struct bfa_pport_cfg_s *port_cfg = &fcport->cfg;
890 struct bfa_fcport_ln_s *ln = &fcport->ln; 893 struct bfa_fcport_ln_s *ln = &fcport->ln;
894 struct bfa_timeval_s tv;
891 895
892 bfa_os_memset(fcport, 0, sizeof(struct bfa_fcport_s)); 896 bfa_os_memset(fcport, 0, sizeof(struct bfa_fcport_s));
893 fcport->bfa = bfa; 897 fcport->bfa = bfa;
@@ -899,6 +903,12 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
899 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn); 903 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
900 904
901 /** 905 /**
906 * initialize time stamp for stats reset
907 */
908 bfa_os_gettimeofday(&tv);
909 fcport->stats_reset_time = tv.tv_sec;
910
911 /**
902 * initialize and set default configuration 912 * initialize and set default configuration
903 */ 913 */
904 port_cfg->topology = BFA_PPORT_TOPOLOGY_P2P; 914 port_cfg->topology = BFA_PPORT_TOPOLOGY_P2P;
@@ -912,25 +922,6 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
912} 922}
913 923
914static void 924static void
915bfa_fcport_initdone(struct bfa_s *bfa)
916{
917 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
918
919 /**
920 * Initialize port attributes from IOC hardware data.
921 */
922 bfa_fcport_set_wwns(fcport);
923 if (fcport->cfg.maxfrsize == 0)
924 fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
925 fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
926 fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
927
928 bfa_assert(fcport->cfg.maxfrsize);
929 bfa_assert(fcport->cfg.rx_bbcredit);
930 bfa_assert(fcport->speed_sup);
931}
932
933static void
934bfa_fcport_detach(struct bfa_s *bfa) 925bfa_fcport_detach(struct bfa_s *bfa)
935{ 926{
936} 927}
@@ -971,14 +962,15 @@ bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
971 fcport->topology = pevent->link_state.topology; 962 fcport->topology = pevent->link_state.topology;
972 963
973 if (fcport->topology == BFA_PPORT_TOPOLOGY_LOOP) 964 if (fcport->topology == BFA_PPORT_TOPOLOGY_LOOP)
974 fcport->myalpa = 965 fcport->myalpa = 0;
975 pevent->link_state.tl.loop_info.myalpa;
976 966
977 /* 967 /*
978 * QoS Details 968 * QoS Details
979 */ 969 */
980 bfa_os_assign(fcport->qos_attr, pevent->link_state.qos_attr); 970 bfa_os_assign(fcport->qos_attr, pevent->link_state.qos_attr);
981 bfa_os_assign(fcport->qos_vc_attr, pevent->link_state.qos_vc_attr); 971 bfa_os_assign(fcport->qos_vc_attr,
972 pevent->link_state.vc_fcf.qos_vc_attr);
973
982 974
983 bfa_trc(fcport->bfa, fcport->speed); 975 bfa_trc(fcport->bfa, fcport->speed);
984 bfa_trc(fcport->bfa, fcport->topology); 976 bfa_trc(fcport->bfa, fcport->topology);
@@ -1145,16 +1137,22 @@ __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
1145 1137
1146 if (complete) { 1138 if (complete) {
1147 if (fcport->stats_status == BFA_STATUS_OK) { 1139 if (fcport->stats_status == BFA_STATUS_OK) {
1140 struct bfa_timeval_s tv;
1148 1141
1149 /* Swap FC QoS or FCoE stats */ 1142 /* Swap FC QoS or FCoE stats */
1150 if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) 1143 if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
1151 bfa_fcport_qos_stats_swap( 1144 bfa_fcport_qos_stats_swap(
1152 &fcport->stats_ret->fcqos, 1145 &fcport->stats_ret->fcqos,
1153 &fcport->stats->fcqos); 1146 &fcport->stats->fcqos);
1154 else 1147 } else {
1155 bfa_fcport_fcoe_stats_swap( 1148 bfa_fcport_fcoe_stats_swap(
1156 &fcport->stats_ret->fcoe, 1149 &fcport->stats_ret->fcoe,
1157 &fcport->stats->fcoe); 1150 &fcport->stats->fcoe);
1151
1152 bfa_os_gettimeofday(&tv);
1153 fcport->stats_ret->fcoe.secs_reset =
1154 tv.tv_sec - fcport->stats_reset_time;
1155 }
1158 } 1156 }
1159 fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status); 1157 fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
1160 } else { 1158 } else {
@@ -1210,6 +1208,14 @@ __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
1210 struct bfa_fcport_s *fcport = cbarg; 1208 struct bfa_fcport_s *fcport = cbarg;
1211 1209
1212 if (complete) { 1210 if (complete) {
1211 struct bfa_timeval_s tv;
1212
1213 /**
1214 * re-initialize time stamp for stats reset
1215 */
1216 bfa_os_gettimeofday(&tv);
1217 fcport->stats_reset_time = tv.tv_sec;
1218
1213 fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status); 1219 fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
1214 } else { 1220 } else {
1215 fcport->stats_busy = BFA_FALSE; 1221 fcport->stats_busy = BFA_FALSE;
@@ -1263,6 +1269,29 @@ bfa_fcport_send_stats_clear(void *cbarg)
1263 */ 1269 */
1264 1270
1265/** 1271/**
1272 * Called to initialize port attributes
1273 */
1274void
1275bfa_fcport_init(struct bfa_s *bfa)
1276{
1277 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1278
1279 /**
1280 * Initialize port attributes from IOC hardware data.
1281 */
1282 bfa_fcport_set_wwns(fcport);
1283 if (fcport->cfg.maxfrsize == 0)
1284 fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
1285 fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
1286 fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
1287
1288 bfa_assert(fcport->cfg.maxfrsize);
1289 bfa_assert(fcport->cfg.rx_bbcredit);
1290 bfa_assert(fcport->speed_sup);
1291}
1292
1293
1294/**
1266 * Firmware message handler. 1295 * Firmware message handler.
1267 */ 1296 */
1268void 1297void
@@ -1355,6 +1384,17 @@ bfa_status_t
1355bfa_fcport_enable(struct bfa_s *bfa) 1384bfa_fcport_enable(struct bfa_s *bfa)
1356{ 1385{
1357 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 1386 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1387 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1388 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
1389
1390 /* if port is PBC disabled, return error */
1391 if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
1392 bfa_trc(bfa, fcport->pwwn);
1393 return BFA_STATUS_PBC;
1394 }
1395
1396 if (bfa_ioc_is_disabled(&bfa->ioc))
1397 return BFA_STATUS_IOC_DISABLED;
1358 1398
1359 if (fcport->diag_busy) 1399 if (fcport->diag_busy)
1360 return BFA_STATUS_DIAG_BUSY; 1400 return BFA_STATUS_DIAG_BUSY;
@@ -1369,6 +1409,16 @@ bfa_fcport_enable(struct bfa_s *bfa)
1369bfa_status_t 1409bfa_status_t
1370bfa_fcport_disable(struct bfa_s *bfa) 1410bfa_fcport_disable(struct bfa_s *bfa)
1371{ 1411{
1412 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1413 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1414 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
1415
1416 /* if port is PBC disabled, return error */
1417 if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
1418 bfa_trc(bfa, fcport->pwwn);
1419 return BFA_STATUS_PBC;
1420 }
1421
1372 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE); 1422 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
1373 return BFA_STATUS_OK; 1423 return BFA_STATUS_OK;
1374} 1424}
@@ -1559,12 +1609,17 @@ void
1559bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr) 1609bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr)
1560{ 1610{
1561 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 1611 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1612 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1613 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
1562 1614
1563 bfa_os_memset(attr, 0, sizeof(struct bfa_pport_attr_s)); 1615 bfa_os_memset(attr, 0, sizeof(struct bfa_pport_attr_s));
1564 1616
1565 attr->nwwn = fcport->nwwn; 1617 attr->nwwn = fcport->nwwn;
1566 attr->pwwn = fcport->pwwn; 1618 attr->pwwn = fcport->pwwn;
1567 1619
1620 attr->factorypwwn = bfa_ioc_get_mfg_pwwn(&bfa->ioc);
1621 attr->factorynwwn = bfa_ioc_get_mfg_nwwn(&bfa->ioc);
1622
1568 bfa_os_memcpy(&attr->pport_cfg, &fcport->cfg, 1623 bfa_os_memcpy(&attr->pport_cfg, &fcport->cfg,
1569 sizeof(struct bfa_pport_cfg_s)); 1624 sizeof(struct bfa_pport_cfg_s));
1570 /* 1625 /*
@@ -1590,11 +1645,18 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr)
1590 1645
1591 attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa); 1646 attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
1592 attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa); 1647 attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
1593 attr->port_state = bfa_sm_to_state(hal_pport_sm_table, fcport->sm); 1648
1594 if (bfa_ioc_is_disabled(&fcport->bfa->ioc)) 1649 /* PBC Disabled State */
1595 attr->port_state = BFA_PPORT_ST_IOCDIS; 1650 if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED)
1596 else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc)) 1651 attr->port_state = BFA_PPORT_ST_PREBOOT_DISABLED;
1597 attr->port_state = BFA_PPORT_ST_FWMISMATCH; 1652 else {
1653 attr->port_state = bfa_sm_to_state(
1654 hal_pport_sm_table, fcport->sm);
1655 if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
1656 attr->port_state = BFA_PPORT_ST_IOCDIS;
1657 else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
1658 attr->port_state = BFA_PPORT_ST_FWMISMATCH;
1659 }
1598} 1660}
1599 1661
1600#define BFA_FCPORT_STATS_TOV 1000 1662#define BFA_FCPORT_STATS_TOV 1000
@@ -1801,8 +1863,13 @@ bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off)
1801 1863
1802 bfa_trc(bfa, ioc_type); 1864 bfa_trc(bfa, ioc_type);
1803 1865
1804 if (ioc_type == BFA_IOC_TYPE_FC) 1866 if (ioc_type == BFA_IOC_TYPE_FC) {
1805 fcport->cfg.qos_enabled = on_off; 1867 fcport->cfg.qos_enabled = on_off;
1868 /**
1869 * Notify fcpim of the change in QoS state
1870 */
1871 bfa_fcpim_update_ioredirect(bfa);
1872 }
1806} 1873}
1807 1874
1808void 1875void
@@ -1886,4 +1953,10 @@ bfa_fcport_is_linkup(struct bfa_s *bfa)
1886 return bfa_sm_cmp_state(BFA_FCPORT_MOD(bfa), bfa_fcport_sm_linkup); 1953 return bfa_sm_cmp_state(BFA_FCPORT_MOD(bfa), bfa_fcport_sm_linkup);
1887} 1954}
1888 1955
1956bfa_boolean_t
1957bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
1958{
1959 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1889 1960
1961 return fcport->cfg.qos_enabled;
1962}
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index 3516172c597c..3ec2f49de61d 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -99,14 +99,22 @@ bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
99void 99void
100bfa_fcs_init(struct bfa_fcs_s *fcs) 100bfa_fcs_init(struct bfa_fcs_s *fcs)
101{ 101{
102 int i; 102 int i, npbc_vports;
103 struct bfa_fcs_mod_s *mod; 103 struct bfa_fcs_mod_s *mod;
104 struct bfi_pbc_vport_s pbc_vports[BFI_PBC_MAX_VPORTS];
104 105
105 for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) { 106 for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) {
106 mod = &fcs_modules[i]; 107 mod = &fcs_modules[i];
107 if (mod->modinit) 108 if (mod->modinit)
108 mod->modinit(fcs); 109 mod->modinit(fcs);
109 } 110 }
111 /* Initialize pbc vports */
112 if (!fcs->min_cfg) {
113 npbc_vports =
114 bfa_iocfc_get_pbc_vports(fcs->bfa, pbc_vports);
115 for (i = 0; i < npbc_vports; i++)
116 bfa_fcb_pbc_vport_create(fcs->bfa->bfad, pbc_vports[i]);
117 }
110} 118}
111 119
112/** 120/**
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 7c1251c682d8..35df20e68a52 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -135,6 +135,9 @@ bfa_fcs_port_sm_init(struct bfa_fcs_port_s *port, enum bfa_fcs_port_event event)
135 bfa_fcs_port_deleted(port); 135 bfa_fcs_port_deleted(port);
136 break; 136 break;
137 137
138 case BFA_FCS_PORT_SM_OFFLINE:
139 break;
140
138 default: 141 default:
139 bfa_sm_fault(port->fcs, event); 142 bfa_sm_fault(port->fcs, event);
140 } 143 }
diff --git a/drivers/scsi/bfa/bfa_fcxp.c b/drivers/scsi/bfa/bfa_fcxp.c
index cf0ad6782686..8258f88bfee6 100644
--- a/drivers/scsi/bfa/bfa_fcxp.c
+++ b/drivers/scsi/bfa/bfa_fcxp.c
@@ -149,11 +149,6 @@ bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
149} 149}
150 150
151static void 151static void
152bfa_fcxp_initdone(struct bfa_s *bfa)
153{
154}
155
156static void
157bfa_fcxp_detach(struct bfa_s *bfa) 152bfa_fcxp_detach(struct bfa_s *bfa)
158{ 153{
159} 154}
@@ -225,7 +220,7 @@ bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
225 bfa_status_t req_status, u32 rsp_len, 220 bfa_status_t req_status, u32 rsp_len,
226 u32 resid_len, struct fchs_s *rsp_fchs) 221 u32 resid_len, struct fchs_s *rsp_fchs)
227{ 222{
228 /**discarded fcxp completion */ 223 /* discarded fcxp completion */
229} 224}
230 225
231static void 226static void
@@ -527,11 +522,8 @@ bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
527 if (nreq_sgles > BFI_SGE_INLINE) { 522 if (nreq_sgles > BFI_SGE_INLINE) {
528 nreq_sgpg = BFA_SGPG_NPAGE(nreq_sgles); 523 nreq_sgpg = BFA_SGPG_NPAGE(nreq_sgles);
529 524
530 if (bfa_sgpg_malloc 525 if (bfa_sgpg_malloc(bfa, &fcxp->req_sgpg_q, nreq_sgpg)
531 (bfa, &fcxp->req_sgpg_q, nreq_sgpg)
532 != BFA_STATUS_OK) { 526 != BFA_STATUS_OK) {
533 /* bfa_sgpg_wait(bfa, &fcxp->req_sgpg_wqe,
534 nreq_sgpg); */
535 /* 527 /*
536 * TODO 528 * TODO
537 */ 529 */
@@ -685,7 +677,7 @@ bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
685 fcxp->send_cbarg = cbarg; 677 fcxp->send_cbarg = cbarg;
686 678
687 /** 679 /**
688 * If no room in CPE queue, wait for 680 * If no room in CPE queue, wait for space in request queue
689 */ 681 */
690 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP); 682 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
691 if (!send_req) { 683 if (!send_req) {
diff --git a/drivers/scsi/bfa/bfa_fwimg_priv.h b/drivers/scsi/bfa/bfa_fwimg_priv.h
index 1ec1355924d9..d33e19e54395 100644
--- a/drivers/scsi/bfa/bfa_fwimg_priv.h
+++ b/drivers/scsi/bfa/bfa_fwimg_priv.h
@@ -21,11 +21,24 @@
21#define BFI_FLASH_CHUNK_SZ 256 /* Flash chunk size */ 21#define BFI_FLASH_CHUNK_SZ 256 /* Flash chunk size */
22#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32)) 22#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
23 23
24extern u32 *bfi_image_ct_get_chunk(u32 off); 24/**
25extern u32 bfi_image_ct_size; 25 * BFI FW image type
26extern u32 *bfi_image_cb_get_chunk(u32 off); 26 */
27extern u32 bfi_image_cb_size; 27enum {
28extern u32 *bfi_image_cb; 28 BFI_IMAGE_CB_FC,
29extern u32 *bfi_image_ct; 29 BFI_IMAGE_CT_FC,
30 BFI_IMAGE_CT_CNA,
31 BFI_IMAGE_MAX,
32};
33
34extern u32 *bfi_image_get_chunk(int type, uint32_t off);
35extern u32 bfi_image_get_size(int type);
36extern u32 bfi_image_ct_fc_size;
37extern u32 bfi_image_ct_cna_size;
38extern u32 bfi_image_cb_fc_size;
39extern u32 *bfi_image_ct_fc;
40extern u32 *bfi_image_ct_cna;
41extern u32 *bfi_image_cb_fc;
42
30 43
31#endif /* __BFA_FWIMG_PRIV_H__ */ 44#endif /* __BFA_FWIMG_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c
index 871a4e28575c..edfd729445cf 100644
--- a/drivers/scsi/bfa/bfa_hw_cb.c
+++ b/drivers/scsi/bfa/bfa_hw_cb.c
@@ -152,4 +152,9 @@ bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
152 bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix; 152 bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix;
153} 153}
154 154
155 155void
156bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end)
157{
158 *start = BFA_MSIX_RME_Q0;
159 *end = BFA_MSIX_RME_Q7;
160}
diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c
index 76ceb9a4bf2f..a357fb3066fd 100644
--- a/drivers/scsi/bfa/bfa_hw_ct.c
+++ b/drivers/scsi/bfa/bfa_hw_ct.c
@@ -168,4 +168,9 @@ bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
168 bfa_ioc_isr_mode_set(&bfa->ioc, msix); 168 bfa_ioc_isr_mode_set(&bfa->ioc, msix);
169} 169}
170 170
171 171void
172bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end)
173{
174 *start = BFA_MSIX_RME_Q0;
175 *end = BFA_MSIX_RME_Q3;
176}
diff --git a/drivers/scsi/bfa/bfa_intr.c b/drivers/scsi/bfa/bfa_intr.c
index 0eba3f930d5b..493678889b24 100644
--- a/drivers/scsi/bfa/bfa_intr.c
+++ b/drivers/scsi/bfa/bfa_intr.c
@@ -134,6 +134,7 @@ bfa_isr_enable(struct bfa_s *bfa)
134 134
135 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr_unmask); 135 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr_unmask);
136 bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, ~intr_unmask); 136 bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, ~intr_unmask);
137 bfa->iocfc.intr_mask = ~intr_unmask;
137 bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0); 138 bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
138} 139}
139 140
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index e038bc9769f6..8e78f20110a5 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -59,22 +59,18 @@ BFA_TRC_FILE(CNA, IOC);
59 ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc)) 59 ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
60#define bfa_ioc_firmware_unlock(__ioc) \ 60#define bfa_ioc_firmware_unlock(__ioc) \
61 ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc)) 61 ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
62#define bfa_ioc_fwimg_get_chunk(__ioc, __off) \
63 ((__ioc)->ioc_hwif->ioc_fwimg_get_chunk(__ioc, __off))
64#define bfa_ioc_fwimg_get_size(__ioc) \
65 ((__ioc)->ioc_hwif->ioc_fwimg_get_size(__ioc))
66#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc)) 62#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
67#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc)) 63#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
68#define bfa_ioc_notify_hbfail(__ioc) \ 64#define bfa_ioc_notify_hbfail(__ioc) \
69 ((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc)) 65 ((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))
66#define bfa_ioc_is_optrom(__ioc) \
67 (bfi_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
70 68
71bfa_boolean_t bfa_auto_recover = BFA_TRUE; 69bfa_boolean_t bfa_auto_recover = BFA_TRUE;
72 70
73/* 71/*
74 * forward declarations 72 * forward declarations
75 */ 73 */
76static void bfa_ioc_aen_post(struct bfa_ioc_s *bfa,
77 enum bfa_ioc_aen_event event);
78static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc); 74static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
79static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc); 75static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc);
80static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force); 76static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
@@ -88,6 +84,7 @@ static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force);
88static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc); 84static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
89static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc); 85static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
90static void bfa_ioc_recover(struct bfa_ioc_s *ioc); 86static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
87static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
91static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc); 88static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
92static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc); 89static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
93 90
@@ -433,6 +430,7 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
433 switch (event) { 430 switch (event) {
434 case IOC_E_FWRSP_GETATTR: 431 case IOC_E_FWRSP_GETATTR:
435 bfa_ioc_timer_stop(ioc); 432 bfa_ioc_timer_stop(ioc);
433 bfa_ioc_check_attr_wwns(ioc);
436 bfa_fsm_set_state(ioc, bfa_ioc_sm_op); 434 bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
437 break; 435 break;
438 436
@@ -879,8 +877,8 @@ bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
879 struct bfi_ioc_image_hdr_s *drv_fwhdr; 877 struct bfi_ioc_image_hdr_s *drv_fwhdr;
880 int i; 878 int i;
881 879
882 drv_fwhdr = 880 drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
883 (struct bfi_ioc_image_hdr_s *)bfa_ioc_fwimg_get_chunk(ioc, 0); 881 bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
884 882
885 for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) { 883 for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
886 if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) { 884 if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
@@ -907,12 +905,13 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc)
907 /** 905 /**
908 * If bios/efi boot (flash based) -- return true 906 * If bios/efi boot (flash based) -- return true
909 */ 907 */
910 if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ) 908 if (bfa_ioc_is_optrom(ioc))
911 return BFA_TRUE; 909 return BFA_TRUE;
912 910
913 bfa_ioc_fwver_get(ioc, &fwhdr); 911 bfa_ioc_fwver_get(ioc, &fwhdr);
914 drv_fwhdr = 912 drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
915 (struct bfi_ioc_image_hdr_s *)bfa_ioc_fwimg_get_chunk(ioc, 0); 913 bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
914
916 915
917 if (fwhdr.signature != drv_fwhdr->signature) { 916 if (fwhdr.signature != drv_fwhdr->signature) {
918 bfa_trc(ioc, fwhdr.signature); 917 bfa_trc(ioc, fwhdr.signature);
@@ -980,8 +979,13 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
980 /** 979 /**
981 * If IOC function is disabled and firmware version is same, 980 * If IOC function is disabled and firmware version is same,
982 * just re-enable IOC. 981 * just re-enable IOC.
982 *
983 * If option rom, IOC must not be in operational state. With
984 * convergence, IOC will be in operational state when 2nd driver
985 * is loaded.
983 */ 986 */
984 if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) { 987 if (ioc_fwstate == BFI_IOC_DISABLED ||
988 (!bfa_ioc_is_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
985 bfa_trc(ioc, ioc_fwstate); 989 bfa_trc(ioc, ioc_fwstate);
986 990
987 /** 991 /**
@@ -1125,21 +1129,22 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1125 /** 1129 /**
1126 * Flash based firmware boot 1130 * Flash based firmware boot
1127 */ 1131 */
1128 bfa_trc(ioc, bfa_ioc_fwimg_get_size(ioc)); 1132 bfa_trc(ioc, bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)));
1129 if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ) 1133 if (bfa_ioc_is_optrom(ioc))
1130 boot_type = BFI_BOOT_TYPE_FLASH; 1134 boot_type = BFI_BOOT_TYPE_FLASH;
1131 fwimg = bfa_ioc_fwimg_get_chunk(ioc, chunkno); 1135 fwimg = bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
1136
1132 1137
1133 pgnum = bfa_ioc_smem_pgnum(ioc, loff); 1138 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1134 pgoff = bfa_ioc_smem_pgoff(ioc, loff); 1139 pgoff = bfa_ioc_smem_pgoff(ioc, loff);
1135 1140
1136 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); 1141 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
1137 1142
1138 for (i = 0; i < bfa_ioc_fwimg_get_size(ioc); i++) { 1143 for (i = 0; i < bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
1139 1144
1140 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) { 1145 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1141 chunkno = BFA_IOC_FLASH_CHUNK_NO(i); 1146 chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1142 fwimg = bfa_ioc_fwimg_get_chunk(ioc, 1147 fwimg = bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
1143 BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); 1148 BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1144 } 1149 }
1145 1150
@@ -1188,6 +1193,7 @@ bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1188 struct bfi_ioc_attr_s *attr = ioc->attr; 1193 struct bfi_ioc_attr_s *attr = ioc->attr;
1189 1194
1190 attr->adapter_prop = bfa_os_ntohl(attr->adapter_prop); 1195 attr->adapter_prop = bfa_os_ntohl(attr->adapter_prop);
1196 attr->card_type = bfa_os_ntohl(attr->card_type);
1191 attr->maxfrsize = bfa_os_ntohs(attr->maxfrsize); 1197 attr->maxfrsize = bfa_os_ntohs(attr->maxfrsize);
1192 1198
1193 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR); 1199 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
@@ -1282,6 +1288,7 @@ bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param)
1282 bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_INITING); 1288 bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_INITING);
1283 } 1289 }
1284 1290
1291 bfa_ioc_msgflush(ioc);
1285 bfa_ioc_download_fw(ioc, boot_type, boot_param); 1292 bfa_ioc_download_fw(ioc, boot_type, boot_param);
1286 1293
1287 /** 1294 /**
@@ -1416,7 +1423,7 @@ bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
1416{ 1423{
1417 ioc->ioc_mc = mc; 1424 ioc->ioc_mc = mc;
1418 ioc->pcidev = *pcidev; 1425 ioc->pcidev = *pcidev;
1419 ioc->ctdev = (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT); 1426 ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id);
1420 ioc->cna = ioc->ctdev && !ioc->fcmode; 1427 ioc->cna = ioc->ctdev && !ioc->fcmode;
1421 1428
1422 /** 1429 /**
@@ -1607,6 +1614,13 @@ bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
1607 bfa_fsm_send_event(ioc, IOC_E_HWERROR); 1614 bfa_fsm_send_event(ioc, IOC_E_HWERROR);
1608} 1615}
1609 1616
1617void
1618bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
1619{
1620 ioc->fcmode = BFA_TRUE;
1621 ioc->port_id = bfa_ioc_pcifn(ioc);
1622}
1623
1610#ifndef BFA_BIOS_BUILD 1624#ifndef BFA_BIOS_BUILD
1611 1625
1612/** 1626/**
@@ -1696,6 +1710,9 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
1696 /* For now, model descr uses same model string */ 1710 /* For now, model descr uses same model string */
1697 bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr); 1711 bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
1698 1712
1713 ad_attr->card_type = ioc_attr->card_type;
1714 ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
1715
1699 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop)) 1716 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
1700 ad_attr->prototype = 1; 1717 ad_attr->prototype = 1;
1701 else 1718 else
@@ -1779,28 +1796,17 @@ void
1779bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model) 1796bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
1780{ 1797{
1781 struct bfi_ioc_attr_s *ioc_attr; 1798 struct bfi_ioc_attr_s *ioc_attr;
1782 u8 nports;
1783 u8 max_speed;
1784 1799
1785 bfa_assert(model); 1800 bfa_assert(model);
1786 bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN); 1801 bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
1787 1802
1788 ioc_attr = ioc->attr; 1803 ioc_attr = ioc->attr;
1789 1804
1790 nports = bfa_ioc_get_nports(ioc);
1791 max_speed = bfa_ioc_speed_sup(ioc);
1792
1793 /** 1805 /**
1794 * model name 1806 * model name
1795 */ 1807 */
1796 if (max_speed == 10) { 1808 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
1797 strcpy(model, "BR-10?0"); 1809 BFA_MFG_NAME, ioc_attr->card_type);
1798 model[5] = '0' + nports;
1799 } else {
1800 strcpy(model, "Brocade-??5");
1801 model[8] = '0' + max_speed;
1802 model[9] = '0' + nports;
1803 }
1804} 1810}
1805 1811
1806enum bfa_ioc_state 1812enum bfa_ioc_state
@@ -1827,78 +1833,54 @@ bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
1827} 1833}
1828 1834
1829/** 1835/**
1830 * hal_wwn_public 1836 * bfa_wwn_public
1831 */ 1837 */
1832wwn_t 1838wwn_t
1833bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc) 1839bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc)
1834{ 1840{
1835 union { 1841 return ioc->attr->pwwn;
1836 wwn_t wwn;
1837 u8 byte[sizeof(wwn_t)];
1838 }
1839 w;
1840
1841 w.wwn = ioc->attr->mfg_wwn;
1842
1843 if (bfa_ioc_portid(ioc) == 1)
1844 w.byte[7]++;
1845
1846 return w.wwn;
1847} 1842}
1848 1843
1849wwn_t 1844wwn_t
1850bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc) 1845bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc)
1851{ 1846{
1852 union { 1847 return ioc->attr->nwwn;
1853 wwn_t wwn; 1848}
1854 u8 byte[sizeof(wwn_t)];
1855 }
1856 w;
1857
1858 w.wwn = ioc->attr->mfg_wwn;
1859
1860 if (bfa_ioc_portid(ioc) == 1)
1861 w.byte[7]++;
1862 1849
1863 w.byte[0] = 0x20; 1850u64
1851bfa_ioc_get_adid(struct bfa_ioc_s *ioc)
1852{
1853 return ioc->attr->mfg_pwwn;
1854}
1864 1855
1865 return w.wwn; 1856mac_t
1857bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
1858{
1859 /*
1860 * Currently mfg mac is used as FCoE enode mac (not configured by PBC)
1861 */
1862 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
1863 return bfa_ioc_get_mfg_mac(ioc);
1864 else
1865 return ioc->attr->mac;
1866} 1866}
1867 1867
1868wwn_t 1868wwn_t
1869bfa_ioc_get_wwn_naa5(struct bfa_ioc_s *ioc, u16 inst) 1869bfa_ioc_get_mfg_pwwn(struct bfa_ioc_s *ioc)
1870{ 1870{
1871 union { 1871 return ioc->attr->mfg_pwwn;
1872 wwn_t wwn;
1873 u8 byte[sizeof(wwn_t)];
1874 }
1875 w , w5;
1876
1877 bfa_trc(ioc, inst);
1878
1879 w.wwn = ioc->attr->mfg_wwn;
1880 w5.byte[0] = 0x50 | w.byte[2] >> 4;
1881 w5.byte[1] = w.byte[2] << 4 | w.byte[3] >> 4;
1882 w5.byte[2] = w.byte[3] << 4 | w.byte[4] >> 4;
1883 w5.byte[3] = w.byte[4] << 4 | w.byte[5] >> 4;
1884 w5.byte[4] = w.byte[5] << 4 | w.byte[6] >> 4;
1885 w5.byte[5] = w.byte[6] << 4 | w.byte[7] >> 4;
1886 w5.byte[6] = w.byte[7] << 4 | (inst & 0x0f00) >> 8;
1887 w5.byte[7] = (inst & 0xff);
1888
1889 return w5.wwn;
1890} 1872}
1891 1873
1892u64 1874wwn_t
1893bfa_ioc_get_adid(struct bfa_ioc_s *ioc) 1875bfa_ioc_get_mfg_nwwn(struct bfa_ioc_s *ioc)
1894{ 1876{
1895 return ioc->attr->mfg_wwn; 1877 return ioc->attr->mfg_nwwn;
1896} 1878}
1897 1879
1898mac_t 1880mac_t
1899bfa_ioc_get_mac(struct bfa_ioc_s *ioc) 1881bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
1900{ 1882{
1901 mac_t mac; 1883 mac_t mac;
1902 1884
1903 mac = ioc->attr->mfg_mac; 1885 mac = ioc->attr->mfg_mac;
1904 mac.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc); 1886 mac.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
@@ -1906,23 +1888,16 @@ bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
1906 return mac; 1888 return mac;
1907} 1889}
1908 1890
1909void
1910bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
1911{
1912 ioc->fcmode = BFA_TRUE;
1913 ioc->port_id = bfa_ioc_pcifn(ioc);
1914}
1915
1916bfa_boolean_t 1891bfa_boolean_t
1917bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc) 1892bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
1918{ 1893{
1919 return ioc->fcmode || (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_CT); 1894 return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
1920} 1895}
1921 1896
1922/** 1897/**
1923 * Send AEN notification 1898 * Send AEN notification
1924 */ 1899 */
1925static void 1900void
1926bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event) 1901bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
1927{ 1902{
1928 union bfa_aen_data_u aen_data; 1903 union bfa_aen_data_u aen_data;
@@ -2070,19 +2045,16 @@ bfa_ioc_recover(struct bfa_ioc_s *ioc)
2070 bfa_fsm_send_event(ioc, IOC_E_HBFAIL); 2045 bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2071} 2046}
2072 2047
2073#else
2074
2075static void 2048static void
2076bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event) 2049bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
2077{ 2050{
2078} 2051 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
2052 return;
2079 2053
2080static void 2054 if (ioc->attr->nwwn == 0)
2081bfa_ioc_recover(struct bfa_ioc_s *ioc) 2055 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_NWWN);
2082{ 2056 if (ioc->attr->pwwn == 0)
2083 bfa_assert(0); 2057 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_PWWN);
2084} 2058}
2085 2059
2086#endif 2060#endif
2087
2088
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index d0804406ea1a..cae05b251c99 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -186,9 +186,6 @@ struct bfa_ioc_hwif_s {
186 bfa_status_t (*ioc_pll_init) (struct bfa_ioc_s *ioc); 186 bfa_status_t (*ioc_pll_init) (struct bfa_ioc_s *ioc);
187 bfa_boolean_t (*ioc_firmware_lock) (struct bfa_ioc_s *ioc); 187 bfa_boolean_t (*ioc_firmware_lock) (struct bfa_ioc_s *ioc);
188 void (*ioc_firmware_unlock) (struct bfa_ioc_s *ioc); 188 void (*ioc_firmware_unlock) (struct bfa_ioc_s *ioc);
189 u32 * (*ioc_fwimg_get_chunk) (struct bfa_ioc_s *ioc,
190 u32 off);
191 u32 (*ioc_fwimg_get_size) (struct bfa_ioc_s *ioc);
192 void (*ioc_reg_init) (struct bfa_ioc_s *ioc); 189 void (*ioc_reg_init) (struct bfa_ioc_s *ioc);
193 void (*ioc_map_port) (struct bfa_ioc_s *ioc); 190 void (*ioc_map_port) (struct bfa_ioc_s *ioc);
194 void (*ioc_isr_mode_set) (struct bfa_ioc_s *ioc, 191 void (*ioc_isr_mode_set) (struct bfa_ioc_s *ioc,
@@ -214,6 +211,10 @@ struct bfa_ioc_hwif_s {
214 211
215#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++) 212#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++)
216#define BFA_IOC_FWIMG_MINSZ (16 * 1024) 213#define BFA_IOC_FWIMG_MINSZ (16 * 1024)
214#define BFA_IOC_FWIMG_TYPE(__ioc) \
215 (((__ioc)->ctdev) ? \
216 (((__ioc)->fcmode) ? BFI_IMAGE_CT_FC : BFI_IMAGE_CT_CNA) : \
217 BFI_IMAGE_CB_FC)
217 218
218#define BFA_IOC_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS) 219#define BFA_IOC_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS)
219#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS) 220#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS)
@@ -296,14 +297,17 @@ void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
296 struct bfi_ioc_image_hdr_s *fwhdr); 297 struct bfi_ioc_image_hdr_s *fwhdr);
297bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, 298bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
298 struct bfi_ioc_image_hdr_s *fwhdr); 299 struct bfi_ioc_image_hdr_s *fwhdr);
300void bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event);
299 301
300/* 302/*
301 * bfa mfg wwn API functions 303 * bfa mfg wwn API functions
302 */ 304 */
303wwn_t bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc); 305wwn_t bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc);
304wwn_t bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc); 306wwn_t bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc);
305wwn_t bfa_ioc_get_wwn_naa5(struct bfa_ioc_s *ioc, u16 inst);
306mac_t bfa_ioc_get_mac(struct bfa_ioc_s *ioc); 307mac_t bfa_ioc_get_mac(struct bfa_ioc_s *ioc);
308wwn_t bfa_ioc_get_mfg_pwwn(struct bfa_ioc_s *ioc);
309wwn_t bfa_ioc_get_mfg_nwwn(struct bfa_ioc_s *ioc);
310mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc);
307u64 bfa_ioc_get_adid(struct bfa_ioc_s *ioc); 311u64 bfa_ioc_get_adid(struct bfa_ioc_s *ioc);
308 312
309#endif /* __BFA_IOC_H__ */ 313#endif /* __BFA_IOC_H__ */
diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c
index 3ce85319f739..324bdde7ea2e 100644
--- a/drivers/scsi/bfa/bfa_ioc_cb.c
+++ b/drivers/scsi/bfa/bfa_ioc_cb.c
@@ -33,26 +33,13 @@ BFA_TRC_FILE(CNA, IOC_CB);
33static bfa_status_t bfa_ioc_cb_pll_init(struct bfa_ioc_s *ioc); 33static bfa_status_t bfa_ioc_cb_pll_init(struct bfa_ioc_s *ioc);
34static bfa_boolean_t bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc); 34static bfa_boolean_t bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc);
35static void bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc); 35static void bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc);
36static u32 *bfa_ioc_cb_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off);
37static u32 bfa_ioc_cb_fwimg_get_size(struct bfa_ioc_s *ioc);
38static void bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc); 36static void bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc);
39static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc); 37static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc);
40static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix); 38static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
41static void bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc); 39static void bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc);
42static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc); 40static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc);
43 41
44struct bfa_ioc_hwif_s hwif_cb = { 42struct bfa_ioc_hwif_s hwif_cb;
45 bfa_ioc_cb_pll_init,
46 bfa_ioc_cb_firmware_lock,
47 bfa_ioc_cb_firmware_unlock,
48 bfa_ioc_cb_fwimg_get_chunk,
49 bfa_ioc_cb_fwimg_get_size,
50 bfa_ioc_cb_reg_init,
51 bfa_ioc_cb_map_port,
52 bfa_ioc_cb_isr_mode_set,
53 bfa_ioc_cb_notify_hbfail,
54 bfa_ioc_cb_ownership_reset,
55};
56 43
57/** 44/**
58 * Called from bfa_ioc_attach() to map asic specific calls. 45 * Called from bfa_ioc_attach() to map asic specific calls.
@@ -60,19 +47,16 @@ struct bfa_ioc_hwif_s hwif_cb = {
60void 47void
61bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc) 48bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
62{ 49{
63 ioc->ioc_hwif = &hwif_cb; 50 hwif_cb.ioc_pll_init = bfa_ioc_cb_pll_init;
64} 51 hwif_cb.ioc_firmware_lock = bfa_ioc_cb_firmware_lock;
65 52 hwif_cb.ioc_firmware_unlock = bfa_ioc_cb_firmware_unlock;
66static u32 * 53 hwif_cb.ioc_reg_init = bfa_ioc_cb_reg_init;
67bfa_ioc_cb_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off) 54 hwif_cb.ioc_map_port = bfa_ioc_cb_map_port;
68{ 55 hwif_cb.ioc_isr_mode_set = bfa_ioc_cb_isr_mode_set;
69 return bfi_image_cb_get_chunk(off); 56 hwif_cb.ioc_notify_hbfail = bfa_ioc_cb_notify_hbfail;
70} 57 hwif_cb.ioc_ownership_reset = bfa_ioc_cb_ownership_reset;
71 58
72static u32 59 ioc->ioc_hwif = &hwif_cb;
73bfa_ioc_cb_fwimg_get_size(struct bfa_ioc_s *ioc)
74{
75 return bfi_image_cb_size;
76} 60}
77 61
78/** 62/**
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index 20b58ad5f95c..68f027da001e 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -33,27 +33,13 @@ BFA_TRC_FILE(CNA, IOC_CT);
33static bfa_status_t bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc); 33static bfa_status_t bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc);
34static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc); 34static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
35static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc); 35static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
36static u32* bfa_ioc_ct_fwimg_get_chunk(struct bfa_ioc_s *ioc,
37 u32 off);
38static u32 bfa_ioc_ct_fwimg_get_size(struct bfa_ioc_s *ioc);
39static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc); 36static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc);
40static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc); 37static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc);
41static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix); 38static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
42static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc); 39static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc);
43static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc); 40static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
44 41
45struct bfa_ioc_hwif_s hwif_ct = { 42struct bfa_ioc_hwif_s hwif_ct;
46 bfa_ioc_ct_pll_init,
47 bfa_ioc_ct_firmware_lock,
48 bfa_ioc_ct_firmware_unlock,
49 bfa_ioc_ct_fwimg_get_chunk,
50 bfa_ioc_ct_fwimg_get_size,
51 bfa_ioc_ct_reg_init,
52 bfa_ioc_ct_map_port,
53 bfa_ioc_ct_isr_mode_set,
54 bfa_ioc_ct_notify_hbfail,
55 bfa_ioc_ct_ownership_reset,
56};
57 43
58/** 44/**
59 * Called from bfa_ioc_attach() to map asic specific calls. 45 * Called from bfa_ioc_attach() to map asic specific calls.
@@ -61,19 +47,16 @@ struct bfa_ioc_hwif_s hwif_ct = {
61void 47void
62bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc) 48bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
63{ 49{
64 ioc->ioc_hwif = &hwif_ct; 50 hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
65} 51 hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
52 hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
53 hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
54 hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
55 hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
56 hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail;
57 hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
66 58
67static u32* 59 ioc->ioc_hwif = &hwif_ct;
68bfa_ioc_ct_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off)
69{
70 return bfi_image_ct_get_chunk(off);
71}
72
73static u32
74bfa_ioc_ct_fwimg_get_size(struct bfa_ioc_s *ioc)
75{
76 return bfi_image_ct_size;
77} 60}
78 61
79/** 62/**
@@ -95,7 +78,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
95 /** 78 /**
96 * If bios boot (flash based) -- do not increment usage count 79 * If bios boot (flash based) -- do not increment usage count
97 */ 80 */
98 if (bfa_ioc_ct_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ) 81 if (bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < BFA_IOC_FWIMG_MINSZ)
99 return BFA_TRUE; 82 return BFA_TRUE;
100 83
101 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 84 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
@@ -146,9 +129,14 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
146 129
147 /** 130 /**
148 * Firmware lock is relevant only for CNA. 131 * Firmware lock is relevant only for CNA.
132 */
133 if (!ioc->cna)
134 return;
135
136 /**
149 * If bios boot (flash based) -- do not decrement usage count 137 * If bios boot (flash based) -- do not decrement usage count
150 */ 138 */
151 if (!ioc->cna || bfa_ioc_ct_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ) 139 if (bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < BFA_IOC_FWIMG_MINSZ)
152 return; 140 return;
153 141
154 /** 142 /**
@@ -388,10 +376,35 @@ bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc)
388 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk | 376 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk |
389 __APP_PLL_425_ENABLE); 377 __APP_PLL_425_ENABLE);
390 378
379 /**
380 * PSS memory reset is asserted at power-on-reset. Need to clear
381 * this before running EDRAM BISTR
382 */
383 if (ioc->cna) {
384 bfa_reg_write((rb + PMM_1T_RESET_REG_P0), __PMM_1T_RESET_P);
385 bfa_reg_write((rb + PMM_1T_RESET_REG_P1), __PMM_1T_RESET_P);
386 }
387
388 r32 = bfa_reg_read((rb + PSS_CTL_REG));
389 r32 &= ~__PSS_LMEM_RESET;
390 bfa_reg_write((rb + PSS_CTL_REG), r32);
391 bfa_os_udelay(1000);
392
393 if (ioc->cna) {
394 bfa_reg_write((rb + PMM_1T_RESET_REG_P0), 0);
395 bfa_reg_write((rb + PMM_1T_RESET_REG_P1), 0);
396 }
397
391 bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START); 398 bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START);
392 bfa_os_udelay(1000); 399 bfa_os_udelay(1000);
393 r32 = bfa_reg_read((rb + MBIST_STAT_REG)); 400 r32 = bfa_reg_read((rb + MBIST_STAT_REG));
394 bfa_trc(ioc, r32); 401 bfa_trc(ioc, r32);
402
403 /**
404 * Clear BISTR
405 */
406 bfa_reg_write((rb + MBIST_CTL_REG), 0);
407
395 /* 408 /*
396 * release semaphore. 409 * release semaphore.
397 */ 410 */
diff --git a/drivers/scsi/bfa/bfa_iocfc.c b/drivers/scsi/bfa/bfa_iocfc.c
index a76de2669bfc..90820be99864 100644
--- a/drivers/scsi/bfa/bfa_iocfc.c
+++ b/drivers/scsi/bfa/bfa_iocfc.c
@@ -113,7 +113,6 @@ bfa_iocfc_send_cfg(void *bfa_arg)
113 bfa_assert(cfg->fwcfg.num_cqs <= BFI_IOC_MAX_CQS); 113 bfa_assert(cfg->fwcfg.num_cqs <= BFI_IOC_MAX_CQS);
114 bfa_trc(bfa, cfg->fwcfg.num_cqs); 114 bfa_trc(bfa, cfg->fwcfg.num_cqs);
115 115
116 iocfc->cfgdone = BFA_FALSE;
117 bfa_iocfc_reset_queues(bfa); 116 bfa_iocfc_reset_queues(bfa);
118 117
119 /** 118 /**
@@ -145,6 +144,15 @@ bfa_iocfc_send_cfg(void *bfa_arg)
145 } 144 }
146 145
147 /** 146 /**
147 * Enable interrupt coalescing if it is driver init path
148 * and not ioc disable/enable path.
149 */
150 if (!iocfc->cfgdone)
151 cfg_info->intr_attr.coalesce = BFA_TRUE;
152
153 iocfc->cfgdone = BFA_FALSE;
154
155 /**
148 * dma map IOC configuration itself 156 * dma map IOC configuration itself
149 */ 157 */
150 bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ, 158 bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
@@ -170,7 +178,7 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
170 /** 178 /**
171 * Initialize chip specific handlers. 179 * Initialize chip specific handlers.
172 */ 180 */
173 if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT) { 181 if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) {
174 iocfc->hwif.hw_reginit = bfa_hwct_reginit; 182 iocfc->hwif.hw_reginit = bfa_hwct_reginit;
175 iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack; 183 iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
176 iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack; 184 iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
@@ -179,6 +187,7 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
179 iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall; 187 iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
180 iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set; 188 iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
181 iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs; 189 iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
190 iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
182 } else { 191 } else {
183 iocfc->hwif.hw_reginit = bfa_hwcb_reginit; 192 iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
184 iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack; 193 iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack;
@@ -188,6 +197,7 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
188 iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall; 197 iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
189 iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set; 198 iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
190 iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs; 199 iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
200 iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
191 } 201 }
192 202
193 iocfc->hwif.hw_reginit(bfa); 203 iocfc->hwif.hw_reginit(bfa);
@@ -291,18 +301,6 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
291} 301}
292 302
293/** 303/**
294 * BFA submodules initialization completion notification.
295 */
296static void
297bfa_iocfc_initdone_submod(struct bfa_s *bfa)
298{
299 int i;
300
301 for (i = 0; hal_mods[i]; i++)
302 hal_mods[i]->initdone(bfa);
303}
304
305/**
306 * Start BFA submodules. 304 * Start BFA submodules.
307 */ 305 */
308static void 306static void
@@ -376,7 +374,6 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
376 struct bfa_iocfc_s *iocfc = &bfa->iocfc; 374 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
377 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; 375 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
378 struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg; 376 struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;
379 struct bfi_iocfc_cfg_s *cfginfo = iocfc->cfginfo;
380 377
381 fwcfg->num_cqs = fwcfg->num_cqs; 378 fwcfg->num_cqs = fwcfg->num_cqs;
382 fwcfg->num_ioim_reqs = bfa_os_ntohs(fwcfg->num_ioim_reqs); 379 fwcfg->num_ioim_reqs = bfa_os_ntohs(fwcfg->num_ioim_reqs);
@@ -385,15 +382,13 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
385 fwcfg->num_uf_bufs = bfa_os_ntohs(fwcfg->num_uf_bufs); 382 fwcfg->num_uf_bufs = bfa_os_ntohs(fwcfg->num_uf_bufs);
386 fwcfg->num_rports = bfa_os_ntohs(fwcfg->num_rports); 383 fwcfg->num_rports = bfa_os_ntohs(fwcfg->num_rports);
387 384
388 cfginfo->intr_attr.coalesce = cfgrsp->intr_attr.coalesce;
389 cfginfo->intr_attr.delay = bfa_os_ntohs(cfgrsp->intr_attr.delay);
390 cfginfo->intr_attr.latency = bfa_os_ntohs(cfgrsp->intr_attr.latency);
391
392 iocfc->cfgdone = BFA_TRUE; 385 iocfc->cfgdone = BFA_TRUE;
393 386
394 /** 387 /**
395 * Configuration is complete - initialize/start submodules 388 * Configuration is complete - initialize/start submodules
396 */ 389 */
390 bfa_fcport_init(bfa);
391
397 if (iocfc->action == BFA_IOCFC_ACT_INIT) 392 if (iocfc->action == BFA_IOCFC_ACT_INIT)
398 bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa); 393 bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
399 else 394 else
@@ -531,7 +526,6 @@ bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
531 return; 526 return;
532 } 527 }
533 528
534 bfa_iocfc_initdone_submod(bfa);
535 bfa_iocfc_send_cfg(bfa); 529 bfa_iocfc_send_cfg(bfa);
536} 530}
537 531
@@ -625,9 +619,9 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
625 bfa->trcmod, bfa->aen, bfa->logm); 619 bfa->trcmod, bfa->aen, bfa->logm);
626 620
627 /** 621 /**
628 * Choose FC (ssid: 0x1C) v/s FCoE (ssid: 0x14) mode. 622 * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
629 */ 623 */
630 if (0) 624 if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
631 bfa_ioc_set_fcmode(&bfa->ioc); 625 bfa_ioc_set_fcmode(&bfa->ioc);
632 626
633 bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC); 627 bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC);
@@ -748,10 +742,20 @@ bfa_adapter_get_id(struct bfa_s *bfa)
748void 742void
749bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr) 743bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
750{ 744{
751 struct bfa_iocfc_s *iocfc = &bfa->iocfc; 745 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
746
747 attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;
748
749 attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
750 bfa_os_ntohs(iocfc->cfginfo->intr_attr.delay) :
751 bfa_os_ntohs(iocfc->cfgrsp->intr_attr.delay);
752
753 attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
754 bfa_os_ntohs(iocfc->cfginfo->intr_attr.latency) :
755 bfa_os_ntohs(iocfc->cfgrsp->intr_attr.latency);
756
757 attr->config = iocfc->cfg;
752 758
753 attr->intr_attr = iocfc->cfginfo->intr_attr;
754 attr->config = iocfc->cfg;
755} 759}
756 760
757bfa_status_t 761bfa_status_t
@@ -760,7 +764,10 @@ bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
760 struct bfa_iocfc_s *iocfc = &bfa->iocfc; 764 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
761 struct bfi_iocfc_set_intr_req_s *m; 765 struct bfi_iocfc_set_intr_req_s *m;
762 766
763 iocfc->cfginfo->intr_attr = *attr; 767 iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
768 iocfc->cfginfo->intr_attr.delay = bfa_os_htons(attr->delay);
769 iocfc->cfginfo->intr_attr.latency = bfa_os_htons(attr->latency);
770
764 if (!bfa_iocfc_is_operational(bfa)) 771 if (!bfa_iocfc_is_operational(bfa))
765 return BFA_STATUS_OK; 772 return BFA_STATUS_OK;
766 773
@@ -770,9 +777,10 @@ bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
770 777
771 bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ, 778 bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
772 bfa_lpuid(bfa)); 779 bfa_lpuid(bfa));
773 m->coalesce = attr->coalesce; 780 m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
774 m->delay = bfa_os_htons(attr->delay); 781 m->delay = iocfc->cfginfo->intr_attr.delay;
775 m->latency = bfa_os_htons(attr->latency); 782 m->latency = iocfc->cfginfo->intr_attr.latency;
783
776 784
777 bfa_trc(bfa, attr->delay); 785 bfa_trc(bfa, attr->delay);
778 bfa_trc(bfa, attr->latency); 786 bfa_trc(bfa, attr->latency);
@@ -872,15 +880,48 @@ bfa_iocfc_is_operational(struct bfa_s *bfa)
872 * Return boot target port wwns -- read from boot information in flash. 880 * Return boot target port wwns -- read from boot information in flash.
873 */ 881 */
874void 882void
875bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t **wwns) 883bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
876{ 884{
877 struct bfa_iocfc_s *iocfc = &bfa->iocfc; 885 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
878 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; 886 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
887 int i;
888
889 if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
890 bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
891 *nwwns = cfgrsp->pbc_cfg.nbluns;
892 for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
893 wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;
894
895 return;
896 }
879 897
880 *nwwns = cfgrsp->bootwwns.nwwns; 898 *nwwns = cfgrsp->bootwwns.nwwns;
881 *wwns = cfgrsp->bootwwns.wwn; 899 memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
900}
901
902void
903bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa, struct bfa_boot_pbc_s *pbcfg)
904{
905 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
906 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
907
908 pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
909 pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
910 pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
911 memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
882} 912}
883 913
914int
915bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
916{
917 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
918 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
919
920 memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
921 return cfgrsp->pbc_cfg.nvports;
922}
923
924
884#endif 925#endif
885 926
886 927
diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h
index fbb4bdc9d600..74a6a048d1fd 100644
--- a/drivers/scsi/bfa/bfa_iocfc.h
+++ b/drivers/scsi/bfa/bfa_iocfc.h
@@ -21,6 +21,7 @@
21#include <bfa_ioc.h> 21#include <bfa_ioc.h>
22#include <bfa.h> 22#include <bfa.h>
23#include <bfi/bfi_iocfc.h> 23#include <bfi/bfi_iocfc.h>
24#include <bfi/bfi_pbc.h>
24#include <bfa_callback_priv.h> 25#include <bfa_callback_priv.h>
25 26
26#define BFA_REQQ_NELEMS_MIN (4) 27#define BFA_REQQ_NELEMS_MIN (4)
@@ -62,6 +63,8 @@ struct bfa_hwif_s {
62 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix); 63 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
63 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap, 64 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
64 u32 *nvecs, u32 *maxvec); 65 u32 *nvecs, u32 *maxvec);
66 void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
67 u32 *end);
65}; 68};
66typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status); 69typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
67 70
@@ -103,7 +106,8 @@ struct bfa_iocfc_s {
103 struct bfa_hwif_s hwif; 106 struct bfa_hwif_s hwif;
104 107
105 bfa_cb_iocfc_t updateq_cbfn; /* bios callback function */ 108 bfa_cb_iocfc_t updateq_cbfn; /* bios callback function */
106 void *updateq_cbarg; /* bios callback arg */ 109 void *updateq_cbarg; /* bios callback arg */
110 u32 intr_mask;
107}; 111};
108 112
109#define bfa_lpuid(__bfa) bfa_ioc_portid(&(__bfa)->ioc) 113#define bfa_lpuid(__bfa) bfa_ioc_portid(&(__bfa)->ioc)
@@ -116,7 +120,10 @@ struct bfa_iocfc_s {
116#define bfa_isr_mode_set(__bfa, __msix) \ 120#define bfa_isr_mode_set(__bfa, __msix) \
117 ((__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix)) 121 ((__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix))
118#define bfa_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec) \ 122#define bfa_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec) \
119 (__bfa)->iocfc.hwif.hw_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec) 123 ((__bfa)->iocfc.hwif.hw_msix_getvecs(__bfa, __vecmap, \
124 __nvecs, __maxvec))
125#define bfa_msix_get_rme_range(__bfa, __start, __end) \
126 ((__bfa)->iocfc.hwif.hw_msix_get_rme_range(__bfa, __start, __end))
120 127
121/* 128/*
122 * FC specific IOC functions. 129 * FC specific IOC functions.
@@ -152,6 +159,7 @@ void bfa_hwcb_msix_uninstall(struct bfa_s *bfa);
152void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix); 159void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
153void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, 160void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap,
154 u32 *nvecs, u32 *maxvec); 161 u32 *nvecs, u32 *maxvec);
162void bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end);
155void bfa_hwct_reginit(struct bfa_s *bfa); 163void bfa_hwct_reginit(struct bfa_s *bfa);
156void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq); 164void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq);
157void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq); 165void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq);
@@ -161,11 +169,16 @@ void bfa_hwct_msix_uninstall(struct bfa_s *bfa);
161void bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix); 169void bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
162void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, 170void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap,
163 u32 *nvecs, u32 *maxvec); 171 u32 *nvecs, u32 *maxvec);
172void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end);
164 173
165void bfa_com_meminfo(bfa_boolean_t mincfg, u32 *dm_len); 174void bfa_com_meminfo(bfa_boolean_t mincfg, u32 *dm_len);
166void bfa_com_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi, 175void bfa_com_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi,
167 bfa_boolean_t mincfg); 176 bfa_boolean_t mincfg);
168void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t **wwns); 177void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns);
178void bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa,
179 struct bfa_boot_pbc_s *pbcfg);
180int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa,
181 struct bfi_pbc_vport_s *pbc_vport);
169 182
170#endif /* __BFA_IOCFC_H__ */ 183#endif /* __BFA_IOCFC_H__ */
171 184
diff --git a/drivers/scsi/bfa/bfa_ioim.c b/drivers/scsi/bfa/bfa_ioim.c
index 687f3d6e252b..4148ae09f998 100644
--- a/drivers/scsi/bfa/bfa_ioim.c
+++ b/drivers/scsi/bfa/bfa_ioim.c
@@ -133,6 +133,8 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
133 133
134 case BFA_IOIM_SM_IOTOV: 134 case BFA_IOIM_SM_IOTOV:
135 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); 135 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
136 list_del(&ioim->qe);
137 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
136 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, 138 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
137 __bfa_cb_ioim_pathtov, ioim); 139 __bfa_cb_ioim_pathtov, ioim);
138 break; 140 break;
@@ -182,6 +184,8 @@ bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
182 case BFA_IOIM_SM_ABORT: 184 case BFA_IOIM_SM_ABORT:
183 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); 185 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
184 bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe); 186 bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
187 list_del(&ioim->qe);
188 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
185 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, 189 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
186 ioim); 190 ioim);
187 break; 191 break;
@@ -189,6 +193,8 @@ bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
189 case BFA_IOIM_SM_HWFAIL: 193 case BFA_IOIM_SM_HWFAIL:
190 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); 194 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
191 bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe); 195 bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
196 list_del(&ioim->qe);
197 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
192 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, 198 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
193 ioim); 199 ioim);
194 break; 200 break;
@@ -210,18 +216,24 @@ bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
210 switch (event) { 216 switch (event) {
211 case BFA_IOIM_SM_COMP_GOOD: 217 case BFA_IOIM_SM_COMP_GOOD:
212 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); 218 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
219 list_del(&ioim->qe);
220 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
213 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, 221 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
214 __bfa_cb_ioim_good_comp, ioim); 222 __bfa_cb_ioim_good_comp, ioim);
215 break; 223 break;
216 224
217 case BFA_IOIM_SM_COMP: 225 case BFA_IOIM_SM_COMP:
218 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); 226 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
227 list_del(&ioim->qe);
228 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
219 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp, 229 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
220 ioim); 230 ioim);
221 break; 231 break;
222 232
223 case BFA_IOIM_SM_DONE: 233 case BFA_IOIM_SM_DONE:
224 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); 234 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
235 list_del(&ioim->qe);
236 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
225 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp, 237 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
226 ioim); 238 ioim);
227 break; 239 break;
@@ -234,8 +246,8 @@ bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
234 bfa_sm_set_state(ioim, bfa_ioim_sm_abort); 246 bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
235 else { 247 else {
236 bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull); 248 bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
237 bfa_reqq_wait(ioim->bfa, ioim->itnim->reqq, 249 bfa_reqq_wait(ioim->bfa, ioim->reqq,
238 &ioim->iosp->reqq_wait); 250 &ioim->iosp->reqq_wait);
239 } 251 }
240 break; 252 break;
241 253
@@ -247,13 +259,15 @@ bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
247 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup); 259 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
248 else { 260 else {
249 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull); 261 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
250 bfa_reqq_wait(ioim->bfa, ioim->itnim->reqq, 262 bfa_reqq_wait(ioim->bfa, ioim->reqq,
251 &ioim->iosp->reqq_wait); 263 &ioim->iosp->reqq_wait);
252 } 264 }
253 break; 265 break;
254 266
255 case BFA_IOIM_SM_HWFAIL: 267 case BFA_IOIM_SM_HWFAIL:
256 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); 268 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
269 list_del(&ioim->qe);
270 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
257 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, 271 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
258 ioim); 272 ioim);
259 break; 273 break;
@@ -287,12 +301,16 @@ bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
287 301
288 case BFA_IOIM_SM_ABORT_COMP: 302 case BFA_IOIM_SM_ABORT_COMP:
289 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); 303 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
304 list_del(&ioim->qe);
305 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
290 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, 306 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
291 ioim); 307 ioim);
292 break; 308 break;
293 309
294 case BFA_IOIM_SM_COMP_UTAG: 310 case BFA_IOIM_SM_COMP_UTAG:
295 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); 311 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
312 list_del(&ioim->qe);
313 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
296 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, 314 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
297 ioim); 315 ioim);
298 break; 316 break;
@@ -305,13 +323,15 @@ bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
305 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup); 323 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
306 else { 324 else {
307 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull); 325 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
308 bfa_reqq_wait(ioim->bfa, ioim->itnim->reqq, 326 bfa_reqq_wait(ioim->bfa, ioim->reqq,
309 &ioim->iosp->reqq_wait); 327 &ioim->iosp->reqq_wait);
310 } 328 }
311 break; 329 break;
312 330
313 case BFA_IOIM_SM_HWFAIL: 331 case BFA_IOIM_SM_HWFAIL:
314 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); 332 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
333 list_del(&ioim->qe);
334 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
315 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, 335 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
316 ioim); 336 ioim);
317 break; 337 break;
@@ -365,6 +385,8 @@ bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
365 385
366 case BFA_IOIM_SM_HWFAIL: 386 case BFA_IOIM_SM_HWFAIL:
367 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); 387 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
388 list_del(&ioim->qe);
389 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
368 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, 390 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
369 ioim); 391 ioim);
370 break; 392 break;
@@ -399,6 +421,8 @@ bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
399 case BFA_IOIM_SM_ABORT: 421 case BFA_IOIM_SM_ABORT:
400 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); 422 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
401 bfa_reqq_wcancel(&ioim->iosp->reqq_wait); 423 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
424 list_del(&ioim->qe);
425 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
402 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, 426 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
403 ioim); 427 ioim);
404 break; 428 break;
@@ -414,6 +438,8 @@ bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
414 case BFA_IOIM_SM_HWFAIL: 438 case BFA_IOIM_SM_HWFAIL:
415 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); 439 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
416 bfa_reqq_wcancel(&ioim->iosp->reqq_wait); 440 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
441 list_del(&ioim->qe);
442 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
417 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, 443 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
418 ioim); 444 ioim);
419 break; 445 break;
@@ -448,6 +474,8 @@ bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
448 case BFA_IOIM_SM_COMP: 474 case BFA_IOIM_SM_COMP:
449 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); 475 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
450 bfa_reqq_wcancel(&ioim->iosp->reqq_wait); 476 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
477 list_del(&ioim->qe);
478 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
451 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, 479 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
452 ioim); 480 ioim);
453 break; 481 break;
@@ -455,6 +483,8 @@ bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
455 case BFA_IOIM_SM_DONE: 483 case BFA_IOIM_SM_DONE:
456 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); 484 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
457 bfa_reqq_wcancel(&ioim->iosp->reqq_wait); 485 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
486 list_del(&ioim->qe);
487 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
458 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, 488 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
459 ioim); 489 ioim);
460 break; 490 break;
@@ -462,6 +492,8 @@ bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
462 case BFA_IOIM_SM_HWFAIL: 492 case BFA_IOIM_SM_HWFAIL:
463 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); 493 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
464 bfa_reqq_wcancel(&ioim->iosp->reqq_wait); 494 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
495 list_del(&ioim->qe);
496 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
465 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, 497 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
466 ioim); 498 ioim);
467 break; 499 break;
@@ -511,6 +543,8 @@ bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
511 case BFA_IOIM_SM_HWFAIL: 543 case BFA_IOIM_SM_HWFAIL:
512 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); 544 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
513 bfa_reqq_wcancel(&ioim->iosp->reqq_wait); 545 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
546 list_del(&ioim->qe);
547 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
514 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, 548 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
515 ioim); 549 ioim);
516 break; 550 break;
@@ -738,9 +772,9 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
738 /** 772 /**
739 * check for room in queue to send request now 773 * check for room in queue to send request now
740 */ 774 */
741 m = bfa_reqq_next(ioim->bfa, itnim->reqq); 775 m = bfa_reqq_next(ioim->bfa, ioim->reqq);
742 if (!m) { 776 if (!m) {
743 bfa_reqq_wait(ioim->bfa, ioim->itnim->reqq, 777 bfa_reqq_wait(ioim->bfa, ioim->reqq,
744 &ioim->iosp->reqq_wait); 778 &ioim->iosp->reqq_wait);
745 return BFA_FALSE; 779 return BFA_FALSE;
746 } 780 }
@@ -832,7 +866,7 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
832 /** 866 /**
833 * queue I/O message to firmware 867 * queue I/O message to firmware
834 */ 868 */
835 bfa_reqq_produce(ioim->bfa, itnim->reqq); 869 bfa_reqq_produce(ioim->bfa, ioim->reqq);
836 return BFA_TRUE; 870 return BFA_TRUE;
837} 871}
838 872
@@ -930,14 +964,13 @@ bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
930static bfa_boolean_t 964static bfa_boolean_t
931bfa_ioim_send_abort(struct bfa_ioim_s *ioim) 965bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
932{ 966{
933 struct bfa_itnim_s *itnim = ioim->itnim;
934 struct bfi_ioim_abort_req_s *m; 967 struct bfi_ioim_abort_req_s *m;
935 enum bfi_ioim_h2i msgop; 968 enum bfi_ioim_h2i msgop;
936 969
937 /** 970 /**
938 * check for room in queue to send request now 971 * check for room in queue to send request now
939 */ 972 */
940 m = bfa_reqq_next(ioim->bfa, itnim->reqq); 973 m = bfa_reqq_next(ioim->bfa, ioim->reqq);
941 if (!m) 974 if (!m)
942 return BFA_FALSE; 975 return BFA_FALSE;
943 976
@@ -956,7 +989,7 @@ bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
956 /** 989 /**
957 * queue I/O message to firmware 990 * queue I/O message to firmware
958 */ 991 */
959 bfa_reqq_produce(ioim->bfa, itnim->reqq); 992 bfa_reqq_produce(ioim->bfa, ioim->reqq);
960 return BFA_TRUE; 993 return BFA_TRUE;
961} 994}
962 995
@@ -1306,6 +1339,14 @@ void
1306bfa_ioim_start(struct bfa_ioim_s *ioim) 1339bfa_ioim_start(struct bfa_ioim_s *ioim)
1307{ 1340{
1308 bfa_trc_fp(ioim->bfa, ioim->iotag); 1341 bfa_trc_fp(ioim->bfa, ioim->iotag);
1342
1343 /**
1344 * Obtain the queue over which this request has to be issued
1345 */
1346 ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
1347 bfa_cb_ioim_get_reqq(ioim->dio) :
1348 bfa_itnim_get_reqq(ioim);
1349
1309 bfa_sm_send_event(ioim, BFA_IOIM_SM_START); 1350 bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
1310} 1351}
1311 1352
diff --git a/drivers/scsi/bfa/bfa_log_module.c b/drivers/scsi/bfa/bfa_log_module.c
index 5c154d341d69..cf577ef7cb97 100644
--- a/drivers/scsi/bfa/bfa_log_module.c
+++ b/drivers/scsi/bfa/bfa_log_module.c
@@ -110,6 +110,27 @@ struct bfa_log_msgdef_s bfa_log_msg_array[] = {
110 "Running firmware version is incompatible with the driver version.", 110 "Running firmware version is incompatible with the driver version.",
111 (0), 0}, 111 (0), 0},
112 112
113{BFA_AEN_IOC_FWCFG_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
114 BFA_LOG_CRITICAL, "BFA_AEN_IOC_FWCFG_ERROR",
115 "Link initialization failed due to firmware configuration read error:"
116 " WWN = %s.",
117 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
118
119{BFA_AEN_IOC_INVALID_VENDOR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
120 BFA_LOG_ERROR, "BFA_AEN_IOC_INVALID_VENDOR",
121 "Unsupported switch vendor. Link initialization failed: WWN = %s.",
122 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
123
124{BFA_AEN_IOC_INVALID_NWWN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
125 BFA_LOG_ERROR, "BFA_AEN_IOC_INVALID_NWWN",
126 "Invalid NWWN. Link initialization failed: NWWN = 00:00:00:00:00:00:00:00.",
127 (0), 0},
128
129{BFA_AEN_IOC_INVALID_PWWN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
130 BFA_LOG_ERROR, "BFA_AEN_IOC_INVALID_PWWN",
131 "Invalid PWWN. Link initialization failed: PWWN = 00:00:00:00:00:00:00:00.",
132 (0), 0},
133
113 134
114 135
115 136
@@ -347,6 +368,22 @@ struct bfa_log_msgdef_s bfa_log_msg_array[] = {
347 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_D << BFA_LOG_ARG1) | 368 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_D << BFA_LOG_ARG1) |
348 (BFA_LOG_D << BFA_LOG_ARG2) | 0), 3}, 369 (BFA_LOG_D << BFA_LOG_ARG2) | 0), 3},
349 370
371{BFA_LOG_HAL_DRIVER_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
372 BFA_LOG_INFO, "HAL_DRIVER_ERROR",
373 "%s",
374 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
375
376{BFA_LOG_HAL_DRIVER_CONFIG_ERROR,
377 BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
378 "HAL_DRIVER_CONFIG_ERROR",
379 "%s",
380 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
381
382{BFA_LOG_HAL_MBOX_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
383 BFA_LOG_INFO, "HAL_MBOX_ERROR",
384 "%s",
385 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
386
350 387
351 388
352 389
@@ -412,6 +449,55 @@ struct bfa_log_msgdef_s bfa_log_msg_array[] = {
412 ((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_P << BFA_LOG_ARG1) | 449 ((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_P << BFA_LOG_ARG1) |
413 (BFA_LOG_X << BFA_LOG_ARG2) | 0), 3}, 450 (BFA_LOG_X << BFA_LOG_ARG2) | 0), 3},
414 451
452{BFA_LOG_LINUX_DRIVER_CONFIG_ERROR,
453 BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
454 "LINUX_DRIVER_CONFIG_ERROR",
455 "%s",
456 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
457
458{BFA_LOG_LINUX_BNA_STATE_MACHINE,
459 BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
460 "LINUX_BNA_STATE_MACHINE",
461 "%s",
462 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
463
464{BFA_LOG_LINUX_IOC_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
465 BFA_LOG_INFO, "LINUX_IOC_ERROR",
466 "%s",
467 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
468
469{BFA_LOG_LINUX_RESOURCE_ALLOC_ERROR,
470 BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
471 "LINUX_RESOURCE_ALLOC_ERROR",
472 "%s",
473 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
474
475{BFA_LOG_LINUX_RING_BUFFER_ERROR,
476 BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
477 "LINUX_RING_BUFFER_ERROR",
478 "%s",
479 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
480
481{BFA_LOG_LINUX_DRIVER_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
482 BFA_LOG_ERROR, "LINUX_DRIVER_ERROR",
483 "%s",
484 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
485
486{BFA_LOG_LINUX_DRIVER_INFO, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
487 BFA_LOG_INFO, "LINUX_DRIVER_INFO",
488 "%s",
489 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
490
491{BFA_LOG_LINUX_DRIVER_DIAG, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
492 BFA_LOG_INFO, "LINUX_DRIVER_DIAG",
493 "%s",
494 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
495
496{BFA_LOG_LINUX_DRIVER_AEN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
497 BFA_LOG_INFO, "LINUX_DRIVER_AEN",
498 "%s",
499 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
500
415 501
416 502
417 503
diff --git a/drivers/scsi/bfa/bfa_lps.c b/drivers/scsi/bfa/bfa_lps.c
index ad06f6189092..acabb44f092f 100644
--- a/drivers/scsi/bfa/bfa_lps.c
+++ b/drivers/scsi/bfa/bfa_lps.c
@@ -41,7 +41,6 @@ static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
41 struct bfa_iocfc_cfg_s *cfg, 41 struct bfa_iocfc_cfg_s *cfg,
42 struct bfa_meminfo_s *meminfo, 42 struct bfa_meminfo_s *meminfo,
43 struct bfa_pcidev_s *pcidev); 43 struct bfa_pcidev_s *pcidev);
44static void bfa_lps_initdone(struct bfa_s *bfa);
45static void bfa_lps_detach(struct bfa_s *bfa); 44static void bfa_lps_detach(struct bfa_s *bfa);
46static void bfa_lps_start(struct bfa_s *bfa); 45static void bfa_lps_start(struct bfa_s *bfa);
47static void bfa_lps_stop(struct bfa_s *bfa); 46static void bfa_lps_stop(struct bfa_s *bfa);
@@ -347,11 +346,6 @@ bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
347} 346}
348 347
349static void 348static void
350bfa_lps_initdone(struct bfa_s *bfa)
351{
352}
353
354static void
355bfa_lps_detach(struct bfa_s *bfa) 349bfa_lps_detach(struct bfa_s *bfa)
356{ 350{
357} 351}
diff --git a/drivers/scsi/bfa/bfa_port.c b/drivers/scsi/bfa/bfa_port.c
index cab19028361a..c7e69f1e56e3 100644
--- a/drivers/scsi/bfa/bfa_port.c
+++ b/drivers/scsi/bfa/bfa_port.c
@@ -102,9 +102,14 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
102 port->stats_busy = BFA_FALSE; 102 port->stats_busy = BFA_FALSE;
103 103
104 if (status == BFA_STATUS_OK) { 104 if (status == BFA_STATUS_OK) {
105 struct bfa_timeval_s tv;
106
105 memcpy(port->stats, port->stats_dma.kva, 107 memcpy(port->stats, port->stats_dma.kva,
106 sizeof(union bfa_pport_stats_u)); 108 sizeof(union bfa_pport_stats_u));
107 bfa_port_stats_swap(port, port->stats); 109 bfa_port_stats_swap(port, port->stats);
110
111 bfa_os_gettimeofday(&tv);
112 port->stats->fc.secs_reset = tv.tv_sec - port->stats_reset_time;
108 } 113 }
109 114
110 if (port->stats_cbfn) { 115 if (port->stats_cbfn) {
@@ -125,9 +130,17 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
125static void 130static void
126bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status) 131bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
127{ 132{
133 struct bfa_timeval_s tv;
134
128 port->stats_status = status; 135 port->stats_status = status;
129 port->stats_busy = BFA_FALSE; 136 port->stats_busy = BFA_FALSE;
130 137
138 /**
139 * re-initialize time stamp for stats reset
140 */
141 bfa_os_gettimeofday(&tv);
142 port->stats_reset_time = tv.tv_sec;
143
131 if (port->stats_cbfn) { 144 if (port->stats_cbfn) {
132 port->stats_cbfn(port->stats_cbarg, status); 145 port->stats_cbfn(port->stats_cbarg, status);
133 port->stats_cbfn = NULL; 146 port->stats_cbfn = NULL;
@@ -394,7 +407,7 @@ bfa_port_hbfail(void *arg)
394 */ 407 */
395 if (port->stats_busy) { 408 if (port->stats_busy) {
396 if (port->stats_cbfn) 409 if (port->stats_cbfn)
397 port->stats_cbfn(port->dev, BFA_STATUS_FAILED); 410 port->stats_cbfn(port->stats_cbarg, BFA_STATUS_FAILED);
398 port->stats_cbfn = NULL; 411 port->stats_cbfn = NULL;
399 port->stats_busy = BFA_FALSE; 412 port->stats_busy = BFA_FALSE;
400 } 413 }
@@ -404,7 +417,7 @@ bfa_port_hbfail(void *arg)
404 */ 417 */
405 if (port->endis_pending) { 418 if (port->endis_pending) {
406 if (port->endis_cbfn) 419 if (port->endis_cbfn)
407 port->endis_cbfn(port->dev, BFA_STATUS_FAILED); 420 port->endis_cbfn(port->endis_cbarg, BFA_STATUS_FAILED);
408 port->endis_cbfn = NULL; 421 port->endis_cbfn = NULL;
409 port->endis_pending = BFA_FALSE; 422 port->endis_pending = BFA_FALSE;
410 } 423 }
@@ -428,6 +441,8 @@ void
428bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, void *dev, 441bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, void *dev,
429 struct bfa_trc_mod_s *trcmod, struct bfa_log_mod_s *logmod) 442 struct bfa_trc_mod_s *trcmod, struct bfa_log_mod_s *logmod)
430{ 443{
444 struct bfa_timeval_s tv;
445
431 bfa_assert(port); 446 bfa_assert(port);
432 447
433 port->dev = dev; 448 port->dev = dev;
@@ -435,13 +450,21 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, void *dev,
435 port->trcmod = trcmod; 450 port->trcmod = trcmod;
436 port->logmod = logmod; 451 port->logmod = logmod;
437 452
438 port->stats_busy = port->endis_pending = BFA_FALSE; 453 port->stats_busy = BFA_FALSE;
439 port->stats_cbfn = port->endis_cbfn = NULL; 454 port->endis_pending = BFA_FALSE;
455 port->stats_cbfn = NULL;
456 port->endis_cbfn = NULL;
440 457
441 bfa_ioc_mbox_regisr(port->ioc, BFI_MC_PORT, bfa_port_isr, port); 458 bfa_ioc_mbox_regisr(port->ioc, BFI_MC_PORT, bfa_port_isr, port);
442 bfa_ioc_hbfail_init(&port->hbfail, bfa_port_hbfail, port); 459 bfa_ioc_hbfail_init(&port->hbfail, bfa_port_hbfail, port);
443 bfa_ioc_hbfail_register(port->ioc, &port->hbfail); 460 bfa_ioc_hbfail_register(port->ioc, &port->hbfail);
444 461
462 /**
463 * initialize time stamp for stats reset
464 */
465 bfa_os_gettimeofday(&tv);
466 port->stats_reset_time = tv.tv_sec;
467
445 bfa_trc(port, 0); 468 bfa_trc(port, 0);
446} 469}
447 470
diff --git a/drivers/scsi/bfa/bfa_port_priv.h b/drivers/scsi/bfa/bfa_port_priv.h
index 40e256ec67ff..c9ebe0426fa6 100644
--- a/drivers/scsi/bfa/bfa_port_priv.h
+++ b/drivers/scsi/bfa/bfa_port_priv.h
@@ -75,8 +75,9 @@ struct bfa_fcport_s {
75 bfa_status_t stats_status; /* stats/statsclr status */ 75 bfa_status_t stats_status; /* stats/statsclr status */
76 bfa_boolean_t stats_busy; /* outstanding stats/statsclr */ 76 bfa_boolean_t stats_busy; /* outstanding stats/statsclr */
77 bfa_boolean_t stats_qfull; 77 bfa_boolean_t stats_qfull;
78 u32 stats_reset_time; /* stats reset time stamp */
78 bfa_cb_pport_t stats_cbfn; /* driver callback function */ 79 bfa_cb_pport_t stats_cbfn; /* driver callback function */
79 void *stats_cbarg; /* *!< user callback arg */ 80 void *stats_cbarg; /* user callback arg */
80 bfa_boolean_t diag_busy; /* diag busy status */ 81 bfa_boolean_t diag_busy; /* diag busy status */
81 bfa_boolean_t beacon; /* port beacon status */ 82 bfa_boolean_t beacon; /* port beacon status */
82 bfa_boolean_t link_e2e_beacon; /* link beacon status */ 83 bfa_boolean_t link_e2e_beacon; /* link beacon status */
@@ -87,5 +88,7 @@ struct bfa_fcport_s {
87/* 88/*
88 * public functions 89 * public functions
89 */ 90 */
90void bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); 91void bfa_fcport_init(struct bfa_s *bfa);
92void bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
93
91#endif /* __BFA_PORT_PRIV_H__ */ 94#endif /* __BFA_PORT_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_priv.h b/drivers/scsi/bfa/bfa_priv.h
index be80fc7e1b0e..bf4939b1676c 100644
--- a/drivers/scsi/bfa/bfa_priv.h
+++ b/drivers/scsi/bfa/bfa_priv.h
@@ -37,7 +37,6 @@
37 void *bfad, struct bfa_iocfc_cfg_s *cfg, \ 37 void *bfad, struct bfa_iocfc_cfg_s *cfg, \
38 struct bfa_meminfo_s *meminfo, \ 38 struct bfa_meminfo_s *meminfo, \
39 struct bfa_pcidev_s *pcidev); \ 39 struct bfa_pcidev_s *pcidev); \
40 static void bfa_ ## __mod ## _initdone(struct bfa_s *bfa); \
41 static void bfa_ ## __mod ## _detach(struct bfa_s *bfa); \ 40 static void bfa_ ## __mod ## _detach(struct bfa_s *bfa); \
42 static void bfa_ ## __mod ## _start(struct bfa_s *bfa); \ 41 static void bfa_ ## __mod ## _start(struct bfa_s *bfa); \
43 static void bfa_ ## __mod ## _stop(struct bfa_s *bfa); \ 42 static void bfa_ ## __mod ## _stop(struct bfa_s *bfa); \
@@ -47,7 +46,6 @@
47 struct bfa_module_s hal_mod_ ## __mod = { \ 46 struct bfa_module_s hal_mod_ ## __mod = { \
48 bfa_ ## __mod ## _meminfo, \ 47 bfa_ ## __mod ## _meminfo, \
49 bfa_ ## __mod ## _attach, \ 48 bfa_ ## __mod ## _attach, \
50 bfa_ ## __mod ## _initdone, \
51 bfa_ ## __mod ## _detach, \ 49 bfa_ ## __mod ## _detach, \
52 bfa_ ## __mod ## _start, \ 50 bfa_ ## __mod ## _start, \
53 bfa_ ## __mod ## _stop, \ 51 bfa_ ## __mod ## _stop, \
@@ -69,7 +67,6 @@ struct bfa_module_s {
69 struct bfa_iocfc_cfg_s *cfg, 67 struct bfa_iocfc_cfg_s *cfg,
70 struct bfa_meminfo_s *meminfo, 68 struct bfa_meminfo_s *meminfo,
71 struct bfa_pcidev_s *pcidev); 69 struct bfa_pcidev_s *pcidev);
72 void (*initdone) (struct bfa_s *bfa);
73 void (*detach) (struct bfa_s *bfa); 70 void (*detach) (struct bfa_s *bfa);
74 void (*start) (struct bfa_s *bfa); 71 void (*start) (struct bfa_s *bfa);
75 void (*stop) (struct bfa_s *bfa); 72 void (*stop) (struct bfa_s *bfa);
diff --git a/drivers/scsi/bfa/bfa_rport.c b/drivers/scsi/bfa/bfa_rport.c
index 7c509fa244e4..ccd0680f6f16 100644
--- a/drivers/scsi/bfa/bfa_rport.c
+++ b/drivers/scsi/bfa/bfa_rport.c
@@ -636,11 +636,6 @@ bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
636} 636}
637 637
638static void 638static void
639bfa_rport_initdone(struct bfa_s *bfa)
640{
641}
642
643static void
644bfa_rport_detach(struct bfa_s *bfa) 639bfa_rport_detach(struct bfa_s *bfa)
645{ 640{
646} 641}
diff --git a/drivers/scsi/bfa/bfa_sgpg.c b/drivers/scsi/bfa/bfa_sgpg.c
index 279d8f9b8907..ae452c42e40e 100644
--- a/drivers/scsi/bfa/bfa_sgpg.c
+++ b/drivers/scsi/bfa/bfa_sgpg.c
@@ -94,11 +94,6 @@ bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
94} 94}
95 95
96static void 96static void
97bfa_sgpg_initdone(struct bfa_s *bfa)
98{
99}
100
101static void
102bfa_sgpg_detach(struct bfa_s *bfa) 97bfa_sgpg_detach(struct bfa_s *bfa)
103{ 98{
104} 99}
diff --git a/drivers/scsi/bfa/bfa_uf.c b/drivers/scsi/bfa/bfa_uf.c
index 4b3c2417d180..b9a9a686ef6a 100644
--- a/drivers/scsi/bfa/bfa_uf.c
+++ b/drivers/scsi/bfa/bfa_uf.c
@@ -170,11 +170,6 @@ bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
170} 170}
171 171
172static void 172static void
173bfa_uf_initdone(struct bfa_s *bfa)
174{
175}
176
177static void
178bfa_uf_detach(struct bfa_s *bfa) 173bfa_uf_detach(struct bfa_s *bfa)
179{ 174{
180} 175}
@@ -256,7 +251,10 @@ uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
256 (struct fchs_s *) buf, pld_w0); 251 (struct fchs_s *) buf, pld_w0);
257 } 252 }
258 253
259 bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf); 254 if (bfa->fcs)
255 __bfa_cb_uf_recv(uf, BFA_TRUE);
256 else
257 bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
260} 258}
261 259
262static void 260static void
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index d4fc4287ebd3..915a29d6c7ad 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -54,31 +54,62 @@ static int bfa_io_max_sge = BFAD_IO_MAX_SGE;
54static int log_level = BFA_LOG_WARNING; 54static int log_level = BFA_LOG_WARNING;
55static int ioc_auto_recover = BFA_TRUE; 55static int ioc_auto_recover = BFA_TRUE;
56static int ipfc_enable = BFA_FALSE; 56static int ipfc_enable = BFA_FALSE;
57static int ipfc_mtu = -1;
58static int fdmi_enable = BFA_TRUE; 57static int fdmi_enable = BFA_TRUE;
59int bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH; 58int bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH;
60int bfa_linkup_delay = -1; 59int bfa_linkup_delay = -1;
60int bfa_debugfs_enable = 1;
61 61
62module_param(os_name, charp, S_IRUGO | S_IWUSR); 62module_param(os_name, charp, S_IRUGO | S_IWUSR);
63MODULE_PARM_DESC(os_name, "OS name of the hba host machine");
63module_param(os_patch, charp, S_IRUGO | S_IWUSR); 64module_param(os_patch, charp, S_IRUGO | S_IWUSR);
65MODULE_PARM_DESC(os_patch, "OS patch level of the hba host machine");
64module_param(host_name, charp, S_IRUGO | S_IWUSR); 66module_param(host_name, charp, S_IRUGO | S_IWUSR);
67MODULE_PARM_DESC(host_name, "Hostname of the hba host machine");
65module_param(num_rports, int, S_IRUGO | S_IWUSR); 68module_param(num_rports, int, S_IRUGO | S_IWUSR);
69MODULE_PARM_DESC(num_rports, "Max number of rports supported per port"
70 " (physical/logical), default=1024");
66module_param(num_ios, int, S_IRUGO | S_IWUSR); 71module_param(num_ios, int, S_IRUGO | S_IWUSR);
72MODULE_PARM_DESC(num_ios, "Max number of ioim requests, default=2000");
67module_param(num_tms, int, S_IRUGO | S_IWUSR); 73module_param(num_tms, int, S_IRUGO | S_IWUSR);
74MODULE_PARM_DESC(num_tms, "Max number of task im requests, default=128");
68module_param(num_fcxps, int, S_IRUGO | S_IWUSR); 75module_param(num_fcxps, int, S_IRUGO | S_IWUSR);
76MODULE_PARM_DESC(num_fcxps, "Max number of fcxp requests, default=64");
69module_param(num_ufbufs, int, S_IRUGO | S_IWUSR); 77module_param(num_ufbufs, int, S_IRUGO | S_IWUSR);
78MODULE_PARM_DESC(num_ufbufs, "Max number of unsolicited frame buffers,"
79 " default=64");
70module_param(reqq_size, int, S_IRUGO | S_IWUSR); 80module_param(reqq_size, int, S_IRUGO | S_IWUSR);
81MODULE_PARM_DESC(reqq_size, "Max number of request queue elements,"
82 " default=256");
71module_param(rspq_size, int, S_IRUGO | S_IWUSR); 83module_param(rspq_size, int, S_IRUGO | S_IWUSR);
84MODULE_PARM_DESC(rspq_size, "Max number of response queue elements,"
85 " default=64");
72module_param(num_sgpgs, int, S_IRUGO | S_IWUSR); 86module_param(num_sgpgs, int, S_IRUGO | S_IWUSR);
87MODULE_PARM_DESC(num_sgpgs, "Number of scatter/gather pages, default=2048");
73module_param(rport_del_timeout, int, S_IRUGO | S_IWUSR); 88module_param(rport_del_timeout, int, S_IRUGO | S_IWUSR);
89MODULE_PARM_DESC(rport_del_timeout, "Rport delete timeout, default=90 secs,"
90 " Range[>0]");
74module_param(bfa_lun_queue_depth, int, S_IRUGO | S_IWUSR); 91module_param(bfa_lun_queue_depth, int, S_IRUGO | S_IWUSR);
92MODULE_PARM_DESC(bfa_lun_queue_depth, "Lun queue depth, default=32,"
93 " Range[>0]");
75module_param(bfa_io_max_sge, int, S_IRUGO | S_IWUSR); 94module_param(bfa_io_max_sge, int, S_IRUGO | S_IWUSR);
95MODULE_PARM_DESC(bfa_io_max_sge, "Max io scatter/gather elements, default=255");
76module_param(log_level, int, S_IRUGO | S_IWUSR); 96module_param(log_level, int, S_IRUGO | S_IWUSR);
97MODULE_PARM_DESC(log_level, "Driver log level, default=3,"
98 " Range[Critical:1|Error:2|Warning:3|Info:4]");
77module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR); 99module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR);
100MODULE_PARM_DESC(ioc_auto_recover, "IOC auto recovery, default=1,"
101 " Range[off:0|on:1]");
78module_param(ipfc_enable, int, S_IRUGO | S_IWUSR); 102module_param(ipfc_enable, int, S_IRUGO | S_IWUSR);
79module_param(ipfc_mtu, int, S_IRUGO | S_IWUSR); 103MODULE_PARM_DESC(ipfc_enable, "Enable IPoFC, default=0, Range[off:0|on:1]");
80module_param(fdmi_enable, int, S_IRUGO | S_IWUSR);
81module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR); 104module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR);
105MODULE_PARM_DESC(bfa_linkup_delay, "Link up delay, default=30 secs for boot"
106 " port. Otherwise Range[>0]");
107module_param(fdmi_enable, int, S_IRUGO | S_IWUSR);
108MODULE_PARM_DESC(fdmi_enable, "Enables fdmi registration, default=1,"
109 " Range[false:0|true:1]");
110module_param(bfa_debugfs_enable, int, S_IRUGO | S_IWUSR);
111MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1,"
112 " Range[false:0|true:1]");
82 113
83/* 114/*
84 * Stores the module parm num_sgpgs value; 115 * Stores the module parm num_sgpgs value;
@@ -322,7 +353,31 @@ ext:
322 return rc; 353 return rc;
323} 354}
324 355
356/**
357 * @brief
358 * FCS PBC VPORT Create
359 */
360void
361bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport)
362{
363
364 struct bfad_pcfg_s *pcfg;
325 365
366 pcfg = kzalloc(sizeof(struct bfad_pcfg_s), GFP_ATOMIC);
367 if (!pcfg) {
368 bfa_trc(bfad, 0);
369 return;
370 }
371
372 pcfg->port_cfg.roles = BFA_PORT_ROLE_FCP_IM;
373 pcfg->port_cfg.pwwn = pbc_vport.vp_pwwn;
374 pcfg->port_cfg.nwwn = pbc_vport.vp_nwwn;
375 pcfg->port_cfg.preboot_vp = BFA_TRUE;
376
377 list_add_tail(&pcfg->list_entry, &bfad->pbc_pcfg_list);
378
379 return;
380}
326 381
327void 382void
328bfad_hal_mem_release(struct bfad_s *bfad) 383bfad_hal_mem_release(struct bfad_s *bfad)
@@ -481,10 +536,10 @@ ext:
481 */ 536 */
482bfa_status_t 537bfa_status_t
483bfad_vport_create(struct bfad_s *bfad, u16 vf_id, 538bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
484 struct bfa_port_cfg_s *port_cfg, struct device *dev) 539 struct bfa_port_cfg_s *port_cfg, struct device *dev)
485{ 540{
486 struct bfad_vport_s *vport; 541 struct bfad_vport_s *vport;
487 int rc = BFA_STATUS_OK; 542 int rc = BFA_STATUS_OK;
488 unsigned long flags; 543 unsigned long flags;
489 struct completion fcomp; 544 struct completion fcomp;
490 545
@@ -496,8 +551,12 @@ bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
496 551
497 vport->drv_port.bfad = bfad; 552 vport->drv_port.bfad = bfad;
498 spin_lock_irqsave(&bfad->bfad_lock, flags); 553 spin_lock_irqsave(&bfad->bfad_lock, flags);
499 rc = bfa_fcs_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, vf_id, 554 if (port_cfg->preboot_vp == BFA_TRUE)
500 port_cfg, vport); 555 rc = bfa_fcs_pbc_vport_create(&vport->fcs_vport,
556 &bfad->bfa_fcs, vf_id, port_cfg, vport);
557 else
558 rc = bfa_fcs_vport_create(&vport->fcs_vport,
559 &bfad->bfa_fcs, vf_id, port_cfg, vport);
501 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 560 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
502 561
503 if (rc != BFA_STATUS_OK) 562 if (rc != BFA_STATUS_OK)
@@ -848,6 +907,10 @@ bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role)
848 bfad->pport.roles |= BFA_PORT_ROLE_FCP_IM; 907 bfad->pport.roles |= BFA_PORT_ROLE_FCP_IM;
849 } 908 }
850 909
910 /* Setup the debugfs node for this scsi_host */
911 if (bfa_debugfs_enable)
912 bfad_debugfs_init(&bfad->pport);
913
851 bfad->bfad_flags |= BFAD_CFG_PPORT_DONE; 914 bfad->bfad_flags |= BFAD_CFG_PPORT_DONE;
852 915
853out: 916out:
@@ -857,6 +920,10 @@ out:
857void 920void
858bfad_uncfg_pport(struct bfad_s *bfad) 921bfad_uncfg_pport(struct bfad_s *bfad)
859{ 922{
923 /* Remove the debugfs node for this scsi_host */
924 kfree(bfad->regdata);
925 bfad_debugfs_exit(&bfad->pport);
926
860 if ((bfad->pport.roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable) { 927 if ((bfad->pport.roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable) {
861 bfad_ipfc_port_delete(bfad, &bfad->pport); 928 bfad_ipfc_port_delete(bfad, &bfad->pport);
862 bfad->pport.roles &= ~BFA_PORT_ROLE_FCP_IPFC; 929 bfad->pport.roles &= ~BFA_PORT_ROLE_FCP_IPFC;
@@ -884,6 +951,7 @@ bfa_status_t
884bfad_start_ops(struct bfad_s *bfad) 951bfad_start_ops(struct bfad_s *bfad)
885{ 952{
886 int retval; 953 int retval;
954 struct bfad_pcfg_s *pcfg, *pcfg_new;
887 955
888 /* PPORT FCS config */ 956 /* PPORT FCS config */
889 bfad_fcs_port_cfg(bfad); 957 bfad_fcs_port_cfg(bfad);
@@ -901,6 +969,27 @@ bfad_start_ops(struct bfad_s *bfad)
901 969
902 bfad_drv_start(bfad); 970 bfad_drv_start(bfad);
903 971
972 /* pbc vport creation */
973 list_for_each_entry_safe(pcfg, pcfg_new, &bfad->pbc_pcfg_list,
974 list_entry) {
975 struct fc_vport_identifiers vid;
976 struct fc_vport *fc_vport;
977
978 memset(&vid, 0, sizeof(vid));
979 vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
980 vid.vport_type = FC_PORTTYPE_NPIV;
981 vid.disable = false;
982 vid.node_name = wwn_to_u64((u8 *)&pcfg->port_cfg.nwwn);
983 vid.port_name = wwn_to_u64((u8 *)&pcfg->port_cfg.pwwn);
984 fc_vport = fc_vport_create(bfad->pport.im_port->shost, 0, &vid);
985 if (!fc_vport)
986 printk(KERN_WARNING "bfad%d: failed to create pbc vport"
987 " %llx\n", bfad->inst_no, vid.port_name);
988 list_del(&pcfg->list_entry);
989 kfree(pcfg);
990
991 }
992
904 /* 993 /*
905 * If bfa_linkup_delay is set to -1 default; try to retrive the 994 * If bfa_linkup_delay is set to -1 default; try to retrive the
906 * value using the bfad_os_get_linkup_delay(); else use the 995 * value using the bfad_os_get_linkup_delay(); else use the
@@ -928,7 +1017,7 @@ out_cfg_pport_failure:
928} 1017}
929 1018
930int 1019int
931bfad_worker (void *ptr) 1020bfad_worker(void *ptr)
932{ 1021{
933 struct bfad_s *bfad; 1022 struct bfad_s *bfad;
934 unsigned long flags; 1023 unsigned long flags;
@@ -1031,6 +1120,7 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
1031 1120
1032 bfad->ref_count = 0; 1121 bfad->ref_count = 0;
1033 bfad->pport.bfad = bfad; 1122 bfad->pport.bfad = bfad;
1123 INIT_LIST_HEAD(&bfad->pbc_pcfg_list);
1034 1124
1035 bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad, "%s", 1125 bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad, "%s",
1036 "bfad_worker"); 1126 "bfad_worker");
@@ -1172,6 +1262,14 @@ static struct pci_device_id bfad_id_table[] = {
1172 .class = (PCI_CLASS_SERIAL_FIBER << 8), 1262 .class = (PCI_CLASS_SERIAL_FIBER << 8),
1173 .class_mask = ~0, 1263 .class_mask = ~0,
1174 }, 1264 },
1265 {
1266 .vendor = BFA_PCI_VENDOR_ID_BROCADE,
1267 .device = BFA_PCI_DEVICE_ID_CT_FC,
1268 .subvendor = PCI_ANY_ID,
1269 .subdevice = PCI_ANY_ID,
1270 .class = (PCI_CLASS_SERIAL_FIBER << 8),
1271 .class_mask = ~0,
1272 },
1175 1273
1176 {0, 0}, 1274 {0, 0},
1177}; 1275};
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index e477bfbfa7d8..0818eb07ef88 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -373,47 +373,53 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
373 (struct bfad_im_port_s *) shost->hostdata[0]; 373 (struct bfad_im_port_s *) shost->hostdata[0];
374 struct bfad_s *bfad = im_port->bfad; 374 struct bfad_s *bfad = im_port->bfad;
375 struct bfa_port_cfg_s port_cfg; 375 struct bfa_port_cfg_s port_cfg;
376 struct bfad_pcfg_s *pcfg;
376 int status = 0, rc; 377 int status = 0, rc;
377 unsigned long flags; 378 unsigned long flags;
378 379
379 memset(&port_cfg, 0, sizeof(port_cfg)); 380 memset(&port_cfg, 0, sizeof(port_cfg));
380 381 u64_to_wwn(fc_vport->node_name, (u8 *)&port_cfg.nwwn);
381 port_cfg.pwwn = wwn_to_u64((u8 *) &fc_vport->port_name); 382 u64_to_wwn(fc_vport->port_name, (u8 *)&port_cfg.pwwn);
382 port_cfg.nwwn = wwn_to_u64((u8 *) &fc_vport->node_name);
383
384 if (strlen(vname) > 0) 383 if (strlen(vname) > 0)
385 strcpy((char *)&port_cfg.sym_name, vname); 384 strcpy((char *)&port_cfg.sym_name, vname);
386
387 port_cfg.roles = BFA_PORT_ROLE_FCP_IM; 385 port_cfg.roles = BFA_PORT_ROLE_FCP_IM;
388 rc = bfad_vport_create(bfad, 0, &port_cfg, &fc_vport->dev);
389 386
387 spin_lock_irqsave(&bfad->bfad_lock, flags);
388 list_for_each_entry(pcfg, &bfad->pbc_pcfg_list, list_entry) {
389 if (port_cfg.pwwn == pcfg->port_cfg.pwwn) {
390 port_cfg.preboot_vp = pcfg->port_cfg.preboot_vp;
391 break;
392 }
393 }
394 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
395
396 rc = bfad_vport_create(bfad, 0, &port_cfg, &fc_vport->dev);
390 if (rc == BFA_STATUS_OK) { 397 if (rc == BFA_STATUS_OK) {
391 struct bfad_vport_s *vport; 398 struct bfad_vport_s *vport;
392 struct bfa_fcs_vport_s *fcs_vport; 399 struct bfa_fcs_vport_s *fcs_vport;
393 struct Scsi_Host *vshost; 400 struct Scsi_Host *vshost;
394 401
395 spin_lock_irqsave(&bfad->bfad_lock, flags); 402 spin_lock_irqsave(&bfad->bfad_lock, flags);
396 fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, 403 fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0,
397 port_cfg.pwwn); 404 port_cfg.pwwn);
398 if (fcs_vport == NULL) { 405 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
399 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 406 if (fcs_vport == NULL)
400 return VPCERR_BAD_WWN; 407 return VPCERR_BAD_WWN;
401 }
402 408
403 fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE); 409 fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
404 if (disable) { 410 if (disable) {
411 spin_lock_irqsave(&bfad->bfad_lock, flags);
405 bfa_fcs_vport_stop(fcs_vport); 412 bfa_fcs_vport_stop(fcs_vport);
413 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
406 fc_vport_set_state(fc_vport, FC_VPORT_DISABLED); 414 fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
407 } 415 }
408 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
409 416
410 vport = fcs_vport->vport_drv; 417 vport = fcs_vport->vport_drv;
411 vshost = vport->drv_port.im_port->shost; 418 vshost = vport->drv_port.im_port->shost;
412 fc_host_node_name(vshost) = wwn_to_u64((u8 *) &port_cfg.nwwn); 419 fc_host_node_name(vshost) = wwn_to_u64((u8 *)&port_cfg.nwwn);
413 fc_host_port_name(vshost) = wwn_to_u64((u8 *) &port_cfg.pwwn); 420 fc_host_port_name(vshost) = wwn_to_u64((u8 *)&port_cfg.pwwn);
414 fc_vport->dd_data = vport; 421 fc_vport->dd_data = vport;
415 vport->drv_port.im_port->fc_vport = fc_vport; 422 vport->drv_port.im_port->fc_vport = fc_vport;
416
417 } else if (rc == BFA_STATUS_INVALID_WWN) 423 } else if (rc == BFA_STATUS_INVALID_WWN)
418 return VPCERR_BAD_WWN; 424 return VPCERR_BAD_WWN;
419 else if (rc == BFA_STATUS_VPORT_EXISTS) 425 else if (rc == BFA_STATUS_VPORT_EXISTS)
@@ -422,7 +428,7 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
422 return VPCERR_NO_FABRIC_SUPP; 428 return VPCERR_NO_FABRIC_SUPP;
423 else if (rc == BFA_STATUS_VPORT_WWN_BP) 429 else if (rc == BFA_STATUS_VPORT_WWN_BP)
424 return VPCERR_BAD_WWN; 430 return VPCERR_BAD_WWN;
425 else 431 else
426 return FC_VPORT_FAILED; 432 return FC_VPORT_FAILED;
427 433
428 return status; 434 return status;
@@ -449,7 +455,7 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
449 port = im_port->port; 455 port = im_port->port;
450 456
451 vshost = vport->drv_port.im_port->shost; 457 vshost = vport->drv_port.im_port->shost;
452 pwwn = wwn_to_u64((u8 *) &fc_host_port_name(vshost)); 458 u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn);
453 459
454 spin_lock_irqsave(&bfad->bfad_lock, flags); 460 spin_lock_irqsave(&bfad->bfad_lock, flags);
455 fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn); 461 fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn);
@@ -467,6 +473,12 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
467 rc = bfa_fcs_vport_delete(&vport->fcs_vport); 473 rc = bfa_fcs_vport_delete(&vport->fcs_vport);
468 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 474 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
469 475
476 if (rc == BFA_STATUS_PBC) {
477 vport->drv_port.flags &= ~BFAD_PORT_DELETE;
478 vport->comp_del = NULL;
479 return -1;
480 }
481
470 wait_for_completion(vport->comp_del); 482 wait_for_completion(vport->comp_del);
471 483
472free_scsi_host: 484free_scsi_host:
@@ -490,7 +502,7 @@ bfad_im_vport_disable(struct fc_vport *fc_vport, bool disable)
490 vport = (struct bfad_vport_s *)fc_vport->dd_data; 502 vport = (struct bfad_vport_s *)fc_vport->dd_data;
491 bfad = vport->drv_port.bfad; 503 bfad = vport->drv_port.bfad;
492 vshost = vport->drv_port.im_port->shost; 504 vshost = vport->drv_port.im_port->shost;
493 pwwn = wwn_to_u64((u8 *) &fc_vport->port_name); 505 u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn);
494 506
495 spin_lock_irqsave(&bfad->bfad_lock, flags); 507 spin_lock_irqsave(&bfad->bfad_lock, flags);
496 fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn); 508 fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn);
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
new file mode 100644
index 000000000000..4b82f12aad62
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -0,0 +1,547 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <linux/debugfs.h>
19
20#include <bfad_drv.h>
21#include <bfad_im.h>
22
23/*
24 * BFA debufs interface
25 *
26 * To access the interface, debugfs file system should be mounted
27 * if not already mounted using:
28 * mount -t debugfs none /sys/kernel/debug
29 *
30 * BFA Hierarchy:
31 * - bfa/host#
32 * where the host number corresponds to the one under /sys/class/scsi_host/host#
33 *
34 * Debugging service available per host:
35 * fwtrc: To collect current firmware trace.
36 * drvtrc: To collect current driver trace
37 * fwsave: To collect last saved fw trace as a result of firmware crash.
38 * regwr: To write one word to chip register
39 * regrd: To read one or more words from chip register.
40 */
41
42struct bfad_debug_info {
43 char *debug_buffer;
44 void *i_private;
45 int buffer_len;
46};
47
48static int
49bfad_debugfs_open_drvtrc(struct inode *inode, struct file *file)
50{
51 struct bfad_port_s *port = inode->i_private;
52 struct bfad_s *bfad = port->bfad;
53 struct bfad_debug_info *debug;
54
55 debug = kzalloc(sizeof(struct bfad_debug_info), GFP_KERNEL);
56 if (!debug)
57 return -ENOMEM;
58
59 debug->debug_buffer = (void *) bfad->trcmod;
60 debug->buffer_len = sizeof(struct bfa_trc_mod_s);
61
62 file->private_data = debug;
63
64 return 0;
65}
66
67static int
68bfad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
69{
70 struct bfad_port_s *port = inode->i_private;
71 struct bfad_s *bfad = port->bfad;
72 struct bfad_debug_info *fw_debug;
73 unsigned long flags;
74 int rc;
75
76 fw_debug = kzalloc(sizeof(struct bfad_debug_info), GFP_KERNEL);
77 if (!fw_debug)
78 return -ENOMEM;
79
80 fw_debug->buffer_len = sizeof(struct bfa_trc_mod_s);
81
82 fw_debug->debug_buffer = vmalloc(fw_debug->buffer_len);
83 if (!fw_debug->debug_buffer) {
84 kfree(fw_debug);
85 printk(KERN_INFO "bfad[%d]: Failed to allocate fwtrc buffer\n",
86 bfad->inst_no);
87 return -ENOMEM;
88 }
89
90 memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len);
91
92 spin_lock_irqsave(&bfad->bfad_lock, flags);
93 rc = bfa_debug_fwtrc(&bfad->bfa,
94 fw_debug->debug_buffer,
95 &fw_debug->buffer_len);
96 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
97 if (rc != BFA_STATUS_OK) {
98 vfree(fw_debug->debug_buffer);
99 fw_debug->debug_buffer = NULL;
100 kfree(fw_debug);
101 printk(KERN_INFO "bfad[%d]: Failed to collect fwtrc\n",
102 bfad->inst_no);
103 return -ENOMEM;
104 }
105
106 file->private_data = fw_debug;
107
108 return 0;
109}
110
111static int
112bfad_debugfs_open_fwsave(struct inode *inode, struct file *file)
113{
114 struct bfad_port_s *port = inode->i_private;
115 struct bfad_s *bfad = port->bfad;
116 struct bfad_debug_info *fw_debug;
117 unsigned long flags;
118 int rc;
119
120 fw_debug = kzalloc(sizeof(struct bfad_debug_info), GFP_KERNEL);
121 if (!fw_debug)
122 return -ENOMEM;
123
124 fw_debug->buffer_len = sizeof(struct bfa_trc_mod_s);
125
126 fw_debug->debug_buffer = vmalloc(fw_debug->buffer_len);
127 if (!fw_debug->debug_buffer) {
128 kfree(fw_debug);
129 printk(KERN_INFO "bfad[%d]: Failed to allocate fwsave buffer\n",
130 bfad->inst_no);
131 return -ENOMEM;
132 }
133
134 memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len);
135
136 spin_lock_irqsave(&bfad->bfad_lock, flags);
137 rc = bfa_debug_fwsave(&bfad->bfa,
138 fw_debug->debug_buffer,
139 &fw_debug->buffer_len);
140 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
141 if (rc != BFA_STATUS_OK) {
142 vfree(fw_debug->debug_buffer);
143 fw_debug->debug_buffer = NULL;
144 kfree(fw_debug);
145 printk(KERN_INFO "bfad[%d]: Failed to collect fwsave\n",
146 bfad->inst_no);
147 return -ENOMEM;
148 }
149
150 file->private_data = fw_debug;
151
152 return 0;
153}
154
155static int
156bfad_debugfs_open_reg(struct inode *inode, struct file *file)
157{
158 struct bfad_debug_info *reg_debug;
159
160 reg_debug = kzalloc(sizeof(struct bfad_debug_info), GFP_KERNEL);
161 if (!reg_debug)
162 return -ENOMEM;
163
164 reg_debug->i_private = inode->i_private;
165
166 file->private_data = reg_debug;
167
168 return 0;
169}
170
171/* Changes the current file position */
172static loff_t
173bfad_debugfs_lseek(struct file *file, loff_t offset, int orig)
174{
175 struct bfad_debug_info *debug;
176 loff_t pos = file->f_pos;
177
178 debug = file->private_data;
179
180 switch (orig) {
181 case 0:
182 file->f_pos = offset;
183 break;
184 case 1:
185 file->f_pos += offset;
186 break;
187 case 2:
188 file->f_pos = debug->buffer_len - offset;
189 break;
190 default:
191 return -EINVAL;
192 }
193
194 if (file->f_pos < 0 || file->f_pos > debug->buffer_len) {
195 file->f_pos = pos;
196 return -EINVAL;
197 }
198
199 return file->f_pos;
200}
201
202static ssize_t
203bfad_debugfs_read(struct file *file, char __user *buf,
204 size_t nbytes, loff_t *pos)
205{
206 struct bfad_debug_info *debug = file->private_data;
207
208 if (!debug || !debug->debug_buffer)
209 return 0;
210
211 return memory_read_from_buffer(buf, nbytes, pos,
212 debug->debug_buffer, debug->buffer_len);
213}
214
215#define BFA_REG_CT_ADDRSZ (0x40000)
216#define BFA_REG_CB_ADDRSZ (0x20000)
217#define BFA_REG_ADDRSZ(__bfa) \
218 ((bfa_ioc_devid(&(__bfa)->ioc) == BFA_PCI_DEVICE_ID_CT) ? \
219 BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ)
220#define BFA_REG_ADDRMSK(__bfa) ((uint32_t)(BFA_REG_ADDRSZ(__bfa) - 1))
221
222static bfa_status_t
223bfad_reg_offset_check(struct bfa_s *bfa, u32 offset, u32 len)
224{
225 u8 area;
226
227 /* check [16:15] */
228 area = (offset >> 15) & 0x7;
229 if (area == 0) {
230 /* PCIe core register */
231 if ((offset + (len<<2)) > 0x8000) /* 8k dwords or 32KB */
232 return BFA_STATUS_EINVAL;
233 } else if (area == 0x1) {
234 /* CB 32 KB memory page */
235 if ((offset + (len<<2)) > 0x10000) /* 8k dwords or 32KB */
236 return BFA_STATUS_EINVAL;
237 } else {
238 /* CB register space 64KB */
239 if ((offset + (len<<2)) > BFA_REG_ADDRMSK(bfa))
240 return BFA_STATUS_EINVAL;
241 }
242 return BFA_STATUS_OK;
243}
244
245static ssize_t
246bfad_debugfs_read_regrd(struct file *file, char __user *buf,
247 size_t nbytes, loff_t *pos)
248{
249 struct bfad_debug_info *regrd_debug = file->private_data;
250 struct bfad_port_s *port = (struct bfad_port_s *)regrd_debug->i_private;
251 struct bfad_s *bfad = port->bfad;
252 ssize_t rc;
253
254 if (!bfad->regdata)
255 return 0;
256
257 rc = memory_read_from_buffer(buf, nbytes, pos,
258 bfad->regdata, bfad->reglen);
259
260 if ((*pos + nbytes) >= bfad->reglen) {
261 kfree(bfad->regdata);
262 bfad->regdata = NULL;
263 bfad->reglen = 0;
264 }
265
266 return rc;
267}
268
269static ssize_t
270bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
271 size_t nbytes, loff_t *ppos)
272{
273 struct bfad_debug_info *regrd_debug = file->private_data;
274 struct bfad_port_s *port = (struct bfad_port_s *)regrd_debug->i_private;
275 struct bfad_s *bfad = port->bfad;
276 struct bfa_s *bfa = &bfad->bfa;
277 struct bfa_ioc_s *ioc = &bfa->ioc;
278 int addr, len, rc, i;
279 u32 *regbuf;
280 void __iomem *rb, *reg_addr;
281 unsigned long flags;
282
283 rc = sscanf(buf, "%x:%x", &addr, &len);
284 if (rc < 2) {
285 printk(KERN_INFO
286 "bfad[%d]: %s failed to read user buf\n",
287 bfad->inst_no, __func__);
288 return -EINVAL;
289 }
290
291 kfree(bfad->regdata);
292 bfad->regdata = NULL;
293 bfad->reglen = 0;
294
295 bfad->regdata = kzalloc(len << 2, GFP_KERNEL);
296 if (!bfad->regdata) {
297 printk(KERN_INFO "bfad[%d]: Failed to allocate regrd buffer\n",
298 bfad->inst_no);
299 return -ENOMEM;
300 }
301
302 bfad->reglen = len << 2;
303 rb = bfa_ioc_bar0(ioc);
304 addr &= BFA_REG_ADDRMSK(bfa);
305
306 /* offset and len sanity check */
307 rc = bfad_reg_offset_check(bfa, addr, len);
308 if (rc) {
309 printk(KERN_INFO "bfad[%d]: Failed reg offset check\n",
310 bfad->inst_no);
311 kfree(bfad->regdata);
312 bfad->regdata = NULL;
313 bfad->reglen = 0;
314 return -EINVAL;
315 }
316
317 reg_addr = rb + addr;
318 regbuf = (u32 *)bfad->regdata;
319 spin_lock_irqsave(&bfad->bfad_lock, flags);
320 for (i = 0; i < len; i++) {
321 *regbuf = bfa_reg_read(reg_addr);
322 regbuf++;
323 reg_addr += sizeof(u32);
324 }
325 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
326
327 return nbytes;
328}
329
330static ssize_t
331bfad_debugfs_write_regwr(struct file *file, const char __user *buf,
332 size_t nbytes, loff_t *ppos)
333{
334 struct bfad_debug_info *debug = file->private_data;
335 struct bfad_port_s *port = (struct bfad_port_s *)debug->i_private;
336 struct bfad_s *bfad = port->bfad;
337 struct bfa_s *bfa = &bfad->bfa;
338 struct bfa_ioc_s *ioc = &bfa->ioc;
339 int addr, val, rc;
340 void __iomem *reg_addr;
341 unsigned long flags;
342
343 rc = sscanf(buf, "%x:%x", &addr, &val);
344 if (rc < 2) {
345 printk(KERN_INFO
346 "bfad[%d]: %s failed to read user buf\n",
347 bfad->inst_no, __func__);
348 return -EINVAL;
349 }
350
351 addr &= BFA_REG_ADDRMSK(bfa); /* offset only 17 bit and word align */
352
353 /* offset and len sanity check */
354 rc = bfad_reg_offset_check(bfa, addr, 1);
355 if (rc) {
356 printk(KERN_INFO
357 "bfad[%d]: Failed reg offset check\n",
358 bfad->inst_no);
359 return -EINVAL;
360 }
361
362 reg_addr = (uint32_t *) ((uint8_t *) bfa_ioc_bar0(ioc) + addr);
363 spin_lock_irqsave(&bfad->bfad_lock, flags);
364 bfa_reg_write(reg_addr, val);
365 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
366
367 return nbytes;
368}
369
370static int
371bfad_debugfs_release(struct inode *inode, struct file *file)
372{
373 struct bfad_debug_info *debug = file->private_data;
374
375 if (!debug)
376 return 0;
377
378 file->private_data = NULL;
379 kfree(debug);
380 return 0;
381}
382
383static int
384bfad_debugfs_release_fwtrc(struct inode *inode, struct file *file)
385{
386 struct bfad_debug_info *fw_debug = file->private_data;
387
388 if (!fw_debug)
389 return 0;
390
391 if (fw_debug->debug_buffer)
392 vfree(fw_debug->debug_buffer);
393
394 file->private_data = NULL;
395 kfree(fw_debug);
396 return 0;
397}
398
399static const struct file_operations bfad_debugfs_op_drvtrc = {
400 .owner = THIS_MODULE,
401 .open = bfad_debugfs_open_drvtrc,
402 .llseek = bfad_debugfs_lseek,
403 .read = bfad_debugfs_read,
404 .release = bfad_debugfs_release,
405};
406
407static const struct file_operations bfad_debugfs_op_fwtrc = {
408 .owner = THIS_MODULE,
409 .open = bfad_debugfs_open_fwtrc,
410 .llseek = bfad_debugfs_lseek,
411 .read = bfad_debugfs_read,
412 .release = bfad_debugfs_release_fwtrc,
413};
414
415static const struct file_operations bfad_debugfs_op_fwsave = {
416 .owner = THIS_MODULE,
417 .open = bfad_debugfs_open_fwsave,
418 .llseek = bfad_debugfs_lseek,
419 .read = bfad_debugfs_read,
420 .release = bfad_debugfs_release_fwtrc,
421};
422
423static const struct file_operations bfad_debugfs_op_regrd = {
424 .owner = THIS_MODULE,
425 .open = bfad_debugfs_open_reg,
426 .llseek = bfad_debugfs_lseek,
427 .read = bfad_debugfs_read_regrd,
428 .write = bfad_debugfs_write_regrd,
429 .release = bfad_debugfs_release,
430};
431
432static const struct file_operations bfad_debugfs_op_regwr = {
433 .owner = THIS_MODULE,
434 .open = bfad_debugfs_open_reg,
435 .llseek = bfad_debugfs_lseek,
436 .write = bfad_debugfs_write_regwr,
437 .release = bfad_debugfs_release,
438};
439
440struct bfad_debugfs_entry {
441 const char *name;
442 mode_t mode;
443 const struct file_operations *fops;
444};
445
446static const struct bfad_debugfs_entry bfad_debugfs_files[] = {
447 { "drvtrc", S_IFREG|S_IRUGO, &bfad_debugfs_op_drvtrc, },
448 { "fwtrc", S_IFREG|S_IRUGO, &bfad_debugfs_op_fwtrc, },
449 { "fwsave", S_IFREG|S_IRUGO, &bfad_debugfs_op_fwsave, },
450 { "regrd", S_IFREG|S_IRUGO|S_IWUSR, &bfad_debugfs_op_regrd, },
451 { "regwr", S_IFREG|S_IWUSR, &bfad_debugfs_op_regwr, },
452};
453
454static struct dentry *bfa_debugfs_root;
455static atomic_t bfa_debugfs_port_count;
456
457inline void
458bfad_debugfs_init(struct bfad_port_s *port)
459{
460 struct bfad_im_port_s *im_port = port->im_port;
461 struct bfad_s *bfad = im_port->bfad;
462 struct Scsi_Host *shost = im_port->shost;
463 const struct bfad_debugfs_entry *file;
464 char name[16];
465 int i;
466
467 if (!bfa_debugfs_enable)
468 return;
469
470 /* Setup the BFA debugfs root directory*/
471 if (!bfa_debugfs_root) {
472 bfa_debugfs_root = debugfs_create_dir("bfa", NULL);
473 atomic_set(&bfa_debugfs_port_count, 0);
474 if (!bfa_debugfs_root) {
475 printk(KERN_WARNING
476 "BFA debugfs root dir creation failed\n");
477 goto err;
478 }
479 }
480
481 /*
482 * Setup the host# directory for the port,
483 * corresponds to the scsi_host num of this port.
484 */
485 snprintf(name, sizeof(name), "host%d", shost->host_no);
486 if (!port->port_debugfs_root) {
487 port->port_debugfs_root =
488 debugfs_create_dir(name, bfa_debugfs_root);
489 if (!port->port_debugfs_root) {
490 printk(KERN_WARNING
491 "BFA host root dir creation failed\n");
492 goto err;
493 }
494
495 atomic_inc(&bfa_debugfs_port_count);
496
497 for (i = 0; i < ARRAY_SIZE(bfad_debugfs_files); i++) {
498 file = &bfad_debugfs_files[i];
499 bfad->bfad_dentry_files[i] =
500 debugfs_create_file(file->name,
501 file->mode,
502 port->port_debugfs_root,
503 port,
504 file->fops);
505 if (!bfad->bfad_dentry_files[i]) {
506 printk(KERN_WARNING
507 "BFA host%d: create %s entry failed\n",
508 shost->host_no, file->name);
509 goto err;
510 }
511 }
512 }
513
514err:
515 return;
516}
517
518inline void
519bfad_debugfs_exit(struct bfad_port_s *port)
520{
521 struct bfad_im_port_s *im_port = port->im_port;
522 struct bfad_s *bfad = im_port->bfad;
523 int i;
524
525 for (i = 0; i < ARRAY_SIZE(bfad_debugfs_files); i++) {
526 if (bfad->bfad_dentry_files[i]) {
527 debugfs_remove(bfad->bfad_dentry_files[i]);
528 bfad->bfad_dentry_files[i] = NULL;
529 }
530 }
531
532 /*
533 * Remove the host# directory for the port,
534 * corresponds to the scsi_host num of this port.
535 */
536 if (port->port_debugfs_root) {
537 debugfs_remove(port->port_debugfs_root);
538 port->port_debugfs_root = NULL;
539 atomic_dec(&bfa_debugfs_port_count);
540 }
541
542 /* Remove the BFA debugfs root directory */
543 if (atomic_read(&bfa_debugfs_port_count) == 0) {
544 debugfs_remove(bfa_debugfs_root);
545 bfa_debugfs_root = NULL;
546 }
547}
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 6c920c1b53a4..465b8b86ec9c 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -46,7 +46,7 @@
46#ifdef BFA_DRIVER_VERSION 46#ifdef BFA_DRIVER_VERSION
47#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION 47#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION
48#else 48#else
49#define BFAD_DRIVER_VERSION "2.1.2.1" 49#define BFAD_DRIVER_VERSION "2.2.2.1"
50#endif 50#endif
51 51
52 52
@@ -111,6 +111,9 @@ struct bfad_port_s {
111 struct bfad_im_port_s *im_port; /* IM specific data */ 111 struct bfad_im_port_s *im_port; /* IM specific data */
112 struct bfad_tm_port_s *tm_port; /* TM specific data */ 112 struct bfad_tm_port_s *tm_port; /* TM specific data */
113 struct bfad_ipfc_port_s *ipfc_port; /* IPFC specific data */ 113 struct bfad_ipfc_port_s *ipfc_port; /* IPFC specific data */
114
115 /* port debugfs specific data */
116 struct dentry *port_debugfs_root;
114}; 117};
115 118
116/* 119/*
@@ -120,6 +123,8 @@ struct bfad_vport_s {
120 struct bfad_port_s drv_port; 123 struct bfad_port_s drv_port;
121 struct bfa_fcs_vport_s fcs_vport; 124 struct bfa_fcs_vport_s fcs_vport;
122 struct completion *comp_del; 125 struct completion *comp_del;
126 struct list_head list_entry;
127 struct bfa_port_cfg_s port_cfg;
123}; 128};
124 129
125/* 130/*
@@ -139,18 +144,6 @@ struct bfad_cfg_param_s {
139 u32 binding_method; 144 u32 binding_method;
140}; 145};
141 146
142union bfad_tmp_buf {
143 /* From struct bfa_adapter_attr_s */
144 char manufacturer[BFA_ADAPTER_MFG_NAME_LEN];
145 char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
146 char model[BFA_ADAPTER_MODEL_NAME_LEN];
147 char fw_ver[BFA_VERSION_LEN];
148 char optrom_ver[BFA_VERSION_LEN];
149
150 /* From struct bfa_ioc_pci_attr_s */
151 u8 chip_rev[BFA_IOC_CHIP_REV_LEN]; /* chip revision */
152};
153
154/* 147/*
155 * BFAD (PCI function) data structure 148 * BFAD (PCI function) data structure
156 */ 149 */
@@ -193,8 +186,18 @@ struct bfad_s {
193 struct bfa_plog_s plog_buf; 186 struct bfa_plog_s plog_buf;
194 int ref_count; 187 int ref_count;
195 bfa_boolean_t ipfc_enabled; 188 bfa_boolean_t ipfc_enabled;
196 union bfad_tmp_buf tmp_buf;
197 struct fc_host_statistics link_stats; 189 struct fc_host_statistics link_stats;
190 struct list_head pbc_pcfg_list;
191 atomic_t wq_reqcnt;
192 /* debugfs specific data */
193 char *regdata;
194 u32 reglen;
195 struct dentry *bfad_dentry_files[5];
196};
197
198struct bfad_pcfg_s {
199 struct list_head list_entry;
200 struct bfa_port_cfg_s port_cfg;
198}; 201};
199 202
200/* 203/*
@@ -280,7 +283,9 @@ void bfad_drv_uninit(struct bfad_s *bfad);
280void bfad_drv_log_level_set(struct bfad_s *bfad); 283void bfad_drv_log_level_set(struct bfad_s *bfad);
281bfa_status_t bfad_fc4_module_init(void); 284bfa_status_t bfad_fc4_module_init(void);
282void bfad_fc4_module_exit(void); 285void bfad_fc4_module_exit(void);
283int bfad_worker (void *ptr); 286int bfad_worker(void *ptr);
287void bfad_debugfs_init(struct bfad_port_s *port);
288void bfad_debugfs_exit(struct bfad_port_s *port);
284 289
285void bfad_pci_remove(struct pci_dev *pdev); 290void bfad_pci_remove(struct pci_dev *pdev);
286int bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid); 291int bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid);
@@ -293,6 +298,7 @@ extern struct list_head bfad_list;
293extern int bfa_lun_queue_depth; 298extern int bfa_lun_queue_depth;
294extern int bfad_supported_fc4s; 299extern int bfad_supported_fc4s;
295extern int bfa_linkup_delay; 300extern int bfa_linkup_delay;
301extern int bfa_debugfs_enable;
296extern struct mutex bfad_mutex; 302extern struct mutex bfad_mutex;
297 303
298#endif /* __BFAD_DRV_H__ */ 304#endif /* __BFAD_DRV_H__ */
diff --git a/drivers/scsi/bfa/bfad_fwimg.c b/drivers/scsi/bfa/bfad_fwimg.c
index 2ad65f275a92..1baca1a12085 100644
--- a/drivers/scsi/bfa/bfad_fwimg.c
+++ b/drivers/scsi/bfa/bfad_fwimg.c
@@ -33,16 +33,20 @@
33#include <bfa_fwimg_priv.h> 33#include <bfa_fwimg_priv.h>
34#include <bfa.h> 34#include <bfa.h>
35 35
36u32 bfi_image_ct_size; 36u32 bfi_image_ct_fc_size;
37u32 bfi_image_cb_size; 37u32 bfi_image_ct_cna_size;
38u32 *bfi_image_ct; 38u32 bfi_image_cb_fc_size;
39u32 *bfi_image_cb; 39u32 *bfi_image_ct_fc;
40u32 *bfi_image_ct_cna;
41u32 *bfi_image_cb_fc;
40 42
41 43
42#define BFAD_FW_FILE_CT "ctfw.bin" 44#define BFAD_FW_FILE_CT_FC "ctfw_fc.bin"
43#define BFAD_FW_FILE_CB "cbfw.bin" 45#define BFAD_FW_FILE_CT_CNA "ctfw_cna.bin"
44MODULE_FIRMWARE(BFAD_FW_FILE_CT); 46#define BFAD_FW_FILE_CB_FC "cbfw_fc.bin"
45MODULE_FIRMWARE(BFAD_FW_FILE_CB); 47MODULE_FIRMWARE(BFAD_FW_FILE_CT_FC);
48MODULE_FIRMWARE(BFAD_FW_FILE_CT_CNA);
49MODULE_FIRMWARE(BFAD_FW_FILE_CB_FC);
46 50
47u32 * 51u32 *
48bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image, 52bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
@@ -74,24 +78,54 @@ error:
74u32 * 78u32 *
75bfad_get_firmware_buf(struct pci_dev *pdev) 79bfad_get_firmware_buf(struct pci_dev *pdev)
76{ 80{
77 if (pdev->device == BFA_PCI_DEVICE_ID_CT) { 81 if (pdev->device == BFA_PCI_DEVICE_ID_CT_FC) {
78 if (bfi_image_ct_size == 0) 82 if (bfi_image_ct_fc_size == 0)
79 bfad_read_firmware(pdev, &bfi_image_ct, 83 bfad_read_firmware(pdev, &bfi_image_ct_fc,
80 &bfi_image_ct_size, BFAD_FW_FILE_CT); 84 &bfi_image_ct_fc_size, BFAD_FW_FILE_CT_FC);
81 return bfi_image_ct; 85 return bfi_image_ct_fc;
86 } else if (pdev->device == BFA_PCI_DEVICE_ID_CT) {
87 if (bfi_image_ct_cna_size == 0)
88 bfad_read_firmware(pdev, &bfi_image_ct_cna,
89 &bfi_image_ct_cna_size, BFAD_FW_FILE_CT_CNA);
90 return bfi_image_ct_cna;
82 } else { 91 } else {
83 if (bfi_image_cb_size == 0) 92 if (bfi_image_cb_fc_size == 0)
84 bfad_read_firmware(pdev, &bfi_image_cb, 93 bfad_read_firmware(pdev, &bfi_image_cb_fc,
85 &bfi_image_cb_size, BFAD_FW_FILE_CB); 94 &bfi_image_cb_fc_size, BFAD_FW_FILE_CB_FC);
86 return bfi_image_cb; 95 return bfi_image_cb_fc;
87 } 96 }
88} 97}
89 98
90u32 * 99u32 *
91bfi_image_ct_get_chunk(u32 off) 100bfi_image_ct_fc_get_chunk(u32 off)
92{ return (u32 *)(bfi_image_ct + off); } 101{ return (u32 *)(bfi_image_ct_fc + off); }
93 102
94u32 * 103u32 *
95bfi_image_cb_get_chunk(u32 off) 104bfi_image_ct_cna_get_chunk(u32 off)
96{ return (u32 *)(bfi_image_cb + off); } 105{ return (u32 *)(bfi_image_ct_cna + off); }
97 106
107u32 *
108bfi_image_cb_fc_get_chunk(u32 off)
109{ return (u32 *)(bfi_image_cb_fc + off); }
110
111uint32_t *
112bfi_image_get_chunk(int type, uint32_t off)
113{
114 switch (type) {
115 case BFI_IMAGE_CT_FC: return bfi_image_ct_fc_get_chunk(off); break;
116 case BFI_IMAGE_CT_CNA: return bfi_image_ct_cna_get_chunk(off); break;
117 case BFI_IMAGE_CB_FC: return bfi_image_cb_fc_get_chunk(off); break;
118 default: return 0; break;
119 }
120}
121
122uint32_t
123bfi_image_get_size(int type)
124{
125 switch (type) {
126 case BFI_IMAGE_CT_FC: return bfi_image_ct_fc_size; break;
127 case BFI_IMAGE_CT_CNA: return bfi_image_ct_cna_size; break;
128 case BFI_IMAGE_CB_FC: return bfi_image_cb_fc_size; break;
129 default: return 0; break;
130 }
131}
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 5b7cf539e50b..678120b70460 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -554,7 +554,7 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
554 im_port->shost->transportt = 554 im_port->shost->transportt =
555 bfad_im_scsi_vport_transport_template; 555 bfad_im_scsi_vport_transport_template;
556 556
557 error = scsi_add_host(im_port->shost, dev); 557 error = scsi_add_host_with_dma(im_port->shost, dev, &bfad->pcidev->dev);
558 if (error) { 558 if (error) {
559 printk(KERN_WARNING "scsi_add_host failure %d\n", error); 559 printk(KERN_WARNING "scsi_add_host failure %d\n", error);
560 goto out_fc_rel; 560 goto out_fc_rel;
@@ -567,6 +567,7 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
567 567
568out_fc_rel: 568out_fc_rel:
569 scsi_host_put(im_port->shost); 569 scsi_host_put(im_port->shost);
570 im_port->shost = NULL;
570out_free_idr: 571out_free_idr:
571 mutex_lock(&bfad_mutex); 572 mutex_lock(&bfad_mutex);
572 idr_remove(&bfad_im_port_index, im_port->idr_id); 573 idr_remove(&bfad_im_port_index, im_port->idr_id);
@@ -597,10 +598,12 @@ bfad_im_port_delete_handler(struct work_struct *work)
597{ 598{
598 struct bfad_im_port_s *im_port = 599 struct bfad_im_port_s *im_port =
599 container_of(work, struct bfad_im_port_s, port_delete_work); 600 container_of(work, struct bfad_im_port_s, port_delete_work);
601 struct bfad_s *bfad = im_port->bfad;
600 602
601 if (im_port->port->pvb_type != BFAD_PORT_PHYS_BASE) { 603 if (im_port->port->pvb_type != BFAD_PORT_PHYS_BASE) {
602 im_port->flags |= BFAD_PORT_DELETE; 604 im_port->flags |= BFAD_PORT_DELETE;
603 fc_vport_terminate(im_port->fc_vport); 605 fc_vport_terminate(im_port->fc_vport);
606 atomic_dec(&bfad->wq_reqcnt);
604 } 607 }
605 608
606} 609}
@@ -633,8 +636,11 @@ bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port)
633{ 636{
634 struct bfad_im_port_s *im_port = port->im_port; 637 struct bfad_im_port_s *im_port = port->im_port;
635 638
636 queue_work(bfad->im->drv_workq, 639 if (im_port->port->pvb_type != BFAD_PORT_PHYS_BASE) {
640 atomic_inc(&bfad->wq_reqcnt);
641 queue_work(bfad->im->drv_workq,
637 &im_port->port_delete_work); 642 &im_port->port_delete_work);
643 }
638} 644}
639 645
640void 646void
@@ -695,12 +701,27 @@ void
695bfad_im_probe_undo(struct bfad_s *bfad) 701bfad_im_probe_undo(struct bfad_s *bfad)
696{ 702{
697 if (bfad->im) { 703 if (bfad->im) {
704 while (atomic_read(&bfad->wq_reqcnt)) {
705 printk(KERN_INFO "bfa %s: waiting workq processing,"
706 " wq_reqcnt:%x\n", bfad->pci_name,
707 atomic_read(&bfad->wq_reqcnt));
708 schedule_timeout_uninterruptible(HZ);
709 }
698 bfad_os_destroy_workq(bfad->im); 710 bfad_os_destroy_workq(bfad->im);
699 kfree(bfad->im); 711 kfree(bfad->im);
700 bfad->im = NULL; 712 bfad->im = NULL;
701 } 713 }
702} 714}
703 715
716/**
717 * Call back function to handle IO redirection state change
718 */
719void
720bfa_cb_ioredirect_state_change(void *hcb_bfad, bfa_boolean_t ioredirect)
721{
722 /* Do nothing */
723}
724
704struct Scsi_Host * 725struct Scsi_Host *
705bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad) 726bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
706{ 727{
@@ -1204,9 +1225,9 @@ int
1204bfad_os_get_linkup_delay(struct bfad_s *bfad) 1225bfad_os_get_linkup_delay(struct bfad_s *bfad)
1205{ 1226{
1206 1227
1207 u8 nwwns = 0; 1228 u8 nwwns = 0;
1208 wwn_t *wwns; 1229 wwn_t wwns[BFA_PREBOOT_BOOTLUN_MAX];
1209 int ldelay; 1230 int ldelay;
1210 1231
1211 /* 1232 /*
1212 * Querying for the boot target port wwns 1233 * Querying for the boot target port wwns
@@ -1215,7 +1236,7 @@ bfad_os_get_linkup_delay(struct bfad_s *bfad)
1215 * else => local boot machine set bfa_linkup_delay = 10 1236 * else => local boot machine set bfa_linkup_delay = 10
1216 */ 1237 */
1217 1238
1218 bfa_iocfc_get_bootwwns(&bfad->bfa, &nwwns, &wwns); 1239 bfa_iocfc_get_bootwwns(&bfad->bfa, &nwwns, wwns);
1219 1240
1220 if (nwwns > 0) { 1241 if (nwwns > 0) {
1221 /* If boot over SAN; linkup_delay = 30sec */ 1242 /* If boot over SAN; linkup_delay = 30sec */
diff --git a/drivers/scsi/bfa/bfad_im_compat.h b/drivers/scsi/bfa/bfad_im_compat.h
index b36be15044a4..0a122abbbe89 100644
--- a/drivers/scsi/bfa/bfad_im_compat.h
+++ b/drivers/scsi/bfa/bfad_im_compat.h
@@ -18,9 +18,6 @@
18#ifndef __BFAD_IM_COMPAT_H__ 18#ifndef __BFAD_IM_COMPAT_H__
19#define __BFAD_IM_COMPAT_H__ 19#define __BFAD_IM_COMPAT_H__
20 20
21extern u32 *bfi_image_buf;
22extern u32 bfi_image_size;
23
24extern struct device_attribute *bfad_im_host_attrs[]; 21extern struct device_attribute *bfad_im_host_attrs[];
25extern struct device_attribute *bfad_im_vport_attrs[]; 22extern struct device_attribute *bfad_im_vport_attrs[];
26 23
@@ -37,10 +34,12 @@ bfad_load_fwimg(struct pci_dev *pdev)
37static inline void 34static inline void
38bfad_free_fwimg(void) 35bfad_free_fwimg(void)
39{ 36{
40 if (bfi_image_ct_size && bfi_image_ct) 37 if (bfi_image_ct_fc_size && bfi_image_ct_fc)
41 vfree(bfi_image_ct); 38 vfree(bfi_image_ct_fc);
42 if (bfi_image_cb_size && bfi_image_cb) 39 if (bfi_image_ct_cna_size && bfi_image_ct_cna)
43 vfree(bfi_image_cb); 40 vfree(bfi_image_ct_cna);
41 if (bfi_image_cb_fc_size && bfi_image_cb_fc)
42 vfree(bfi_image_cb_fc);
44} 43}
45 44
46#endif 45#endif
diff --git a/drivers/scsi/bfa/bfad_intr.c b/drivers/scsi/bfa/bfad_intr.c
index 2b7dbecbebca..56a351584f0c 100644
--- a/drivers/scsi/bfa/bfad_intr.c
+++ b/drivers/scsi/bfa/bfad_intr.c
@@ -26,7 +26,11 @@ BFA_TRC_FILE(LDRV, INTR);
26static int msix_disable_cb; 26static int msix_disable_cb;
27static int msix_disable_ct; 27static int msix_disable_ct;
28module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR); 28module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR);
29MODULE_PARM_DESC(msix_disable_cb, "Disable MSIX for Brocade-415/425/815/825"
30 " cards, default=0, Range[false:0|true:1]");
29module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR); 31module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR);
32MODULE_PARM_DESC(msix_disable_ct, "Disable MSIX for Brocade-1010/1020/804"
33 " cards, default=0, Range[false:0|true:1]");
30/** 34/**
31 * Line based interrupt handler. 35 * Line based interrupt handler.
32 */ 36 */
@@ -151,8 +155,8 @@ bfad_setup_intr(struct bfad_s *bfad)
151 /* Set up the msix entry table */ 155 /* Set up the msix entry table */
152 bfad_init_msix_entry(bfad, msix_entries, mask, max_bit); 156 bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);
153 157
154 if ((pdev->device == BFA_PCI_DEVICE_ID_CT && !msix_disable_ct) || 158 if ((bfa_asic_id_ct(pdev->device) && !msix_disable_ct) ||
155 (pdev->device != BFA_PCI_DEVICE_ID_CT && !msix_disable_cb)) { 159 (!bfa_asic_id_ct(pdev->device) && !msix_disable_cb)) {
156 160
157 error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec); 161 error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
158 if (error) { 162 if (error) {
diff --git a/drivers/scsi/bfa/fabric.c b/drivers/scsi/bfa/fabric.c
index 8166e9745ec0..ddd4ba9317e6 100644
--- a/drivers/scsi/bfa/fabric.c
+++ b/drivers/scsi/bfa/fabric.c
@@ -789,7 +789,7 @@ bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric)
789 789
790 list_for_each_safe(qe, qen, &fabric->vport_q) { 790 list_for_each_safe(qe, qen, &fabric->vport_q) {
791 vport = (struct bfa_fcs_vport_s *)qe; 791 vport = (struct bfa_fcs_vport_s *)qe;
792 bfa_fcs_vport_delete(vport); 792 bfa_fcs_vport_fcs_delete(vport);
793 } 793 }
794 794
795 bfa_fcs_port_delete(&fabric->bport); 795 bfa_fcs_port_delete(&fabric->bport);
@@ -1027,6 +1027,32 @@ bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric)
1027 return fabric->num_vports; 1027 return fabric->num_vports;
1028} 1028}
1029 1029
1030/*
1031 * Get OUI of the attached switch.
1032 *
1033 * Note : Use of this function should be avoided as much as possible.
1034 * This function should be used only if there is any requirement
1035 * to check for FOS version below 6.3.
1036 * To check if the attached fabric is a brocade fabric, use
1037 * bfa_lps_is_brcd_fabric() which works for FOS versions 6.3
1038 * or above only.
1039 */
1040
1041u16
1042bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric)
1043{
1044 wwn_t fab_nwwn;
1045 u8 *tmp;
1046 u16 oui;
1047
1048 fab_nwwn = bfa_lps_get_peer_nwwn(fabric->lps);
1049
1050 tmp = (uint8_t *)&fab_nwwn;
1051 oui = (tmp[3] << 8) | tmp[4];
1052
1053 return oui;
1054}
1055
1030/** 1056/**
1031 * Unsolicited frame receive handling. 1057 * Unsolicited frame receive handling.
1032 */ 1058 */
@@ -1271,6 +1297,22 @@ bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
1271} 1297}
1272 1298
1273/** 1299/**
1300 *
1301 * @param[in] fabric - fabric
1302 * @param[in] node_symname -
1303 * Caller allocated buffer to receive the symbolic name
1304 *
1305 * @return - none
1306 */
1307void
1308bfa_fcs_get_sym_name(const struct bfa_fcs_s *fcs, char *node_symname)
1309{
1310 bfa_os_memcpy(node_symname,
1311 fcs->fabric.bport.port_cfg.sym_name.symname,
1312 BFA_SYMNAME_MAXLEN);
1313}
1314
1315/**
1274 * Not used by FCS. 1316 * Not used by FCS.
1275 */ 1317 */
1276void 1318void
diff --git a/drivers/scsi/bfa/fcpim.c b/drivers/scsi/bfa/fcpim.c
index 8ae4a2cfa85b..6b8976ad22fa 100644
--- a/drivers/scsi/bfa/fcpim.c
+++ b/drivers/scsi/bfa/fcpim.c
@@ -110,6 +110,7 @@ bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
110 switch (event) { 110 switch (event) {
111 case BFA_FCS_ITNIM_SM_ONLINE: 111 case BFA_FCS_ITNIM_SM_ONLINE:
112 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send); 112 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send);
113 itnim->prli_retries = 0;
113 bfa_fcs_itnim_send_prli(itnim, NULL); 114 bfa_fcs_itnim_send_prli(itnim, NULL);
114 break; 115 break;
115 116
@@ -174,8 +175,12 @@ bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
174 175
175 switch (event) { 176 switch (event) {
176 case BFA_FCS_ITNIM_SM_RSP_OK: 177 case BFA_FCS_ITNIM_SM_RSP_OK:
177 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_online); 178 if (itnim->rport->scsi_function == BFA_RPORT_INITIATOR) {
178 bfa_itnim_online(itnim->bfa_itnim, itnim->seq_rec); 179 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
180 } else {
181 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_online);
182 bfa_itnim_online(itnim->bfa_itnim, itnim->seq_rec);
183 }
179 break; 184 break;
180 185
181 case BFA_FCS_ITNIM_SM_RSP_ERROR: 186 case BFA_FCS_ITNIM_SM_RSP_ERROR:
@@ -193,9 +198,7 @@ bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
193 198
194 case BFA_FCS_ITNIM_SM_INITIATOR: 199 case BFA_FCS_ITNIM_SM_INITIATOR:
195 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator); 200 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
196 /* 201 bfa_fcxp_discard(itnim->fcxp);
197 * dont discard fcxp. accept will reach same state
198 */
199 break; 202 break;
200 203
201 case BFA_FCS_ITNIM_SM_DELETE: 204 case BFA_FCS_ITNIM_SM_DELETE:
@@ -218,8 +221,16 @@ bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
218 221
219 switch (event) { 222 switch (event) {
220 case BFA_FCS_ITNIM_SM_TIMEOUT: 223 case BFA_FCS_ITNIM_SM_TIMEOUT:
221 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send); 224 if (itnim->prli_retries < BFA_FCS_RPORT_MAX_RETRIES) {
222 bfa_fcs_itnim_send_prli(itnim, NULL); 225 itnim->prli_retries++;
226 bfa_trc(itnim->fcs, itnim->prli_retries);
227 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send);
228 bfa_fcs_itnim_send_prli(itnim, NULL);
229 } else {
230 /* invoke target offline */
231 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
232 bfa_fcs_rport_logo_imp(itnim->rport);
233 }
223 break; 234 break;
224 235
225 case BFA_FCS_ITNIM_SM_OFFLINE: 236 case BFA_FCS_ITNIM_SM_OFFLINE:
@@ -422,7 +433,7 @@ bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced)
422 bfa_fcxp_send(fcxp, rport->bfa_rport, port->fabric->vf_id, port->lp_tag, 433 bfa_fcxp_send(fcxp, rport->bfa_rport, port->fabric->vf_id, port->lp_tag,
423 BFA_FALSE, FC_CLASS_3, len, &fchs, 434 BFA_FALSE, FC_CLASS_3, len, &fchs,
424 bfa_fcs_itnim_prli_response, (void *)itnim, FC_MAX_PDUSZ, 435 bfa_fcs_itnim_prli_response, (void *)itnim, FC_MAX_PDUSZ,
425 FC_RA_TOV); 436 FC_ELS_TOV);
426 437
427 itnim->stats.prli_sent++; 438 itnim->stats.prli_sent++;
428 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_FRMSENT); 439 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_FRMSENT);
@@ -467,7 +478,7 @@ bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
467 BFA_RPORT_INITIATOR; 478 BFA_RPORT_INITIATOR;
468 itnim->stats.prli_rsp_acc++; 479 itnim->stats.prli_rsp_acc++;
469 bfa_sm_send_event(itnim, 480 bfa_sm_send_event(itnim,
470 BFA_FCS_ITNIM_SM_INITIATOR); 481 BFA_FCS_ITNIM_SM_RSP_OK);
471 return; 482 return;
472 } 483 }
473 484
@@ -738,6 +749,7 @@ bfa_fcs_itnim_attr_get(struct bfa_fcs_port_s *port, wwn_t rpwwn,
738 attr->rec_support = itnim->rec_support; 749 attr->rec_support = itnim->rec_support;
739 attr->conf_comp = itnim->conf_comp; 750 attr->conf_comp = itnim->conf_comp;
740 attr->task_retry_id = itnim->task_retry_id; 751 attr->task_retry_id = itnim->task_retry_id;
752 bfa_os_memset(&attr->io_latency, 0, sizeof(struct bfa_itnim_latency_s));
741 753
742 return BFA_STATUS_OK; 754 return BFA_STATUS_OK;
743} 755}
@@ -793,7 +805,7 @@ bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, struct fchs_s *fchs,
793 805
794 switch (els_cmd->els_code) { 806 switch (els_cmd->els_code) {
795 case FC_ELS_PRLO: 807 case FC_ELS_PRLO:
796 /* bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_PRLO); */ 808 bfa_fcs_rport_prlo(itnim->rport, fchs->ox_id);
797 break; 809 break;
798 810
799 default: 811 default:
diff --git a/drivers/scsi/bfa/fcs_fabric.h b/drivers/scsi/bfa/fcs_fabric.h
index 244c3f00c50c..432ab8ab8c3c 100644
--- a/drivers/scsi/bfa/fcs_fabric.h
+++ b/drivers/scsi/bfa/fcs_fabric.h
@@ -26,6 +26,8 @@
26#include <fcs/bfa_fcs_vport.h> 26#include <fcs/bfa_fcs_vport.h>
27#include <fcs/bfa_fcs_lport.h> 27#include <fcs/bfa_fcs_lport.h>
28 28
29#define BFA_FCS_BRCD_SWITCH_OUI 0x051e
30
29/* 31/*
30* fcs friend functions: only between fcs modules 32* fcs friend functions: only between fcs modules
31 */ 33 */
@@ -60,4 +62,7 @@ void bfa_fcs_auth_finished(struct bfa_fcs_fabric_s *fabric,
60 62
61void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric, 63void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
62 wwn_t fabric_name); 64 wwn_t fabric_name);
65u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric);
66void bfa_fcs_get_sym_name(const struct bfa_fcs_s *fcs, char *node_symname);
67
63#endif /* __FCS_FABRIC_H__ */ 68#endif /* __FCS_FABRIC_H__ */
diff --git a/drivers/scsi/bfa/fcs_rport.h b/drivers/scsi/bfa/fcs_rport.h
index 9c8d1d292380..e634fb7a69b8 100644
--- a/drivers/scsi/bfa/fcs_rport.h
+++ b/drivers/scsi/bfa/fcs_rport.h
@@ -24,6 +24,8 @@
24 24
25#include <fcs/bfa_fcs_rport.h> 25#include <fcs/bfa_fcs_rport.h>
26 26
27#define BFA_FCS_RPORT_MAX_RETRIES (5)
28
27void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs, 29void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs,
28 u16 len); 30 u16 len);
29void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport); 31void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport);
@@ -41,6 +43,7 @@ void bfa_fcs_rport_plogi_create(struct bfa_fcs_port_s *port,
41void bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs, 43void bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs,
42 struct fc_logi_s *plogi); 44 struct fc_logi_s *plogi);
43void bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport); 45void bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport);
46void bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, uint16_t ox_id);
44void bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport); 47void bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport);
45void bfa_fcs_rport_itntm_ack(struct bfa_fcs_rport_s *rport); 48void bfa_fcs_rport_itntm_ack(struct bfa_fcs_rport_s *rport);
46void bfa_fcs_rport_tin_ack(struct bfa_fcs_rport_s *rport); 49void bfa_fcs_rport_tin_ack(struct bfa_fcs_rport_s *rport);
diff --git a/drivers/scsi/bfa/fcs_vport.h b/drivers/scsi/bfa/fcs_vport.h
index 13c32ebf946c..bb647a4a5dde 100644
--- a/drivers/scsi/bfa/fcs_vport.h
+++ b/drivers/scsi/bfa/fcs_vport.h
@@ -26,6 +26,7 @@ void bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport);
26void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport); 26void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport);
27void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport); 27void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport);
28void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport); 28void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport);
29void bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport);
29 30
30#endif /* __FCS_VPORT_H__ */ 31#endif /* __FCS_VPORT_H__ */
31 32
diff --git a/drivers/scsi/bfa/fdmi.c b/drivers/scsi/bfa/fdmi.c
index 8f17076d1a87..2b50eabf4b1e 100644
--- a/drivers/scsi/bfa/fdmi.c
+++ b/drivers/scsi/bfa/fdmi.c
@@ -532,7 +532,7 @@ bfa_fcs_port_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
532 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 532 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
533 FC_CLASS_3, (len + attr_len), &fchs, 533 FC_CLASS_3, (len + attr_len), &fchs,
534 bfa_fcs_port_fdmi_rhba_response, (void *)fdmi, 534 bfa_fcs_port_fdmi_rhba_response, (void *)fdmi,
535 FC_MAX_PDUSZ, FC_RA_TOV); 535 FC_MAX_PDUSZ, FC_FCCT_TOV);
536 536
537 bfa_sm_send_event(fdmi, FDMISM_EVENT_RHBA_SENT); 537 bfa_sm_send_event(fdmi, FDMISM_EVENT_RHBA_SENT);
538} 538}
@@ -823,7 +823,7 @@ bfa_fcs_port_fdmi_send_rprt(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
823 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 823 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
824 FC_CLASS_3, len + attr_len, &fchs, 824 FC_CLASS_3, len + attr_len, &fchs,
825 bfa_fcs_port_fdmi_rprt_response, (void *)fdmi, 825 bfa_fcs_port_fdmi_rprt_response, (void *)fdmi,
826 FC_MAX_PDUSZ, FC_RA_TOV); 826 FC_MAX_PDUSZ, FC_FCCT_TOV);
827 827
828 bfa_sm_send_event(fdmi, FDMISM_EVENT_RPRT_SENT); 828 bfa_sm_send_event(fdmi, FDMISM_EVENT_RPRT_SENT);
829} 829}
@@ -1043,7 +1043,7 @@ bfa_fcs_port_fdmi_send_rpa(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1043 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 1043 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1044 FC_CLASS_3, len + attr_len, &fchs, 1044 FC_CLASS_3, len + attr_len, &fchs,
1045 bfa_fcs_port_fdmi_rpa_response, (void *)fdmi, 1045 bfa_fcs_port_fdmi_rpa_response, (void *)fdmi,
1046 FC_MAX_PDUSZ, FC_RA_TOV); 1046 FC_MAX_PDUSZ, FC_FCCT_TOV);
1047 1047
1048 bfa_sm_send_event(fdmi, FDMISM_EVENT_RPA_SENT); 1048 bfa_sm_send_event(fdmi, FDMISM_EVENT_RPA_SENT);
1049} 1049}
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen_ioc.h b/drivers/scsi/bfa/include/aen/bfa_aen_ioc.h
index 71378b446b69..4daf96faa266 100644
--- a/drivers/scsi/bfa/include/aen/bfa_aen_ioc.h
+++ b/drivers/scsi/bfa/include/aen/bfa_aen_ioc.h
@@ -32,6 +32,14 @@
32 BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_DISABLE) 32 BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_DISABLE)
33#define BFA_AEN_IOC_FWMISMATCH \ 33#define BFA_AEN_IOC_FWMISMATCH \
34 BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_FWMISMATCH) 34 BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_FWMISMATCH)
35#define BFA_AEN_IOC_FWCFG_ERROR \
36 BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_FWCFG_ERROR)
37#define BFA_AEN_IOC_INVALID_VENDOR \
38 BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_INVALID_VENDOR)
39#define BFA_AEN_IOC_INVALID_NWWN \
40 BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_INVALID_NWWN)
41#define BFA_AEN_IOC_INVALID_PWWN \
42 BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_INVALID_PWWN)
35 43
36#endif 44#endif
37 45
diff --git a/drivers/scsi/bfa/include/bfa.h b/drivers/scsi/bfa/include/bfa.h
index 1f5966cfbd16..d52b32f5695c 100644
--- a/drivers/scsi/bfa/include/bfa.h
+++ b/drivers/scsi/bfa/include/bfa.h
@@ -126,6 +126,10 @@ struct bfa_sge_s {
126 bfa_ioc_get_type(&(__bfa)->ioc) 126 bfa_ioc_get_type(&(__bfa)->ioc)
127#define bfa_get_mac(__bfa) \ 127#define bfa_get_mac(__bfa) \
128 bfa_ioc_get_mac(&(__bfa)->ioc) 128 bfa_ioc_get_mac(&(__bfa)->ioc)
129#define bfa_get_mfg_mac(__bfa) \
130 bfa_ioc_get_mfg_mac(&(__bfa)->ioc)
131#define bfa_get_fw_clock_res(__bfa) \
132 ((__bfa)->iocfc.cfgrsp->fwcfg.fw_tick_res)
129 133
130/* 134/*
131 * bfa API functions 135 * bfa API functions
diff --git a/drivers/scsi/bfa/include/bfa_fcpim.h b/drivers/scsi/bfa/include/bfa_fcpim.h
index 04789795fa53..4bc9453081df 100644
--- a/drivers/scsi/bfa/include/bfa_fcpim.h
+++ b/drivers/scsi/bfa/include/bfa_fcpim.h
@@ -42,6 +42,24 @@ u16 bfa_fcpim_qdepth_get(struct bfa_s *bfa);
42bfa_status_t bfa_fcpim_get_modstats(struct bfa_s *bfa, 42bfa_status_t bfa_fcpim_get_modstats(struct bfa_s *bfa,
43 struct bfa_fcpim_stats_s *modstats); 43 struct bfa_fcpim_stats_s *modstats);
44bfa_status_t bfa_fcpim_clr_modstats(struct bfa_s *bfa); 44bfa_status_t bfa_fcpim_clr_modstats(struct bfa_s *bfa);
45void bfa_fcpim_set_ioredirect(struct bfa_s *bfa, bfa_boolean_t state);
46void bfa_fcpim_update_ioredirect(struct bfa_s *bfa);
47void bfa_cb_ioredirect_state_change(void *hcb_bfad, bfa_boolean_t ioredirect);
48
49#define bfa_fcpim_ioredirect_enabled(__bfa) \
50 (((struct bfa_fcpim_mod_s *)(BFA_FCPIM_MOD(__bfa)))->ioredirect)
51
52#define bfa_fcpim_get_next_reqq(__bfa, __qid) \
53{ \
54 struct bfa_fcpim_mod_s *__fcpim = BFA_FCPIM_MOD(__bfa); \
55 __fcpim->reqq++; \
56 __fcpim->reqq &= (BFI_IOC_MAX_CQS - 1); \
57 *(__qid) = __fcpim->reqq; \
58}
59
60#define bfa_iocfc_map_msg_to_qid(__msg, __qid) \
61 *(__qid) = (u8)((__msg) & (BFI_IOC_MAX_CQS - 1));
62
45 63
46/* 64/*
47 * bfa itnim API functions 65 * bfa itnim API functions
@@ -56,6 +74,7 @@ void bfa_itnim_get_stats(struct bfa_itnim_s *itnim,
56 struct bfa_itnim_hal_stats_s *stats); 74 struct bfa_itnim_hal_stats_s *stats);
57void bfa_itnim_clear_stats(struct bfa_itnim_s *itnim); 75void bfa_itnim_clear_stats(struct bfa_itnim_s *itnim);
58 76
77#define bfa_itnim_get_reqq(__ioim) (((struct bfa_ioim_s *)__ioim)->itnim->reqq)
59 78
60/** 79/**
61 * BFA completion callback for bfa_itnim_online(). 80 * BFA completion callback for bfa_itnim_online().
@@ -156,4 +175,3 @@ void bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
156 enum bfi_tskim_status tsk_status); 175 enum bfi_tskim_status tsk_status);
157 176
158#endif /* __BFA_FCPIM_H__ */ 177#endif /* __BFA_FCPIM_H__ */
159
diff --git a/drivers/scsi/bfa/include/bfa_svc.h b/drivers/scsi/bfa/include/bfa_svc.h
index 1349b99a3c6d..7840943d73b0 100644
--- a/drivers/scsi/bfa/include/bfa_svc.h
+++ b/drivers/scsi/bfa/include/bfa_svc.h
@@ -215,6 +215,7 @@ bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa,
215 bfa_cb_pport_t cbfn, void *cbarg); 215 bfa_cb_pport_t cbfn, void *cbarg);
216bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, 216bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn,
217 void *cbarg); 217 void *cbarg);
218bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa);
218 219
219/* 220/*
220 * bfa rport API functions 221 * bfa rport API functions
diff --git a/drivers/scsi/bfa/include/bfi/bfi_ctreg.h b/drivers/scsi/bfa/include/bfi/bfi_ctreg.h
index 57a8497105af..c0ef5a93b797 100644
--- a/drivers/scsi/bfa/include/bfi/bfi_ctreg.h
+++ b/drivers/scsi/bfa/include/bfi/bfi_ctreg.h
@@ -455,6 +455,9 @@ enum {
455#define __PSS_LPU0_RAM_ERR 0x00000001 455#define __PSS_LPU0_RAM_ERR 0x00000001
456#define ERR_SET_REG 0x00018818 456#define ERR_SET_REG 0x00018818
457#define __PSS_ERR_STATUS_SET 0x003fffff 457#define __PSS_ERR_STATUS_SET 0x003fffff
458#define PMM_1T_RESET_REG_P0 0x0002381c
459#define __PMM_1T_RESET_P 0x00000001
460#define PMM_1T_RESET_REG_P1 0x00023c1c
458#define HQM_QSET0_RXQ_DRBL_P0 0x00038000 461#define HQM_QSET0_RXQ_DRBL_P0 0x00038000
459#define __RXQ0_ADD_VECTORS_P 0x80000000 462#define __RXQ0_ADD_VECTORS_P 0x80000000
460#define __RXQ0_STOP_P 0x40000000 463#define __RXQ0_STOP_P 0x40000000
diff --git a/drivers/scsi/bfa/include/bfi/bfi_ioc.h b/drivers/scsi/bfa/include/bfi/bfi_ioc.h
index a0158aac0024..450ded6e9bc2 100644
--- a/drivers/scsi/bfa/include/bfi/bfi_ioc.h
+++ b/drivers/scsi/bfa/include/bfi/bfi_ioc.h
@@ -48,9 +48,14 @@ struct bfi_ioc_getattr_req_s {
48}; 48};
49 49
50struct bfi_ioc_attr_s { 50struct bfi_ioc_attr_s {
51 wwn_t mfg_wwn; 51 wwn_t mfg_pwwn; /* Mfg port wwn */
52 mac_t mfg_mac; 52 wwn_t mfg_nwwn; /* Mfg node wwn */
53 u16 rsvd_a; 53 mac_t mfg_mac; /* Mfg mac */
54 u16 rsvd_a;
55 wwn_t pwwn;
56 wwn_t nwwn;
57 mac_t mac; /* PBC or Mfg mac */
58 u16 rsvd_b;
54 char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)]; 59 char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
55 u8 pcie_gen; 60 u8 pcie_gen;
56 u8 pcie_lanes_orig; 61 u8 pcie_lanes_orig;
@@ -58,11 +63,12 @@ struct bfi_ioc_attr_s {
58 u8 rx_bbcredit; /* receive buffer credits */ 63 u8 rx_bbcredit; /* receive buffer credits */
59 u32 adapter_prop; /* adapter properties */ 64 u32 adapter_prop; /* adapter properties */
60 u16 maxfrsize; /* max receive frame size */ 65 u16 maxfrsize; /* max receive frame size */
61 char asic_rev; 66 char asic_rev;
62 u8 rsvd_b; 67 u8 rsvd_c;
63 char fw_version[BFA_VERSION_LEN]; 68 char fw_version[BFA_VERSION_LEN];
64 char optrom_version[BFA_VERSION_LEN]; 69 char optrom_version[BFA_VERSION_LEN];
65 struct bfa_mfg_vpd_s vpd; 70 struct bfa_mfg_vpd_s vpd;
71 u32 card_type; /* card type */
66}; 72};
67 73
68/** 74/**
diff --git a/drivers/scsi/bfa/include/bfi/bfi_iocfc.h b/drivers/scsi/bfa/include/bfi/bfi_iocfc.h
index c3760df72575..ccdfcc5d7e0b 100644
--- a/drivers/scsi/bfa/include/bfi/bfi_iocfc.h
+++ b/drivers/scsi/bfa/include/bfi/bfi_iocfc.h
@@ -19,6 +19,7 @@
19#define __BFI_IOCFC_H__ 19#define __BFI_IOCFC_H__
20 20
21#include "bfi.h" 21#include "bfi.h"
22#include <bfi/bfi_pbc.h>
22#include <defs/bfa_defs_ioc.h> 23#include <defs/bfa_defs_ioc.h>
23#include <defs/bfa_defs_iocfc.h> 24#include <defs/bfa_defs_iocfc.h>
24#include <defs/bfa_defs_boot.h> 25#include <defs/bfa_defs_boot.h>
@@ -78,6 +79,7 @@ struct bfi_iocfc_cfgrsp_s {
78 struct bfa_iocfc_fwcfg_s fwcfg; 79 struct bfa_iocfc_fwcfg_s fwcfg;
79 struct bfa_iocfc_intr_attr_s intr_attr; 80 struct bfa_iocfc_intr_attr_s intr_attr;
80 struct bfi_iocfc_bootwwns bootwwns; 81 struct bfi_iocfc_bootwwns bootwwns;
82 struct bfi_pbc_s pbc_cfg;
81}; 83};
82 84
83/** 85/**
diff --git a/drivers/scsi/bfa/include/bfi/bfi_pbc.h b/drivers/scsi/bfa/include/bfi/bfi_pbc.h
new file mode 100644
index 000000000000..88a4154c30c0
--- /dev/null
+++ b/drivers/scsi/bfa/include/bfi/bfi_pbc.h
@@ -0,0 +1,62 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_PBC_H__
19#define __BFI_PBC_H__
20
21#pragma pack(1)
22
23#define BFI_PBC_MAX_BLUNS 8
24#define BFI_PBC_MAX_VPORTS 16
25
26#define BFI_PBC_PORT_DISABLED 2
27/**
28 * PBC boot lun configuration
29 */
30struct bfi_pbc_blun_s {
31 wwn_t tgt_pwwn;
32 lun_t tgt_lun;
33};
34
35/**
36 * PBC virtual port configuration
37 */
38struct bfi_pbc_vport_s {
39 wwn_t vp_pwwn;
40 wwn_t vp_nwwn;
41};
42
43/**
44 * BFI pre-boot configuration information
45 */
46struct bfi_pbc_s {
47 u8 port_enabled;
48 u8 boot_enabled;
49 u8 nbluns;
50 u8 nvports;
51 u8 port_speed;
52 u8 rsvd_a;
53 u16 hss;
54 wwn_t pbc_pwwn;
55 wwn_t pbc_nwwn;
56 struct bfi_pbc_blun_s blun[BFI_PBC_MAX_BLUNS];
57 struct bfi_pbc_vport_s vport[BFI_PBC_MAX_VPORTS];
58};
59
60#pragma pack()
61
62#endif /* __BFI_PBC_H__ */
diff --git a/drivers/scsi/bfa/include/cna/port/bfa_port.h b/drivers/scsi/bfa/include/cna/port/bfa_port.h
index 7cbf17d3141b..d7babaf97848 100644
--- a/drivers/scsi/bfa/include/cna/port/bfa_port.h
+++ b/drivers/scsi/bfa/include/cna/port/bfa_port.h
@@ -37,6 +37,7 @@ struct bfa_port_s {
37 bfa_port_stats_cbfn_t stats_cbfn; 37 bfa_port_stats_cbfn_t stats_cbfn;
38 void *stats_cbarg; 38 void *stats_cbarg;
39 bfa_status_t stats_status; 39 bfa_status_t stats_status;
40 u32 stats_reset_time;
40 union bfa_pport_stats_u *stats; 41 union bfa_pport_stats_u *stats;
41 struct bfa_dma_s stats_dma; 42 struct bfa_dma_s stats_dma;
42 bfa_boolean_t endis_pending; 43 bfa_boolean_t endis_pending;
diff --git a/drivers/scsi/bfa/include/cs/bfa_debug.h b/drivers/scsi/bfa/include/cs/bfa_debug.h
index 441be86b1b0f..75a911ea7936 100644
--- a/drivers/scsi/bfa/include/cs/bfa_debug.h
+++ b/drivers/scsi/bfa/include/cs/bfa_debug.h
@@ -28,7 +28,8 @@
28} while (0) 28} while (0)
29 29
30#define bfa_sm_fault(__mod, __event) do { \ 30#define bfa_sm_fault(__mod, __event) do { \
31 bfa_sm_panic((__mod)->logm, __LINE__, __FILE__, __event); \ 31 bfa_trc(__mod, (((uint32_t)0xDEAD << 16) | __event)); \
32 bfa_sm_panic((__mod)->logm, __LINE__, __FILE__, __event); \
32} while (0) 33} while (0)
33 34
34#ifndef BFA_PERF_BUILD 35#ifndef BFA_PERF_BUILD
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_adapter.h b/drivers/scsi/bfa/include/defs/bfa_defs_adapter.h
index 8c208fc8e329..aea0360d67d5 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_adapter.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_adapter.h
@@ -39,7 +39,7 @@ enum {
39struct bfa_adapter_attr_s { 39struct bfa_adapter_attr_s {
40 char manufacturer[BFA_ADAPTER_MFG_NAME_LEN]; 40 char manufacturer[BFA_ADAPTER_MFG_NAME_LEN];
41 char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN]; 41 char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
42 u32 rsvd1; 42 u32 card_type;
43 char model[BFA_ADAPTER_MODEL_NAME_LEN]; 43 char model[BFA_ADAPTER_MODEL_NAME_LEN];
44 char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN]; 44 char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
45 wwn_t pwwn; 45 wwn_t pwwn;
@@ -60,6 +60,7 @@ struct bfa_adapter_attr_s {
60 u8 pcie_lanes_orig; 60 u8 pcie_lanes_orig;
61 u8 pcie_lanes; 61 u8 pcie_lanes;
62 u8 cna_capable; 62 u8 cna_capable;
63 u8 is_mezz;
63}; 64};
64 65
65/** 66/**
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_auth.h b/drivers/scsi/bfa/include/defs/bfa_defs_auth.h
index 45df32820911..f56ed871bb99 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_auth.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_auth.h
@@ -125,10 +125,10 @@ struct bfa_auth_attr_s {
125 enum bfa_auth_status status; 125 enum bfa_auth_status status;
126 enum bfa_auth_algo algo; 126 enum bfa_auth_algo algo;
127 enum bfa_auth_group dh_grp; 127 enum bfa_auth_group dh_grp;
128 u16 rjt_code; 128 enum bfa_auth_rej_code rjt_code;
129 u16 rjt_code_exp; 129 enum bfa_auth_rej_code_exp rjt_code_exp;
130 u8 secret_set; 130 u8 secret_set;
131 u8 resv[7]; 131 u8 resv[3];
132}; 132};
133 133
134#endif /* __BFA_DEFS_AUTH_H__ */ 134#endif /* __BFA_DEFS_AUTH_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_boot.h b/drivers/scsi/bfa/include/defs/bfa_defs_boot.h
index 6f4aa5283545..0fca10b6ad10 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_boot.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_boot.h
@@ -24,6 +24,8 @@
24 24
25enum { 25enum {
26 BFA_BOOT_BOOTLUN_MAX = 4, /* maximum boot lun per IOC */ 26 BFA_BOOT_BOOTLUN_MAX = 4, /* maximum boot lun per IOC */
27 BFA_PREBOOT_BOOTLUN_MAX = 8, /* maximum preboot lun per IOC */
28
27}; 29};
28 30
29#define BOOT_CFG_REV1 1 31#define BOOT_CFG_REV1 1
@@ -67,5 +69,13 @@ struct bfa_boot_cfg_s {
67 struct bfa_boot_bootlun_s blun_disc[BFA_BOOT_BOOTLUN_MAX]; 69 struct bfa_boot_bootlun_s blun_disc[BFA_BOOT_BOOTLUN_MAX];
68}; 70};
69 71
72struct bfa_boot_pbc_s {
73 u8 enable; /* enable/disable SAN boot */
74 u8 speed; /* boot speed settings */
75 u8 topology; /* boot topology setting */
76 u8 rsvd1;
77 u32 nbluns; /* number of boot luns */
78 struct bfa_boot_bootlun_s pblun[BFA_PREBOOT_BOOTLUN_MAX];
79};
70 80
71#endif /* __BFA_DEFS_BOOT_H__ */ 81#endif /* __BFA_DEFS_BOOT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_driver.h b/drivers/scsi/bfa/include/defs/bfa_defs_driver.h
index 50382dd2ab41..7d00d00d3969 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_driver.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_driver.h
@@ -29,7 +29,7 @@ struct bfa_driver_stats_s {
29 u16 tm_target_reset; 29 u16 tm_target_reset;
30 u16 tm_bus_reset; 30 u16 tm_bus_reset;
31 u16 ioc_restart; /* IOC restart count */ 31 u16 ioc_restart; /* IOC restart count */
32 u16 io_pending; /* outstanding io count per-IOC */ 32 u16 rsvd;
33 u64 control_req; 33 u64 control_req;
34 u64 input_req; 34 u64 input_req;
35 u64 output_req; 35 u64 output_req;
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_fcport.h b/drivers/scsi/bfa/include/defs/bfa_defs_fcport.h
index a07ef4a3cd78..af86a6396439 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_fcport.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_fcport.h
@@ -48,7 +48,7 @@ struct bfa_fcoe_stats_s {
48 u64 disc_fcf_unavail; /* Discovery FCF not avail */ 48 u64 disc_fcf_unavail; /* Discovery FCF not avail */
49 u64 linksvc_unsupp; /* FIP link service req unsupp. */ 49 u64 linksvc_unsupp; /* FIP link service req unsupp. */
50 u64 linksvc_err; /* FIP link service req errors */ 50 u64 linksvc_err; /* FIP link service req errors */
51 u64 logo_req; /* FIP logo */ 51 u64 logo_req; /* FIP logos received */
52 u64 clrvlink_req; /* Clear virtual link requests */ 52 u64 clrvlink_req; /* Clear virtual link requests */
53 u64 op_unsupp; /* FIP operation unsupp. */ 53 u64 op_unsupp; /* FIP operation unsupp. */
54 u64 untagged; /* FIP untagged frames */ 54 u64 untagged; /* FIP untagged frames */
@@ -64,21 +64,15 @@ struct bfa_fcoe_stats_s {
64 u64 txf_timeout; /* Tx timeouts */ 64 u64 txf_timeout; /* Tx timeouts */
65 u64 txf_parity_errors; /* Transmit parity err */ 65 u64 txf_parity_errors; /* Transmit parity err */
66 u64 txf_fid_parity_errors; /* Transmit FID parity err */ 66 u64 txf_fid_parity_errors; /* Transmit FID parity err */
67 u64 tx_pause; /* Tx pause frames */ 67 u64 rxf_ucast_octets; /* Rx FCoE unicast octets */
68 u64 tx_zero_pause; /* Tx zero pause frames */ 68 u64 rxf_ucast; /* Rx FCoE unicast frames */
69 u64 tx_first_pause; /* Tx first pause frames */ 69 u64 rxf_ucast_vlan; /* Rx FCoE unicast vlan frames */
70 u64 rx_pause; /* Rx pause frames */ 70 u64 rxf_mcast_octets; /* Rx FCoE multicast octets */
71 u64 rx_zero_pause; /* Rx zero pause frames */ 71 u64 rxf_mcast; /* Rx FCoE multicast frames */
72 u64 rx_first_pause; /* Rx first pause frames */ 72 u64 rxf_mcast_vlan; /* Rx FCoE multicast vlan frames */
73 u64 rxf_ucast_octets; /* Rx unicast octets */ 73 u64 rxf_bcast_octets; /* Rx FCoE broadcast octets */
74 u64 rxf_ucast; /* Rx unicast frames */ 74 u64 rxf_bcast; /* Rx FCoE broadcast frames */
75 u64 rxf_ucast_vlan; /* Rx unicast vlan frames */ 75 u64 rxf_bcast_vlan; /* Rx FCoE broadcast vlan frames */
76 u64 rxf_mcast_octets; /* Rx multicast octets */
77 u64 rxf_mcast; /* Rx multicast frames */
78 u64 rxf_mcast_vlan; /* Rx multicast vlan frames */
79 u64 rxf_bcast_octets; /* Rx broadcast octests */
80 u64 rxf_bcast; /* Rx broadcast frames */
81 u64 rxf_bcast_vlan; /* Rx broadcast vlan frames */
82}; 76};
83 77
84/** 78/**
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h b/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h
index 8d8e6a966537..add0a05d941d 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h
@@ -126,7 +126,7 @@ struct bfa_ioc_attr_s {
126 struct bfa_ioc_driver_attr_s driver_attr; /* driver attr */ 126 struct bfa_ioc_driver_attr_s driver_attr; /* driver attr */
127 struct bfa_ioc_pci_attr_s pci_attr; 127 struct bfa_ioc_pci_attr_s pci_attr;
128 u8 port_id; /* port number */ 128 u8 port_id; /* port number */
129 u8 rsvd[7]; /*!< 64bit align */ 129 u8 rsvd[7]; /* 64bit align */
130}; 130};
131 131
132/** 132/**
@@ -138,6 +138,11 @@ enum bfa_ioc_aen_event {
138 BFA_IOC_AEN_ENABLE = 3, /* IOC enabled event */ 138 BFA_IOC_AEN_ENABLE = 3, /* IOC enabled event */
139 BFA_IOC_AEN_DISABLE = 4, /* IOC disabled event */ 139 BFA_IOC_AEN_DISABLE = 4, /* IOC disabled event */
140 BFA_IOC_AEN_FWMISMATCH = 5, /* IOC firmware mismatch */ 140 BFA_IOC_AEN_FWMISMATCH = 5, /* IOC firmware mismatch */
141 BFA_IOC_AEN_FWCFG_ERROR = 6, /* IOC firmware config error */
142 BFA_IOC_AEN_INVALID_VENDOR = 7,
143 BFA_IOC_AEN_INVALID_NWWN = 8, /* Zero NWWN */
144 BFA_IOC_AEN_INVALID_PWWN = 9 /* Zero PWWN */
145
141}; 146};
142 147
143/** 148/**
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h b/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h
index c290fb13d2d1..31e728a631ed 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h
@@ -51,8 +51,10 @@ struct bfa_iocfc_fwcfg_s {
51 u16 num_tsktm_reqs; /* TM task management requests*/ 51 u16 num_tsktm_reqs; /* TM task management requests*/
52 u16 num_fcxp_reqs; /* unassisted FC exchanges */ 52 u16 num_fcxp_reqs; /* unassisted FC exchanges */
53 u16 num_uf_bufs; /* unsolicited recv buffers */ 53 u16 num_uf_bufs; /* unsolicited recv buffers */
54 u8 num_cqs; 54 u8 num_cqs;
55 u8 rsvd[5]; 55 u8 fw_tick_res; /*!< FW clock resolution in ms */
56 u8 rsvd[4];
57
56}; 58};
57 59
58struct bfa_iocfc_drvcfg_s { 60struct bfa_iocfc_drvcfg_s {
@@ -176,10 +178,10 @@ struct bfa_fw_port_fpg_stats_s {
176 u32 nos_rx; 178 u32 nos_rx;
177 u32 lip_rx; 179 u32 lip_rx;
178 u32 arbf0_rx; 180 u32 arbf0_rx;
181 u32 arb_rx;
179 u32 mrk_rx; 182 u32 mrk_rx;
180 u32 const_mrk_rx; 183 u32 const_mrk_rx;
181 u32 prim_unknown; 184 u32 prim_unknown;
182 u32 rsvd;
183}; 185};
184 186
185 187
@@ -200,6 +202,8 @@ struct bfa_fw_port_lksm_stats_s {
200 u32 lrr_tx; /* No. of times LRR tx started */ 202 u32 lrr_tx; /* No. of times LRR tx started */
201 u32 ols_tx; /* No. of times OLS tx started */ 203 u32 ols_tx; /* No. of times OLS tx started */
202 u32 nos_tx; /* No. of times NOS tx started */ 204 u32 nos_tx; /* No. of times NOS tx started */
205 u32 hwsm_lrr_rx; /* No. of times LRR rx-ed by HWSM */
206 u32 hwsm_lr_rx; /* No. of times LR rx-ed by HWSM */
203}; 207};
204 208
205 209
@@ -239,7 +243,7 @@ struct bfa_fw_fip_stats_s {
239 u32 disc_fcf_unavail; /* Discovery FCF Not Avail. */ 243 u32 disc_fcf_unavail; /* Discovery FCF Not Avail. */
240 u32 linksvc_unsupp; /* Unsupported link service req */ 244 u32 linksvc_unsupp; /* Unsupported link service req */
241 u32 linksvc_err; /* Parse error in link service req */ 245 u32 linksvc_err; /* Parse error in link service req */
242 u32 logo_req; /* Number of FIP logos received */ 246 u32 logo_req; /* FIP logos received */
243 u32 clrvlink_req; /* Clear virtual link req */ 247 u32 clrvlink_req; /* Clear virtual link req */
244 u32 op_unsupp; /* Unsupported FIP operation */ 248 u32 op_unsupp; /* Unsupported FIP operation */
245 u32 untagged; /* Untagged frames (ignored) */ 249 u32 untagged; /* Untagged frames (ignored) */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_itnim.h b/drivers/scsi/bfa/include/defs/bfa_defs_itnim.h
index 2ec769903d24..d77788b3999a 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_itnim.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_itnim.h
@@ -34,6 +34,15 @@ enum bfa_itnim_state {
34 BFA_ITNIM_INITIATIOR = 7, /* initiator */ 34 BFA_ITNIM_INITIATIOR = 7, /* initiator */
35}; 35};
36 36
37struct bfa_itnim_latency_s {
38 u32 min;
39 u32 max;
40 u32 count;
41 u32 clock_res;
42 u32 avg;
43 u32 rsvd;
44};
45
37struct bfa_itnim_hal_stats_s { 46struct bfa_itnim_hal_stats_s {
38 u32 onlines; /* ITN nexus onlines (PRLI done) */ 47 u32 onlines; /* ITN nexus onlines (PRLI done) */
39 u32 offlines; /* ITN Nexus offlines */ 48 u32 offlines; /* ITN Nexus offlines */
@@ -91,6 +100,7 @@ struct bfa_itnim_attr_s {
91 u8 task_retry_id; /* task retry ident support */ 100 u8 task_retry_id; /* task retry ident support */
92 u8 rec_support; /* REC supported */ 101 u8 rec_support; /* REC supported */
93 u8 conf_comp; /* confirmed completion supp */ 102 u8 conf_comp; /* confirmed completion supp */
103 struct bfa_itnim_latency_s io_latency; /* IO latency */
94}; 104};
95 105
96/** 106/**
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h b/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h
index c5bd9c36ad4d..d22fb7909643 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h
@@ -45,26 +45,6 @@
45#define BFA_MFG_CHKSUM_SIZE 16 45#define BFA_MFG_CHKSUM_SIZE 16
46 46
47/** 47/**
48 * Manufacturing block encrypted version
49 */
50#define BFA_MFG_ENC_VER 2
51
52/**
53 * Manufacturing block version 1 length
54 */
55#define BFA_MFG_VER1_LEN 128
56
57/**
58 * Manufacturing block header length
59 */
60#define BFA_MFG_HDR_LEN 4
61
62/**
63 * Checksum size
64 */
65#define BFA_MFG_CHKSUM_SIZE 16
66
67/**
68 * Manufacturing block format 48 * Manufacturing block format
69 */ 49 */
70#define BFA_MFG_SERIALNUM_SIZE 11 50#define BFA_MFG_SERIALNUM_SIZE 11
@@ -86,6 +66,9 @@ enum {
86 BFA_MFG_TYPE_FC4P1 = 415, /* 4G 1port FC card */ 66 BFA_MFG_TYPE_FC4P1 = 415, /* 4G 1port FC card */
87 BFA_MFG_TYPE_CNA10P2 = 1020, /* 10G 2port CNA card */ 67 BFA_MFG_TYPE_CNA10P2 = 1020, /* 10G 2port CNA card */
88 BFA_MFG_TYPE_CNA10P1 = 1010, /* 10G 1port CNA card */ 68 BFA_MFG_TYPE_CNA10P1 = 1010, /* 10G 1port CNA card */
69 BFA_MFG_TYPE_JAYHAWK = 804, /* Jayhawk mezz card */
70 BFA_MFG_TYPE_WANCHESE = 1007, /* Wanchese mezz card */
71 BFA_MFG_TYPE_INVALID = 0, /* Invalid card type */
89}; 72};
90 73
91#pragma pack(1) 74#pragma pack(1)
@@ -95,6 +78,24 @@ enum {
95 */ 78 */
96#define bfa_mfg_type2port_num(card_type) (((card_type) / 10) % 10) 79#define bfa_mfg_type2port_num(card_type) (((card_type) / 10) % 10)
97 80
81/**
82 * Check if Mezz card
83 */
84#define bfa_mfg_is_mezz(type) (( \
85 (type) == BFA_MFG_TYPE_JAYHAWK || \
86 (type) == BFA_MFG_TYPE_WANCHESE))
87
88/**
89 * Check if card type valid
90 */
91#define bfa_mfg_is_card_type_valid(type) (( \
92 (type) == BFA_MFG_TYPE_FC8P2 || \
93 (type) == BFA_MFG_TYPE_FC8P1 || \
94 (type) == BFA_MFG_TYPE_FC4P2 || \
95 (type) == BFA_MFG_TYPE_FC4P1 || \
96 (type) == BFA_MFG_TYPE_CNA10P2 || \
97 (type) == BFA_MFG_TYPE_CNA10P1 || \
98 bfa_mfg_is_mezz(type)))
98 99
99/** 100/**
100 * All numerical fields are in big-endian format. 101 * All numerical fields are in big-endian format.
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_pci.h b/drivers/scsi/bfa/include/defs/bfa_defs_pci.h
index c9b83321694b..ea7d89bbc0bb 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_pci.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_pci.h
@@ -26,8 +26,13 @@ enum {
26 BFA_PCI_DEVICE_ID_FC_8G2P = 0x13, 26 BFA_PCI_DEVICE_ID_FC_8G2P = 0x13,
27 BFA_PCI_DEVICE_ID_FC_8G1P = 0x17, 27 BFA_PCI_DEVICE_ID_FC_8G1P = 0x17,
28 BFA_PCI_DEVICE_ID_CT = 0x14, 28 BFA_PCI_DEVICE_ID_CT = 0x14,
29 BFA_PCI_DEVICE_ID_CT_FC = 0x21,
29}; 30};
30 31
32#define bfa_asic_id_ct(devid) \
33 ((devid) == BFA_PCI_DEVICE_ID_CT || \
34 (devid) == BFA_PCI_DEVICE_ID_CT_FC)
35
31/** 36/**
32 * PCI sub-system device and vendor ID information 37 * PCI sub-system device and vendor ID information
33 */ 38 */
@@ -35,7 +40,9 @@ enum {
35 BFA_PCI_FCOE_SSDEVICE_ID = 0x14, 40 BFA_PCI_FCOE_SSDEVICE_ID = 0x14,
36}; 41};
37 42
38#define BFA_PCI_ACCESS_RANGES 1 /* Maximum number of device address ranges 43/**
39 * mapped through different BAR(s). */ 44 * Maximum number of device address ranges mapped through different BAR(s)
45 */
46#define BFA_PCI_ACCESS_RANGES 1
40 47
41#endif /* __BFA_DEFS_PCI_H__ */ 48#endif /* __BFA_DEFS_PCI_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_port.h b/drivers/scsi/bfa/include/defs/bfa_defs_port.h
index 501bc9739d9d..ebdf0d1731a4 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_port.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_port.h
@@ -50,12 +50,12 @@ enum bfa_port_role {
50 * FCS port configuration. 50 * FCS port configuration.
51 */ 51 */
52struct bfa_port_cfg_s { 52struct bfa_port_cfg_s {
53 wwn_t pwwn; /* port wwn */ 53 wwn_t pwwn; /* port wwn */
54 wwn_t nwwn; /* node wwn */ 54 wwn_t nwwn; /* node wwn */
55 struct bfa_port_symname_s sym_name; /* vm port symbolic name */ 55 struct bfa_port_symname_s sym_name; /* vm port symbolic name */
56 enum bfa_port_role roles; /* FCS port roles */ 56 bfa_boolean_t preboot_vp; /* vport created from PBC */
57 u32 rsvd; 57 enum bfa_port_role roles; /* FCS port roles */
58 u8 tag[16]; /* opaque tag from application */ 58 u8 tag[16]; /* opaque tag from application */
59}; 59};
60 60
61/** 61/**
@@ -159,7 +159,7 @@ struct bfa_port_stats_s {
159 u32 ms_plogi_rsp_err; 159 u32 ms_plogi_rsp_err;
160 u32 ms_plogi_acc_err; 160 u32 ms_plogi_acc_err;
161 u32 ms_plogi_accepts; 161 u32 ms_plogi_accepts;
162 u32 ms_rejects; /* NS command rejects */ 162 u32 ms_rejects; /* MS command rejects */
163 u32 ms_plogi_unknown_rsp; 163 u32 ms_plogi_unknown_rsp;
164 u32 ms_plogi_alloc_wait; 164 u32 ms_plogi_alloc_wait;
165 165
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_pport.h b/drivers/scsi/bfa/include/defs/bfa_defs_pport.h
index 26e5cc78095d..2de675839c2f 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_pport.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_pport.h
@@ -38,6 +38,7 @@ enum bfa_pport_states {
38 BFA_PPORT_ST_IOCDOWN = 10, 38 BFA_PPORT_ST_IOCDOWN = 10,
39 BFA_PPORT_ST_IOCDIS = 11, 39 BFA_PPORT_ST_IOCDIS = 11,
40 BFA_PPORT_ST_FWMISMATCH = 12, 40 BFA_PPORT_ST_FWMISMATCH = 12,
41 BFA_PPORT_ST_PREBOOT_DISABLED = 13,
41 BFA_PPORT_ST_MAX_STATE, 42 BFA_PPORT_ST_MAX_STATE,
42}; 43};
43 44
@@ -203,6 +204,8 @@ struct bfa_pport_attr_s {
203 */ 204 */
204 wwn_t nwwn; /* node wwn */ 205 wwn_t nwwn; /* node wwn */
205 wwn_t pwwn; /* port wwn */ 206 wwn_t pwwn; /* port wwn */
207 wwn_t factorynwwn; /* factory node wwn */
208 wwn_t factorypwwn; /* factory port wwn */
206 enum fc_cos cos_supported; /* supported class of services */ 209 enum fc_cos cos_supported; /* supported class of services */
207 u32 rsvd; 210 u32 rsvd;
208 struct fc_symname_s port_symname; /* port symbolic name */ 211 struct fc_symname_s port_symname; /* port symbolic name */
@@ -243,7 +246,7 @@ struct bfa_pport_fc_stats_s {
243 u64 secs_reset; /* Seconds since stats is reset */ 246 u64 secs_reset; /* Seconds since stats is reset */
244 u64 tx_frames; /* Tx frames */ 247 u64 tx_frames; /* Tx frames */
245 u64 tx_words; /* Tx words */ 248 u64 tx_words; /* Tx words */
246 u64 tx_lip; /* TX LIP */ 249 u64 tx_lip; /* Tx LIP */
247 u64 tx_nos; /* Tx NOS */ 250 u64 tx_nos; /* Tx NOS */
248 u64 tx_ols; /* Tx OLS */ 251 u64 tx_ols; /* Tx OLS */
249 u64 tx_lr; /* Tx LR */ 252 u64 tx_lr; /* Tx LR */
@@ -309,7 +312,7 @@ struct bfa_pport_eth_stats_s {
309 u64 rx_zero_pause; /* Rx zero pause */ 312 u64 rx_zero_pause; /* Rx zero pause */
310 u64 tx_pause; /* Tx pause */ 313 u64 tx_pause; /* Tx pause */
311 u64 tx_zero_pause; /* Tx zero pause */ 314 u64 tx_zero_pause; /* Tx zero pause */
312 u64 rx_fcoe_pause; /* Rx fcoe pause */ 315 u64 rx_fcoe_pause; /* Rx FCoE pause */
313 u64 rx_fcoe_zero_pause; /* Rx FCoE zero pause */ 316 u64 rx_fcoe_zero_pause; /* Rx FCoE zero pause */
314 u64 tx_fcoe_pause; /* Tx FCoE pause */ 317 u64 tx_fcoe_pause; /* Tx FCoE pause */
315 u64 tx_fcoe_zero_pause; /* Tx FCoE zero pause */ 318 u64 tx_fcoe_zero_pause; /* Tx FCoE zero pause */
@@ -381,26 +384,10 @@ struct bfa_pport_link_s {
381 u8 trunked; /* Trunked or not (1 or 0) */ 384 u8 trunked; /* Trunked or not (1 or 0) */
382 u8 resvd[3]; 385 u8 resvd[3];
383 struct bfa_qos_attr_s qos_attr; /* QoS Attributes */ 386 struct bfa_qos_attr_s qos_attr; /* QoS Attributes */
384 struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */
385 union { 387 union {
386 struct { 388 struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */
387 u8 tmaster;/* Trunk Master or 389 struct bfa_fcport_fcf_s fcf; /* FCF information (for FCoE) */
388 * not (1 or 0) */ 390 } vc_fcf;
389 u8 tlinks; /* Trunk links bitmap
390 * (linkup) */
391 u8 resv1; /* Reserved */
392 } trunk_info;
393
394 struct {
395 u8 myalpa; /* alpa claimed */
396 u8 login_req; /* Login required or
397 * not (1 or 0) */
398 u8 alpabm_val;/* alpa bitmap valid
399 * or not (1 or 0) */
400 struct fc_alpabm_s alpabm; /* alpa bitmap */
401 } loop_info;
402 } tl;
403 struct bfa_fcport_fcf_s fcf; /*!< FCF information (for FCoE) */
404}; 391};
405 392
406#endif /* __BFA_DEFS_PPORT_H__ */ 393#endif /* __BFA_DEFS_PPORT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_status.h b/drivers/scsi/bfa/include/defs/bfa_defs_status.h
index ec78b4cb121a..6eb4e62096fc 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_status.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_status.h
@@ -84,8 +84,9 @@ enum bfa_status {
84 BFA_STATUS_BADFRMHDR = 48, /* Bad frame header */ 84 BFA_STATUS_BADFRMHDR = 48, /* Bad frame header */
85 BFA_STATUS_BADFRMSZ = 49, /* Bad frame size check and replace 85 BFA_STATUS_BADFRMSZ = 49, /* Bad frame size check and replace
86 * SFP/cable */ 86 * SFP/cable */
87 BFA_STATUS_MISSINGFRM = 50, /* Missing frame check and replace 87 BFA_STATUS_MISSINGFRM = 50, /* Missing frame check and replace
88 * SFP/cable */ 88 * SFP/cable or for Mezz card check and
89 * replace pass through module */
89 BFA_STATUS_LINKTIMEOUT = 51, /* Link timeout check and replace 90 BFA_STATUS_LINKTIMEOUT = 51, /* Link timeout check and replace
90 * SFP/cable */ 91 * SFP/cable */
91 BFA_STATUS_NO_FCPIM_NEXUS = 52, /* No FCP Nexus exists with the 92 BFA_STATUS_NO_FCPIM_NEXUS = 52, /* No FCP Nexus exists with the
@@ -173,7 +174,7 @@ enum bfa_status {
173 BFA_STATUS_LEDTEST_OP = 109, /* LED test is operating */ 174 BFA_STATUS_LEDTEST_OP = 109, /* LED test is operating */
174 BFA_STATUS_CEE_NOT_DN = 110, /* eth port is not at down state, please 175 BFA_STATUS_CEE_NOT_DN = 110, /* eth port is not at down state, please
175 * bring down first */ 176 * bring down first */
176 BFA_STATUS_10G_SPD = 111, /* Speed setting not valid for 10G HBA */ 177 BFA_STATUS_10G_SPD = 111, /* Speed setting not valid for 10G CNA */
177 BFA_STATUS_IM_INV_TEAM_NAME = 112, /* Invalid team name */ 178 BFA_STATUS_IM_INV_TEAM_NAME = 112, /* Invalid team name */
178 BFA_STATUS_IM_DUP_TEAM_NAME = 113, /* Given team name already 179 BFA_STATUS_IM_DUP_TEAM_NAME = 113, /* Given team name already
179 * exists */ 180 * exists */
@@ -213,12 +214,13 @@ enum bfa_status {
213 * loaded */ 214 * loaded */
214 BFA_STATUS_CARD_TYPE_MISMATCH = 131, /* Card type mismatch */ 215 BFA_STATUS_CARD_TYPE_MISMATCH = 131, /* Card type mismatch */
215 BFA_STATUS_BAD_ASICBLK = 132, /* Bad ASIC block */ 216 BFA_STATUS_BAD_ASICBLK = 132, /* Bad ASIC block */
216 BFA_STATUS_NO_DRIVER = 133, /* Brocade adapter/driver not installed or loaded */ 217 BFA_STATUS_NO_DRIVER = 133, /* Brocade adapter/driver not installed
217 BFA_STATUS_INVALID_MAC = 134, /* Invalid mac address */ 218 * or loaded */
219 BFA_STATUS_INVALID_MAC = 134, /* Invalid MAC address */
218 BFA_STATUS_IM_NO_VLAN = 135, /* No VLANs configured on the adapter */ 220 BFA_STATUS_IM_NO_VLAN = 135, /* No VLANs configured on the adapter */
219 BFA_STATUS_IM_ETH_LB_FAILED = 136, /* Ethernet loopback test failed */ 221 BFA_STATUS_IM_ETH_LB_FAILED = 136, /* Ethernet loopback test failed */
220 BFA_STATUS_IM_PVID_REMOVE = 137, /* Cannot remove port vlan (PVID) */ 222 BFA_STATUS_IM_PVID_REMOVE = 137, /* Cannot remove port VLAN (PVID) */
221 BFA_STATUS_IM_PVID_EDIT = 138, /* Cannot edit port vlan (PVID) */ 223 BFA_STATUS_IM_PVID_EDIT = 138, /* Cannot edit port VLAN (PVID) */
222 BFA_STATUS_CNA_NO_BOOT = 139, /* Boot upload not allowed for CNA */ 224 BFA_STATUS_CNA_NO_BOOT = 139, /* Boot upload not allowed for CNA */
223 BFA_STATUS_IM_PVID_NON_ZERO = 140, /* Port VLAN ID (PVID) is Set to 225 BFA_STATUS_IM_PVID_NON_ZERO = 140, /* Port VLAN ID (PVID) is Set to
224 * Non-Zero Value */ 226 * Non-Zero Value */
@@ -232,14 +234,15 @@ enum bfa_status {
232 BFA_STATUS_INSUFFICIENT_PERMS = 144, /* User doesn't have sufficient 234 BFA_STATUS_INSUFFICIENT_PERMS = 144, /* User doesn't have sufficient
233 * permissions to execute the BCU 235 * permissions to execute the BCU
234 * application */ 236 * application */
235 BFA_STATUS_IM_INV_VLAN_NAME = 145, /* Invalid/Reserved Vlan name 237 BFA_STATUS_IM_INV_VLAN_NAME = 145, /* Invalid/Reserved VLAN name
236 * string. The name is not allowed 238 * string. The name is not allowed
237 * for the normal Vlans */ 239 * for the normal VLAN */
238 BFA_STATUS_CMD_NOTSUPP_CNA = 146, /* Command not supported for CNA */ 240 BFA_STATUS_CMD_NOTSUPP_CNA = 146, /* Command not supported for CNA */
239 BFA_STATUS_IM_PASSTHRU_EDIT = 147, /* Can not edit passthru vlan id */ 241 BFA_STATUS_IM_PASSTHRU_EDIT = 147, /* Can not edit passthrough VLAN
240 BFA_STATUS_IM_BIND_FAILED = 148, /*! < IM Driver bind operation 242 * id */
243 BFA_STATUS_IM_BIND_FAILED = 148, /* IM Driver bind operation
241 * failed */ 244 * failed */
242 BFA_STATUS_IM_UNBIND_FAILED = 149, /* ! < IM Driver unbind operation 245 BFA_STATUS_IM_UNBIND_FAILED = 149, /* IM Driver unbind operation
243 * failed */ 246 * failed */
244 BFA_STATUS_IM_PORT_IN_TEAM = 150, /* Port is already part of the 247 BFA_STATUS_IM_PORT_IN_TEAM = 150, /* Port is already part of the
245 * team */ 248 * team */
@@ -249,7 +252,24 @@ enum bfa_status {
249 BFA_STATUS_IM_TEAM_CFG_NOT_ALLOWED = 153, /* Given settings are not 252 BFA_STATUS_IM_TEAM_CFG_NOT_ALLOWED = 153, /* Given settings are not
250 * allowed for the current 253 * allowed for the current
251 * Teaming mode */ 254 * Teaming mode */
252 BFA_STATUS_MAX_VAL /* Unknown error code */ 255 BFA_STATUS_PBC = 154, /* Operation not allowed for pre-boot
256 * configuration */
257 BFA_STATUS_DEVID_MISSING = 155, /* Boot image is not for the adapter(s)
258 * installed */
259 BFA_STATUS_BAD_FWCFG = 156, /* Bad firmware configuration */
260 BFA_STATUS_CREATE_FILE = 157, /* Failed to create temporary file */
261 BFA_STATUS_INVALID_VENDOR = 158, /* Invalid switch vendor */
262 BFA_STATUS_SFP_NOT_READY = 159, /* SFP info is not ready. Retry */
263 BFA_STATUS_NO_TOPOLOGY_FOR_CNA = 160, /* Topology command not
264 * applicable to CNA */
265 BFA_STATUS_BOOT_CODE_UPDATED = 161, /* reboot -- -r is needed after
266 * boot code updated */
267 BFA_STATUS_BOOT_VERSION = 162, /* Boot code version not compatible with
268 * the driver installed */
269 BFA_STATUS_CARDTYPE_MISSING = 163, /* Boot image is not for the
270 * adapter(s) installed */
271 BFA_STATUS_INVALID_CARDTYPE = 164, /* Invalid card type provided */
272 BFA_STATUS_MAX_VAL /* Unknown error code */
253}; 273};
254#define bfa_status_t enum bfa_status 274#define bfa_status_t enum bfa_status
255 275
diff --git a/drivers/scsi/bfa/include/fcb/bfa_fcb_vport.h b/drivers/scsi/bfa/include/fcb/bfa_fcb_vport.h
index a39f474c2fcf..cfd6ba7c47ec 100644
--- a/drivers/scsi/bfa/include/fcb/bfa_fcb_vport.h
+++ b/drivers/scsi/bfa/include/fcb/bfa_fcb_vport.h
@@ -40,7 +40,8 @@ struct bfad_vport_s;
40 * 40 *
41 * @return None 41 * @return None
42 */ 42 */
43void bfa_fcb_vport_delete(struct bfad_vport_s *vport_drv); 43void bfa_fcb_vport_delete(struct bfad_vport_s *vport_drv);
44void bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s);
44 45
45 46
46 47
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs.h b/drivers/scsi/bfa/include/fcs/bfa_fcs.h
index f2fd35fdee28..54e5b81ab2a3 100644
--- a/drivers/scsi/bfa/include/fcs/bfa_fcs.h
+++ b/drivers/scsi/bfa/include/fcs/bfa_fcs.h
@@ -61,8 +61,8 @@ struct bfa_fcs_s {
61/* 61/*
62 * bfa fcs API functions 62 * bfa fcs API functions
63 */ 63 */
64void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad, 64void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa,
65 bfa_boolean_t min_cfg); 65 struct bfad_s *bfad, bfa_boolean_t min_cfg);
66void bfa_fcs_init(struct bfa_fcs_s *fcs); 66void bfa_fcs_init(struct bfa_fcs_s *fcs);
67void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs, 67void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
68 struct bfa_fcs_driver_info_s *driver_info); 68 struct bfa_fcs_driver_info_s *driver_info);
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_fcpim.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_fcpim.h
index e719f2c3eb35..9a35ecf5cdf0 100644
--- a/drivers/scsi/bfa/include/fcs/bfa_fcs_fcpim.h
+++ b/drivers/scsi/bfa/include/fcs/bfa_fcs_fcpim.h
@@ -41,6 +41,7 @@ struct bfa_fcs_itnim_s {
41 struct bfa_fcs_s *fcs; /* fcs instance */ 41 struct bfa_fcs_s *fcs; /* fcs instance */
42 struct bfa_timer_s timer; /* timer functions */ 42 struct bfa_timer_s timer; /* timer functions */
43 struct bfa_itnim_s *bfa_itnim; /* BFA itnim struct */ 43 struct bfa_itnim_s *bfa_itnim; /* BFA itnim struct */
44 u32 prli_retries; /* max prli retry attempts */
44 bfa_boolean_t seq_rec; /* seq recovery support */ 45 bfa_boolean_t seq_rec; /* seq recovery support */
45 bfa_boolean_t rec_support; /* REC supported */ 46 bfa_boolean_t rec_support; /* REC supported */
46 bfa_boolean_t conf_comp; /* FCP_CONF support */ 47 bfa_boolean_t conf_comp; /* FCP_CONF support */
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_rport.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_rport.h
index 702b95b76c2d..3027fc6c7722 100644
--- a/drivers/scsi/bfa/include/fcs/bfa_fcs_rport.h
+++ b/drivers/scsi/bfa/include/fcs/bfa_fcs_rport.h
@@ -58,6 +58,7 @@ struct bfa_fcs_rport_s {
58 u16 reply_oxid; /* OX_ID of inbound requests */ 58 u16 reply_oxid; /* OX_ID of inbound requests */
59 enum fc_cos fc_cos; /* FC classes of service supp */ 59 enum fc_cos fc_cos; /* FC classes of service supp */
60 bfa_boolean_t cisc; /* CISC capable device */ 60 bfa_boolean_t cisc; /* CISC capable device */
61 bfa_boolean_t prlo; /* processing prlo or LOGO */
61 wwn_t pwwn; /* port wwn of rport */ 62 wwn_t pwwn; /* port wwn of rport */
62 wwn_t nwwn; /* node wwn of rport */ 63 wwn_t nwwn; /* node wwn of rport */
63 struct bfa_rport_symname_s psym_name; /* port symbolic name */ 64 struct bfa_rport_symname_s psym_name; /* port symbolic name */
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_vport.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_vport.h
index cd33f2cd5c34..0af262430860 100644
--- a/drivers/scsi/bfa/include/fcs/bfa_fcs_vport.h
+++ b/drivers/scsi/bfa/include/fcs/bfa_fcs_vport.h
@@ -49,6 +49,10 @@ bfa_status_t bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport,
49 struct bfa_fcs_s *fcs, u16 vf_id, 49 struct bfa_fcs_s *fcs, u16 vf_id,
50 struct bfa_port_cfg_s *port_cfg, 50 struct bfa_port_cfg_s *port_cfg,
51 struct bfad_vport_s *vport_drv); 51 struct bfad_vport_s *vport_drv);
52bfa_status_t bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport,
53 struct bfa_fcs_s *fcs, uint16_t vf_id,
54 struct bfa_port_cfg_s *port_cfg,
55 struct bfad_vport_s *vport_drv);
52bfa_status_t bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport); 56bfa_status_t bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport);
53bfa_status_t bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport); 57bfa_status_t bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport);
54bfa_status_t bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport); 58bfa_status_t bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport);
diff --git a/drivers/scsi/bfa/include/log/bfa_log_linux.h b/drivers/scsi/bfa/include/log/bfa_log_linux.h
index bd451db4c30a..44bc89768bda 100644
--- a/drivers/scsi/bfa/include/log/bfa_log_linux.h
+++ b/drivers/scsi/bfa/include/log/bfa_log_linux.h
@@ -53,8 +53,10 @@
53 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 16) 53 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 16)
54#define BFA_LOG_LINUX_DRIVER_ERROR \ 54#define BFA_LOG_LINUX_DRIVER_ERROR \
55 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 17) 55 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 17)
56#define BFA_LOG_LINUX_DRIVER_DIAG \ 56#define BFA_LOG_LINUX_DRIVER_INFO \
57 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 18) 57 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 18)
58#define BFA_LOG_LINUX_DRIVER_AEN \ 58#define BFA_LOG_LINUX_DRIVER_DIAG \
59 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 19) 59 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 19)
60#define BFA_LOG_LINUX_DRIVER_AEN \
61 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 20)
60#endif 62#endif
diff --git a/drivers/scsi/bfa/include/protocol/fc.h b/drivers/scsi/bfa/include/protocol/fc.h
index 8d1038035a76..436dd7c5643a 100644
--- a/drivers/scsi/bfa/include/protocol/fc.h
+++ b/drivers/scsi/bfa/include/protocol/fc.h
@@ -1080,6 +1080,7 @@ struct fc_alpabm_s{
1080#define FC_REC_TOV (FC_ED_TOV + 1) 1080#define FC_REC_TOV (FC_ED_TOV + 1)
1081#define FC_RA_TOV 10 1081#define FC_RA_TOV 10
1082#define FC_ELS_TOV (2 * FC_RA_TOV) 1082#define FC_ELS_TOV (2 * FC_RA_TOV)
1083#define FC_FCCT_TOV (3 * FC_RA_TOV)
1083 1084
1084/* 1085/*
1085 * virtual fabric related defines 1086 * virtual fabric related defines
diff --git a/drivers/scsi/bfa/lport_api.c b/drivers/scsi/bfa/lport_api.c
index d3907d184e2b..72b3f508d0e9 100644
--- a/drivers/scsi/bfa/lport_api.c
+++ b/drivers/scsi/bfa/lport_api.c
@@ -137,6 +137,8 @@ bfa_fcs_port_get_rports(struct bfa_fcs_port_s *port, wwn_t rport_wwns[],
137/* 137/*
138 * Iterate's through all the rport's in the given port to 138 * Iterate's through all the rport's in the given port to
139 * determine the maximum operating speed. 139 * determine the maximum operating speed.
140 *
141 * To be used in TRL Functionality only
140 */ 142 */
141enum bfa_pport_speed 143enum bfa_pport_speed
142bfa_fcs_port_get_rport_max_speed(struct bfa_fcs_port_s *port) 144bfa_fcs_port_get_rport_max_speed(struct bfa_fcs_port_s *port)
@@ -146,7 +148,8 @@ bfa_fcs_port_get_rport_max_speed(struct bfa_fcs_port_s *port)
146 struct bfa_fcs_s *fcs; 148 struct bfa_fcs_s *fcs;
147 enum bfa_pport_speed max_speed = 0; 149 enum bfa_pport_speed max_speed = 0;
148 struct bfa_pport_attr_s pport_attr; 150 struct bfa_pport_attr_s pport_attr;
149 enum bfa_pport_speed pport_speed; 151 enum bfa_pport_speed pport_speed, rport_speed;
152 bfa_boolean_t trl_enabled = bfa_fcport_is_ratelim(port->fcs->bfa);
150 153
151 if (port == NULL) 154 if (port == NULL)
152 return 0; 155 return 0;
@@ -164,19 +167,28 @@ bfa_fcs_port_get_rport_max_speed(struct bfa_fcs_port_s *port)
164 qe = bfa_q_first(qh); 167 qe = bfa_q_first(qh);
165 168
166 while (qe != qh) { 169 while (qe != qh) {
167 rport = (struct bfa_fcs_rport_s *)qe; 170 rport = (struct bfa_fcs_rport_s *) qe;
168 if ((bfa_os_ntoh3b(rport->pid) > 0xFFF000) 171 if ((bfa_os_ntoh3b(rport->pid) > 0xFFF000) ||
169 || (bfa_fcs_rport_get_state(rport) == BFA_RPORT_OFFLINE)) { 172 (bfa_fcs_rport_get_state(rport) ==
173 BFA_RPORT_OFFLINE)) {
170 qe = bfa_q_next(qe); 174 qe = bfa_q_next(qe);
171 continue; 175 continue;
172 } 176 }
173 177
174 if ((rport->rpf.rpsc_speed == BFA_PPORT_SPEED_8GBPS) 178 rport_speed = rport->rpf.rpsc_speed;
175 || (rport->rpf.rpsc_speed > pport_speed)) { 179 if ((trl_enabled) && (rport_speed ==
176 max_speed = rport->rpf.rpsc_speed; 180 BFA_PPORT_SPEED_UNKNOWN)) {
181 /* Use default ratelim speed setting */
182 rport_speed =
183 bfa_fcport_get_ratelim_speed(port->fcs->bfa);
184 }
185
186 if ((rport_speed == BFA_PPORT_SPEED_8GBPS) ||
187 (rport_speed > pport_speed)) {
188 max_speed = rport_speed;
177 break; 189 break;
178 } else if (rport->rpf.rpsc_speed > max_speed) { 190 } else if (rport_speed > max_speed) {
179 max_speed = rport->rpf.rpsc_speed; 191 max_speed = rport_speed;
180 } 192 }
181 193
182 qe = bfa_q_next(qe); 194 qe = bfa_q_next(qe);
diff --git a/drivers/scsi/bfa/ms.c b/drivers/scsi/bfa/ms.c
index 5e8c8dee6c97..1d579ef26122 100644
--- a/drivers/scsi/bfa/ms.c
+++ b/drivers/scsi/bfa/ms.c
@@ -157,6 +157,7 @@ bfa_fcs_port_ms_sm_plogi(struct bfa_fcs_port_ms_s *ms, enum port_ms_event event)
157 * Start timer for a delayed retry 157 * Start timer for a delayed retry
158 */ 158 */
159 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_plogi_retry); 159 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_plogi_retry);
160 ms->port->stats.ms_retries++;
160 bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port), &ms->timer, 161 bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port), &ms->timer,
161 bfa_fcs_port_ms_timeout, ms, 162 bfa_fcs_port_ms_timeout, ms,
162 BFA_FCS_RETRY_TIMEOUT); 163 BFA_FCS_RETRY_TIMEOUT);
@@ -279,6 +280,7 @@ bfa_fcs_port_ms_sm_gmal(struct bfa_fcs_port_ms_s *ms, enum port_ms_event event)
279 */ 280 */
280 if (ms->retry_cnt++ < BFA_FCS_MS_CMD_MAX_RETRIES) { 281 if (ms->retry_cnt++ < BFA_FCS_MS_CMD_MAX_RETRIES) {
281 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gmal_retry); 282 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gmal_retry);
283 ms->port->stats.ms_retries++;
282 bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port), 284 bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
283 &ms->timer, bfa_fcs_port_ms_timeout, ms, 285 &ms->timer, bfa_fcs_port_ms_timeout, ms,
284 BFA_FCS_RETRY_TIMEOUT); 286 BFA_FCS_RETRY_TIMEOUT);
@@ -359,7 +361,7 @@ bfa_fcs_port_ms_send_gmal(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
359 361
360 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 362 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
361 FC_CLASS_3, len, &fchs, bfa_fcs_port_ms_gmal_response, 363 FC_CLASS_3, len, &fchs, bfa_fcs_port_ms_gmal_response,
362 (void *)ms, FC_MAX_PDUSZ, FC_RA_TOV); 364 (void *)ms, FC_MAX_PDUSZ, FC_FCCT_TOV);
363 365
364 bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT); 366 bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT);
365} 367}
@@ -479,6 +481,7 @@ bfa_fcs_port_ms_sm_gfn(struct bfa_fcs_port_ms_s *ms, enum port_ms_event event)
479 */ 481 */
480 if (ms->retry_cnt++ < BFA_FCS_MS_CMD_MAX_RETRIES) { 482 if (ms->retry_cnt++ < BFA_FCS_MS_CMD_MAX_RETRIES) {
481 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gfn_retry); 483 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gfn_retry);
484 ms->port->stats.ms_retries++;
482 bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port), 485 bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
483 &ms->timer, bfa_fcs_port_ms_timeout, ms, 486 &ms->timer, bfa_fcs_port_ms_timeout, ms,
484 BFA_FCS_RETRY_TIMEOUT); 487 BFA_FCS_RETRY_TIMEOUT);
@@ -557,7 +560,7 @@ bfa_fcs_port_ms_send_gfn(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
557 560
558 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 561 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
559 FC_CLASS_3, len, &fchs, bfa_fcs_port_ms_gfn_response, 562 FC_CLASS_3, len, &fchs, bfa_fcs_port_ms_gfn_response,
560 (void *)ms, FC_MAX_PDUSZ, FC_RA_TOV); 563 (void *)ms, FC_MAX_PDUSZ, FC_FCCT_TOV);
561 564
562 bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT); 565 bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT);
563} 566}
@@ -637,7 +640,7 @@ bfa_fcs_port_ms_send_plogi(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
637 640
638 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 641 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
639 FC_CLASS_3, len, &fchs, bfa_fcs_port_ms_plogi_response, 642 FC_CLASS_3, len, &fchs, bfa_fcs_port_ms_plogi_response,
640 (void *)ms, FC_MAX_PDUSZ, FC_RA_TOV); 643 (void *)ms, FC_MAX_PDUSZ, FC_ELS_TOV);
641 644
642 port->stats.ms_plogi_sent++; 645 port->stats.ms_plogi_sent++;
643 bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT); 646 bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT);
diff --git a/drivers/scsi/bfa/ns.c b/drivers/scsi/bfa/ns.c
index d20dd7e15742..ae0edcc86ed5 100644
--- a/drivers/scsi/bfa/ns.c
+++ b/drivers/scsi/bfa/ns.c
@@ -664,7 +664,7 @@ bfa_fcs_port_ns_send_plogi(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
664 664
665 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 665 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
666 FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_plogi_response, 666 FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_plogi_response,
667 (void *)ns, FC_MAX_PDUSZ, FC_RA_TOV); 667 (void *)ns, FC_MAX_PDUSZ, FC_ELS_TOV);
668 port->stats.ns_plogi_sent++; 668 port->stats.ns_plogi_sent++;
669 669
670 bfa_sm_send_event(ns, NSSM_EVENT_PLOGI_SENT); 670 bfa_sm_send_event(ns, NSSM_EVENT_PLOGI_SENT);
@@ -791,7 +791,7 @@ bfa_fcs_port_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
791 791
792 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 792 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
793 FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_rspn_id_response, 793 FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_rspn_id_response,
794 (void *)ns, FC_MAX_PDUSZ, FC_RA_TOV); 794 (void *)ns, FC_MAX_PDUSZ, FC_FCCT_TOV);
795 795
796 port->stats.ns_rspnid_sent++; 796 port->stats.ns_rspnid_sent++;
797 797
@@ -865,7 +865,7 @@ bfa_fcs_port_ns_send_rft_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
865 865
866 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 866 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
867 FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_rft_id_response, 867 FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_rft_id_response,
868 (void *)ns, FC_MAX_PDUSZ, FC_RA_TOV); 868 (void *)ns, FC_MAX_PDUSZ, FC_FCCT_TOV);
869 869
870 port->stats.ns_rftid_sent++; 870 port->stats.ns_rftid_sent++;
871 bfa_sm_send_event(ns, NSSM_EVENT_RFTID_SENT); 871 bfa_sm_send_event(ns, NSSM_EVENT_RFTID_SENT);
@@ -943,7 +943,7 @@ bfa_fcs_port_ns_send_rff_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
943 943
944 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 944 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
945 FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_rff_id_response, 945 FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_rff_id_response,
946 (void *)ns, FC_MAX_PDUSZ, FC_RA_TOV); 946 (void *)ns, FC_MAX_PDUSZ, FC_FCCT_TOV);
947 947
948 port->stats.ns_rffid_sent++; 948 port->stats.ns_rffid_sent++;
949 bfa_sm_send_event(ns, NSSM_EVENT_RFFID_SENT); 949 bfa_sm_send_event(ns, NSSM_EVENT_RFFID_SENT);
@@ -1029,7 +1029,7 @@ bfa_fcs_port_ns_send_gid_ft(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1029 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 1029 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1030 FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_gid_ft_response, 1030 FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_gid_ft_response,
1031 (void *)ns, bfa_fcxp_get_maxrsp(port->fcs->bfa), 1031 (void *)ns, bfa_fcxp_get_maxrsp(port->fcs->bfa),
1032 FC_RA_TOV); 1032 FC_FCCT_TOV);
1033 1033
1034 port->stats.ns_gidft_sent++; 1034 port->stats.ns_gidft_sent++;
1035 1035
@@ -1228,10 +1228,10 @@ bfa_fcs_port_ns_boot_target_disc(struct bfa_fcs_port_s *port)
1228 1228
1229 struct bfa_fcs_rport_s *rport; 1229 struct bfa_fcs_rport_s *rport;
1230 u8 nwwns; 1230 u8 nwwns;
1231 wwn_t *wwns; 1231 wwn_t wwns[BFA_PREBOOT_BOOTLUN_MAX];
1232 int ii; 1232 int ii;
1233 1233
1234 bfa_iocfc_get_bootwwns(port->fcs->bfa, &nwwns, &wwns); 1234 bfa_iocfc_get_bootwwns(port->fcs->bfa, &nwwns, wwns);
1235 1235
1236 for (ii = 0; ii < nwwns; ++ii) { 1236 for (ii = 0; ii < nwwns; ++ii) {
1237 rport = bfa_fcs_rport_create_by_wwn(port, wwns[ii]); 1237 rport = bfa_fcs_rport_create_by_wwn(port, wwns[ii]);
diff --git a/drivers/scsi/bfa/rport.c b/drivers/scsi/bfa/rport.c
index 7b096f2e3836..9b4c2c9a644b 100644
--- a/drivers/scsi/bfa/rport.c
+++ b/drivers/scsi/bfa/rport.c
@@ -36,8 +36,6 @@
36 36
37BFA_TRC_FILE(FCS, RPORT); 37BFA_TRC_FILE(FCS, RPORT);
38 38
39#define BFA_FCS_RPORT_MAX_RETRIES (5)
40
41/* In millisecs */ 39/* In millisecs */
42static u32 bfa_fcs_rport_del_timeout = 40static u32 bfa_fcs_rport_del_timeout =
43 BFA_FCS_RPORT_DEF_DEL_TIMEOUT * 1000; 41 BFA_FCS_RPORT_DEF_DEL_TIMEOUT * 1000;
@@ -95,6 +93,7 @@ static void bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport,
95 u8 reason_code_expl); 93 u8 reason_code_expl);
96static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport, 94static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
97 struct fchs_s *rx_fchs, u16 len); 95 struct fchs_s *rx_fchs, u16 len);
96static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport);
98/** 97/**
99 * fcs_rport_sm FCS rport state machine events 98 * fcs_rport_sm FCS rport state machine events
100 */ 99 */
@@ -115,7 +114,8 @@ enum rport_event {
115 RPSM_EVENT_HCB_OFFLINE = 13, /* BFA rport offline callback */ 114 RPSM_EVENT_HCB_OFFLINE = 13, /* BFA rport offline callback */
116 RPSM_EVENT_FC4_OFFLINE = 14, /* FC-4 offline complete */ 115 RPSM_EVENT_FC4_OFFLINE = 14, /* FC-4 offline complete */
117 RPSM_EVENT_ADDRESS_CHANGE = 15, /* Rport's PID has changed */ 116 RPSM_EVENT_ADDRESS_CHANGE = 15, /* Rport's PID has changed */
118 RPSM_EVENT_ADDRESS_DISC = 16 /* Need to Discover rport's PID */ 117 RPSM_EVENT_ADDRESS_DISC = 16, /* Need to Discover rport's PID */
118 RPSM_EVENT_PRLO_RCVD = 17, /* PRLO from remote device */
119}; 119};
120 120
121static void bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, 121static void bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport,
@@ -356,8 +356,8 @@ bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
356 */ 356 */
357 357
358 case RPSM_EVENT_TIMEOUT: 358 case RPSM_EVENT_TIMEOUT:
359 rport->plogi_retries++;
360 if (rport->plogi_retries < BFA_FCS_RPORT_MAX_RETRIES) { 359 if (rport->plogi_retries < BFA_FCS_RPORT_MAX_RETRIES) {
360 rport->plogi_retries++;
361 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); 361 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
362 bfa_fcs_rport_send_plogi(rport, NULL); 362 bfa_fcs_rport_send_plogi(rport, NULL);
363 } else { 363 } else {
@@ -375,6 +375,7 @@ bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
375 bfa_fcs_rport_free(rport); 375 bfa_fcs_rport_free(rport);
376 break; 376 break;
377 377
378 case RPSM_EVENT_PRLO_RCVD:
378 case RPSM_EVENT_LOGO_RCVD: 379 case RPSM_EVENT_LOGO_RCVD:
379 break; 380 break;
380 381
@@ -430,6 +431,13 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
430 431
431 case RPSM_EVENT_LOGO_RCVD: 432 case RPSM_EVENT_LOGO_RCVD:
432 bfa_fcs_rport_send_logo_acc(rport); 433 bfa_fcs_rport_send_logo_acc(rport);
434 /*
435 * !! fall through !!
436 */
437 case RPSM_EVENT_PRLO_RCVD:
438 if (rport->prlo == BFA_TRUE)
439 bfa_fcs_rport_send_prlo_acc(rport);
440
433 bfa_fcxp_discard(rport->fcxp); 441 bfa_fcxp_discard(rport->fcxp);
434 /* 442 /*
435 * !! fall through !! 443 * !! fall through !!
@@ -504,6 +512,9 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
504 bfa_fcs_rport_online_action(rport); 512 bfa_fcs_rport_online_action(rport);
505 break; 513 break;
506 514
515 case RPSM_EVENT_PRLO_RCVD:
516 break;
517
507 case RPSM_EVENT_LOGO_RCVD: 518 case RPSM_EVENT_LOGO_RCVD:
508 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv); 519 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv);
509 bfa_rport_offline(rport->bfa_rport); 520 bfa_rport_offline(rport->bfa_rport);
@@ -582,6 +593,7 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event)
582 break; 593 break;
583 594
584 case RPSM_EVENT_LOGO_RCVD: 595 case RPSM_EVENT_LOGO_RCVD:
596 case RPSM_EVENT_PRLO_RCVD:
585 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); 597 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
586 bfa_fcs_rport_offline_action(rport); 598 bfa_fcs_rport_offline_action(rport);
587 break; 599 break;
@@ -624,6 +636,7 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
624 break; 636 break;
625 637
626 case RPSM_EVENT_LOGO_RCVD: 638 case RPSM_EVENT_LOGO_RCVD:
639 case RPSM_EVENT_PRLO_RCVD:
627 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); 640 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
628 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); 641 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
629 bfa_fcs_rport_offline_action(rport); 642 bfa_fcs_rport_offline_action(rport);
@@ -690,6 +703,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
690 break; 703 break;
691 704
692 case RPSM_EVENT_LOGO_RCVD: 705 case RPSM_EVENT_LOGO_RCVD:
706 case RPSM_EVENT_PRLO_RCVD:
693 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); 707 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
694 bfa_fcxp_discard(rport->fcxp); 708 bfa_fcxp_discard(rport->fcxp);
695 bfa_fcs_rport_offline_action(rport); 709 bfa_fcs_rport_offline_action(rport);
@@ -740,6 +754,7 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
740 break; 754 break;
741 755
742 case RPSM_EVENT_LOGO_RCVD: 756 case RPSM_EVENT_LOGO_RCVD:
757 case RPSM_EVENT_PRLO_RCVD:
743 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); 758 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
744 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); 759 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
745 bfa_fcs_rport_offline_action(rport); 760 bfa_fcs_rport_offline_action(rport);
@@ -811,6 +826,7 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
811 break; 826 break;
812 827
813 case RPSM_EVENT_LOGO_RCVD: 828 case RPSM_EVENT_LOGO_RCVD:
829 case RPSM_EVENT_PRLO_RCVD:
814 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); 830 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
815 bfa_fcxp_discard(rport->fcxp); 831 bfa_fcxp_discard(rport->fcxp);
816 bfa_fcs_rport_offline_action(rport); 832 bfa_fcs_rport_offline_action(rport);
@@ -843,6 +859,7 @@ bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
843 break; 859 break;
844 860
845 case RPSM_EVENT_LOGO_RCVD: 861 case RPSM_EVENT_LOGO_RCVD:
862 case RPSM_EVENT_PRLO_RCVD:
846 case RPSM_EVENT_ADDRESS_CHANGE: 863 case RPSM_EVENT_ADDRESS_CHANGE:
847 break; 864 break;
848 865
@@ -894,6 +911,7 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
894 case RPSM_EVENT_SCN: 911 case RPSM_EVENT_SCN:
895 case RPSM_EVENT_LOGO_IMP: 912 case RPSM_EVENT_LOGO_IMP:
896 case RPSM_EVENT_LOGO_RCVD: 913 case RPSM_EVENT_LOGO_RCVD:
914 case RPSM_EVENT_PRLO_RCVD:
897 case RPSM_EVENT_ADDRESS_CHANGE: 915 case RPSM_EVENT_ADDRESS_CHANGE:
898 /** 916 /**
899 * rport is already going offline. 917 * rport is already going offline.
@@ -953,6 +971,7 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
953 971
954 case RPSM_EVENT_SCN: 972 case RPSM_EVENT_SCN:
955 case RPSM_EVENT_LOGO_RCVD: 973 case RPSM_EVENT_LOGO_RCVD:
974 case RPSM_EVENT_PRLO_RCVD:
956 /** 975 /**
957 * Ignore, already offline. 976 * Ignore, already offline.
958 */ 977 */
@@ -978,8 +997,11 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
978 switch (event) { 997 switch (event) {
979 case RPSM_EVENT_HCB_OFFLINE: 998 case RPSM_EVENT_HCB_OFFLINE:
980 case RPSM_EVENT_ADDRESS_CHANGE: 999 case RPSM_EVENT_ADDRESS_CHANGE:
981 if (rport->pid) 1000 if (rport->pid && (rport->prlo == BFA_TRUE))
1001 bfa_fcs_rport_send_prlo_acc(rport);
1002 if (rport->pid && (rport->prlo == BFA_FALSE))
982 bfa_fcs_rport_send_logo_acc(rport); 1003 bfa_fcs_rport_send_logo_acc(rport);
1004
983 /* 1005 /*
984 * If the lport is online and if the rport is not a well known 1006 * If the lport is online and if the rport is not a well known
985 * address port, we try to re-discover the r-port. 1007 * address port, we try to re-discover the r-port.
@@ -1013,6 +1035,7 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
1013 break; 1035 break;
1014 1036
1015 case RPSM_EVENT_LOGO_RCVD: 1037 case RPSM_EVENT_LOGO_RCVD:
1038 case RPSM_EVENT_PRLO_RCVD:
1016 /** 1039 /**
1017 * Ignore - already processing a LOGO. 1040 * Ignore - already processing a LOGO.
1018 */ 1041 */
@@ -1042,6 +1065,7 @@ bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
1042 break; 1065 break;
1043 1066
1044 case RPSM_EVENT_LOGO_RCVD: 1067 case RPSM_EVENT_LOGO_RCVD:
1068 case RPSM_EVENT_PRLO_RCVD:
1045 case RPSM_EVENT_ADDRESS_CHANGE: 1069 case RPSM_EVENT_ADDRESS_CHANGE:
1046 break; 1070 break;
1047 1071
@@ -1075,6 +1099,7 @@ bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
1075 break; 1099 break;
1076 1100
1077 case RPSM_EVENT_LOGO_RCVD: 1101 case RPSM_EVENT_LOGO_RCVD:
1102 case RPSM_EVENT_PRLO_RCVD:
1078 bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); 1103 bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
1079 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); 1104 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
1080 bfa_fcs_rport_free(rport); 1105 bfa_fcs_rport_free(rport);
@@ -1123,6 +1148,7 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event)
1123 break; 1148 break;
1124 1149
1125 case RPSM_EVENT_LOGO_RCVD: 1150 case RPSM_EVENT_LOGO_RCVD:
1151 case RPSM_EVENT_PRLO_RCVD:
1126 case RPSM_EVENT_LOGO_IMP: 1152 case RPSM_EVENT_LOGO_IMP:
1127 break; 1153 break;
1128 1154
@@ -1174,6 +1200,7 @@ bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport,
1174 1200
1175 case RPSM_EVENT_SCN: 1201 case RPSM_EVENT_SCN:
1176 case RPSM_EVENT_LOGO_RCVD: 1202 case RPSM_EVENT_LOGO_RCVD:
1203 case RPSM_EVENT_PRLO_RCVD:
1177 case RPSM_EVENT_PLOGI_SEND: 1204 case RPSM_EVENT_PLOGI_SEND:
1178 break; 1205 break;
1179 1206
@@ -1250,6 +1277,10 @@ bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
1250 bfa_fcs_rport_send_logo_acc(rport); 1277 bfa_fcs_rport_send_logo_acc(rport);
1251 break; 1278 break;
1252 1279
1280 case RPSM_EVENT_PRLO_RCVD:
1281 bfa_fcs_rport_send_prlo_acc(rport);
1282 break;
1283
1253 case RPSM_EVENT_PLOGI_COMP: 1284 case RPSM_EVENT_PLOGI_COMP:
1254 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); 1285 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
1255 bfa_timer_stop(&rport->timer); 1286 bfa_timer_stop(&rport->timer);
@@ -1322,6 +1353,10 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
1322 bfa_fcs_rport_del_timeout); 1353 bfa_fcs_rport_del_timeout);
1323 break; 1354 break;
1324 1355
1356 case RPSM_EVENT_PRLO_RCVD:
1357 bfa_fcs_rport_send_prlo_acc(rport);
1358 break;
1359
1325 case RPSM_EVENT_SCN: 1360 case RPSM_EVENT_SCN:
1326 /** 1361 /**
1327 * ignore, wait for NS query response 1362 * ignore, wait for NS query response
@@ -1378,7 +1413,7 @@ bfa_fcs_rport_send_plogi(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1378 1413
1379 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 1414 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1380 FC_CLASS_3, len, &fchs, bfa_fcs_rport_plogi_response, 1415 FC_CLASS_3, len, &fchs, bfa_fcs_rport_plogi_response,
1381 (void *)rport, FC_MAX_PDUSZ, FC_RA_TOV); 1416 (void *)rport, FC_MAX_PDUSZ, FC_ELS_TOV);
1382 1417
1383 rport->stats.plogis++; 1418 rport->stats.plogis++;
1384 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); 1419 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
@@ -1519,7 +1554,7 @@ bfa_fcs_rport_send_adisc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1519 1554
1520 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 1555 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1521 FC_CLASS_3, len, &fchs, bfa_fcs_rport_adisc_response, 1556 FC_CLASS_3, len, &fchs, bfa_fcs_rport_adisc_response,
1522 rport, FC_MAX_PDUSZ, FC_RA_TOV); 1557 rport, FC_MAX_PDUSZ, FC_ELS_TOV);
1523 1558
1524 rport->stats.adisc_sent++; 1559 rport->stats.adisc_sent++;
1525 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); 1560 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
@@ -1580,7 +1615,7 @@ bfa_fcs_rport_send_gidpn(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1580 1615
1581 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 1616 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1582 FC_CLASS_3, len, &fchs, bfa_fcs_rport_gidpn_response, 1617 FC_CLASS_3, len, &fchs, bfa_fcs_rport_gidpn_response,
1583 (void *)rport, FC_MAX_PDUSZ, FC_RA_TOV); 1618 (void *)rport, FC_MAX_PDUSZ, FC_FCCT_TOV);
1584 1619
1585 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); 1620 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
1586} 1621}
@@ -1692,7 +1727,7 @@ bfa_fcs_rport_send_logo(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1692 1727
1693 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 1728 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1694 FC_CLASS_3, len, &fchs, NULL, rport, FC_MAX_PDUSZ, 1729 FC_CLASS_3, len, &fchs, NULL, rport, FC_MAX_PDUSZ,
1695 FC_ED_TOV); 1730 FC_ELS_TOV);
1696 1731
1697 rport->stats.logos++; 1732 rport->stats.logos++;
1698 bfa_fcxp_discard(rport->fcxp); 1733 bfa_fcxp_discard(rport->fcxp);
@@ -2184,6 +2219,7 @@ bfa_fcs_rport_process_logo(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs)
2184 rport->reply_oxid = fchs->ox_id; 2219 rport->reply_oxid = fchs->ox_id;
2185 bfa_trc(rport->fcs, rport->reply_oxid); 2220 bfa_trc(rport->fcs, rport->reply_oxid);
2186 2221
2222 rport->prlo = BFA_FALSE;
2187 rport->stats.logo_rcvd++; 2223 rport->stats.logo_rcvd++;
2188 bfa_sm_send_event(rport, RPSM_EVENT_LOGO_RCVD); 2224 bfa_sm_send_event(rport, RPSM_EVENT_LOGO_RCVD);
2189} 2225}
@@ -2553,6 +2589,30 @@ bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs,
2553 } 2589 }
2554} 2590}
2555 2591
2592/* Send best case acc to prlo */
2593static void
2594bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport)
2595{
2596 struct bfa_fcs_port_s *port = rport->port;
2597 struct fchs_s fchs;
2598 struct bfa_fcxp_s *fcxp;
2599 int len;
2600
2601 bfa_trc(rport->fcs, rport->pid);
2602
2603 fcxp = bfa_fcs_fcxp_alloc(port->fcs);
2604 if (!fcxp)
2605 return;
2606
2607 len = fc_prlo_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
2608 rport->pid, bfa_fcs_port_get_fcid(port),
2609 rport->reply_oxid, 0);
2610
2611 bfa_fcxp_send(fcxp, rport->bfa_rport, port->fabric->vf_id,
2612 port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs,
2613 NULL, NULL, FC_MAX_PDUSZ, 0);
2614}
2615
2556/* 2616/*
2557 * Send a LS reject 2617 * Send a LS reject
2558 */ 2618 */
@@ -2604,3 +2664,13 @@ bfa_fcs_rport_set_del_timeout(u8 rport_tmo)
2604 if (rport_tmo > 0) 2664 if (rport_tmo > 0)
2605 bfa_fcs_rport_del_timeout = rport_tmo * 1000; 2665 bfa_fcs_rport_del_timeout = rport_tmo * 1000;
2606} 2666}
2667
2668void
2669bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, uint16_t ox_id)
2670{
2671 bfa_trc(rport->fcs, rport->pid);
2672
2673 rport->prlo = BFA_TRUE;
2674 rport->reply_oxid = ox_id;
2675 bfa_sm_send_event(rport, RPSM_EVENT_PRLO_RCVD);
2676}
diff --git a/drivers/scsi/bfa/rport_api.c b/drivers/scsi/bfa/rport_api.c
index a441f41d2a64..15e0c470afd9 100644
--- a/drivers/scsi/bfa/rport_api.c
+++ b/drivers/scsi/bfa/rport_api.c
@@ -83,6 +83,7 @@ bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
83{ 83{
84 struct bfa_rport_qos_attr_s qos_attr; 84 struct bfa_rport_qos_attr_s qos_attr;
85 struct bfa_fcs_port_s *port = rport->port; 85 struct bfa_fcs_port_s *port = rport->port;
86 enum bfa_pport_speed rport_speed = rport->rpf.rpsc_speed;
86 87
87 bfa_os_memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s)); 88 bfa_os_memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s));
88 89
@@ -102,10 +103,14 @@ bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
102 rport_attr->qos_attr = qos_attr; 103 rport_attr->qos_attr = qos_attr;
103 104
104 rport_attr->trl_enforced = BFA_FALSE; 105 rport_attr->trl_enforced = BFA_FALSE;
106
105 if (bfa_fcport_is_ratelim(port->fcs->bfa)) { 107 if (bfa_fcport_is_ratelim(port->fcs->bfa)) {
106 if ((rport->rpf.rpsc_speed == BFA_PPORT_SPEED_UNKNOWN) || 108 if (rport_speed == BFA_PPORT_SPEED_UNKNOWN) {
107 (rport->rpf.rpsc_speed < 109 /* Use default ratelim speed setting */
108 bfa_fcs_port_get_rport_max_speed(port))) 110 rport_speed =
111 bfa_fcport_get_ratelim_speed(rport->fcs->bfa);
112 }
113 if (rport_speed < bfa_fcs_port_get_rport_max_speed(port))
109 rport_attr->trl_enforced = BFA_TRUE; 114 rport_attr->trl_enforced = BFA_TRUE;
110 } 115 }
111 116
diff --git a/drivers/scsi/bfa/rport_ftrs.c b/drivers/scsi/bfa/rport_ftrs.c
index ae7bba67ae2a..f2a9361ce9a4 100644
--- a/drivers/scsi/bfa/rport_ftrs.c
+++ b/drivers/scsi/bfa/rport_ftrs.c
@@ -73,6 +73,7 @@ static void
73bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) 73bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
74{ 74{
75 struct bfa_fcs_rport_s *rport = rpf->rport; 75 struct bfa_fcs_rport_s *rport = rpf->rport;
76 struct bfa_fcs_fabric_s *fabric = &rport->fcs->fabric;
76 77
77 bfa_trc(rport->fcs, rport->pwwn); 78 bfa_trc(rport->fcs, rport->pwwn);
78 bfa_trc(rport->fcs, rport->pid); 79 bfa_trc(rport->fcs, rport->pid);
@@ -80,12 +81,16 @@ bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
80 81
81 switch (event) { 82 switch (event) {
82 case RPFSM_EVENT_RPORT_ONLINE: 83 case RPFSM_EVENT_RPORT_ONLINE:
83 if (!BFA_FCS_PID_IS_WKA(rport->pid)) { 84 /* Send RPSC2 to a Brocade fabric only. */
85 if ((!BFA_FCS_PID_IS_WKA(rport->pid)) &&
86 ((bfa_lps_is_brcd_fabric(rport->port->fabric->lps)) ||
87 (bfa_fcs_fabric_get_switch_oui(fabric) ==
88 BFA_FCS_BRCD_SWITCH_OUI))) {
84 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending); 89 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
85 rpf->rpsc_retries = 0; 90 rpf->rpsc_retries = 0;
86 bfa_fcs_rpf_send_rpsc2(rpf, NULL); 91 bfa_fcs_rpf_send_rpsc2(rpf, NULL);
87 break; 92 }
88 }; 93 break;
89 94
90 case RPFSM_EVENT_RPORT_OFFLINE: 95 case RPFSM_EVENT_RPORT_OFFLINE:
91 break; 96 break;
@@ -269,6 +274,7 @@ void bfa_fcs_rpf_rport_offline(struct bfa_fcs_rport_s *rport)
269 if (__fcs_min_cfg(rport->port->fcs)) 274 if (__fcs_min_cfg(rport->port->fcs))
270 return; 275 return;
271 276
277 rport->rpf.rpsc_speed = 0;
272 bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_OFFLINE); 278 bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_OFFLINE);
273} 279}
274 280
@@ -307,7 +313,7 @@ bfa_fcs_rpf_send_rpsc2(void *rpf_cbarg, struct bfa_fcxp_s *fcxp_alloced)
307 313
308 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 314 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
309 FC_CLASS_3, len, &fchs, bfa_fcs_rpf_rpsc2_response, 315 FC_CLASS_3, len, &fchs, bfa_fcs_rpf_rpsc2_response,
310 rpf, FC_MAX_PDUSZ, FC_RA_TOV); 316 rpf, FC_MAX_PDUSZ, FC_ELS_TOV);
311 rport->stats.rpsc_sent++; 317 rport->stats.rpsc_sent++;
312 bfa_sm_send_event(rpf, RPFSM_EVENT_FCXP_SENT); 318 bfa_sm_send_event(rpf, RPFSM_EVENT_FCXP_SENT);
313 319
diff --git a/drivers/scsi/bfa/scn.c b/drivers/scsi/bfa/scn.c
index 8fe09ba88a91..8a60129e6307 100644
--- a/drivers/scsi/bfa/scn.c
+++ b/drivers/scsi/bfa/scn.c
@@ -218,7 +218,7 @@ bfa_fcs_port_scn_send_scr(void *scn_cbarg, struct bfa_fcxp_s *fcxp_alloced)
218 218
219 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 219 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
220 FC_CLASS_3, len, &fchs, bfa_fcs_port_scn_scr_response, 220 FC_CLASS_3, len, &fchs, bfa_fcs_port_scn_scr_response,
221 (void *)scn, FC_MAX_PDUSZ, FC_RA_TOV); 221 (void *)scn, FC_MAX_PDUSZ, FC_ELS_TOV);
222 222
223 bfa_sm_send_event(scn, SCNSM_EVENT_SCR_SENT); 223 bfa_sm_send_event(scn, SCNSM_EVENT_SCR_SENT);
224} 224}
diff --git a/drivers/scsi/bfa/vport.c b/drivers/scsi/bfa/vport.c
index 27cd619a227a..66f30a0d61e0 100644
--- a/drivers/scsi/bfa/vport.c
+++ b/drivers/scsi/bfa/vport.c
@@ -218,9 +218,9 @@ bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
218 218
219 switch (event) { 219 switch (event) {
220 case BFA_FCS_VPORT_SM_DELETE: 220 case BFA_FCS_VPORT_SM_DELETE:
221 bfa_sm_set_state(vport, bfa_fcs_vport_sm_logo); 221 bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
222 bfa_lps_discard(vport->lps); 222 bfa_lps_discard(vport->lps);
223 bfa_fcs_vport_do_logo(vport); 223 bfa_fcs_port_delete(&vport->lport);
224 break; 224 break;
225 225
226 case BFA_FCS_VPORT_SM_OFFLINE: 226 case BFA_FCS_VPORT_SM_OFFLINE:
@@ -357,8 +357,9 @@ bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport,
357 357
358 switch (event) { 358 switch (event) {
359 case BFA_FCS_VPORT_SM_DELETE: 359 case BFA_FCS_VPORT_SM_DELETE:
360 bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit); 360 bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
361 bfa_fcs_vport_free(vport); 361 bfa_fcs_port_delete(&vport->lport);
362
362 break; 363 break;
363 364
364 default: 365 default:
@@ -594,6 +595,15 @@ bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport)
594} 595}
595 596
596/** 597/**
598 * delete notification from fabric SM. To be invoked from within FCS.
599 */
600void
601bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport)
602{
603 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE);
604}
605
606/**
597 * Delete completion callback from associated lport 607 * Delete completion callback from associated lport
598 */ 608 */
599void 609void
@@ -646,6 +656,7 @@ bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
646 return BFA_STATUS_VPORT_MAX; 656 return BFA_STATUS_VPORT_MAX;
647 657
648 vport->vport_drv = vport_drv; 658 vport->vport_drv = vport_drv;
659 vport_cfg->preboot_vp = BFA_FALSE;
649 bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit); 660 bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);
650 661
651 bfa_fcs_lport_attach(&vport->lport, fcs, vf_id, vport); 662 bfa_fcs_lport_attach(&vport->lport, fcs, vf_id, vport);
@@ -657,6 +668,36 @@ bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
657} 668}
658 669
659/** 670/**
671 * Use this function to instantiate a new FCS PBC vport object. This
672 * function will not trigger any HW initialization process (which will be
673 * done in vport_start() call)
674 *
675 * param[in] vport - pointer to bfa_fcs_vport_t. This space
676 * needs to be allocated by the driver.
677 * param[in] fcs - FCS instance
678 * param[in] vport_cfg - vport configuration
679 * param[in] vf_id - VF_ID if vport is created within a VF.
680 * FC_VF_ID_NULL to specify base fabric.
681 * param[in] vport_drv - Opaque handle back to the driver's vport
682 * structure
683 *
684 * retval BFA_STATUS_OK - on success.
685 * retval BFA_STATUS_FAILED - on failure.
686 */
687bfa_status_t
688bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
689 uint16_t vf_id, struct bfa_port_cfg_s *vport_cfg,
690 struct bfad_vport_s *vport_drv)
691{
692 bfa_status_t rc;
693
694 rc = bfa_fcs_vport_create(vport, fcs, vf_id, vport_cfg, vport_drv);
695 vport->lport.port_cfg.preboot_vp = BFA_TRUE;
696
697 return rc;
698}
699
700/**
660 * Use this function initialize the vport. 701 * Use this function initialize the vport.
661 * 702 *
662 * @param[in] vport - pointer to bfa_fcs_vport_t. 703 * @param[in] vport - pointer to bfa_fcs_vport_t.
@@ -692,6 +733,8 @@ bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport)
692 * Use this function to delete a vport object. Fabric object should 733 * Use this function to delete a vport object. Fabric object should
693 * be stopped before this function call. 734 * be stopped before this function call.
694 * 735 *
736 * Donot invoke this from within FCS
737 *
695 * param[in] vport - pointer to bfa_fcs_vport_t. 738 * param[in] vport - pointer to bfa_fcs_vport_t.
696 * 739 *
697 * return None 740 * return None
@@ -699,6 +742,9 @@ bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport)
699bfa_status_t 742bfa_status_t
700bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport) 743bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport)
701{ 744{
745 if (vport->lport.port_cfg.preboot_vp)
746 return BFA_STATUS_PBC;
747
702 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE); 748 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE);
703 749
704 return BFA_STATUS_OK; 750 return BFA_STATUS_OK;
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 6b624e767d3b..00c033511cbf 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -295,16 +295,21 @@ struct iscsi_cid_queue {
295 * @max_cqes: CQ size 295 * @max_cqes: CQ size
296 * @num_ccell: number of command cells per connection 296 * @num_ccell: number of command cells per connection
297 * @ofld_conns_active: active connection list 297 * @ofld_conns_active: active connection list
298 * @eh_wait: wait queue for the endpoint to shutdown
298 * @max_active_conns: max offload connections supported by this device 299 * @max_active_conns: max offload connections supported by this device
299 * @cid_que: iscsi cid queue 300 * @cid_que: iscsi cid queue
300 * @ep_rdwr_lock: read / write lock to synchronize various ep lists 301 * @ep_rdwr_lock: read / write lock to synchronize various ep lists
301 * @ep_ofld_list: connection list for pending offload completion 302 * @ep_ofld_list: connection list for pending offload completion
303 * @ep_active_list: connection list for active offload endpoints
302 * @ep_destroy_list: connection list for pending offload completion 304 * @ep_destroy_list: connection list for pending offload completion
303 * @mp_bd_tbl: BD table to be used with middle path requests 305 * @mp_bd_tbl: BD table to be used with middle path requests
304 * @mp_bd_dma: DMA address of 'mp_bd_tbl' memory buffer 306 * @mp_bd_dma: DMA address of 'mp_bd_tbl' memory buffer
305 * @dummy_buffer: Dummy buffer to be used with zero length scsicmd reqs 307 * @dummy_buffer: Dummy buffer to be used with zero length scsicmd reqs
306 * @dummy_buf_dma: DMA address of 'dummy_buffer' memory buffer 308 * @dummy_buf_dma: DMA address of 'dummy_buffer' memory buffer
307 * @lock: lock to synchonize access to hba structure 309 * @lock: lock to synchonize access to hba structure
310 * @hba_shutdown_tmo: Timeout value to shutdown each connection
311 * @conn_teardown_tmo: Timeout value to tear down each connection
312 * @conn_ctx_destroy_tmo: Timeout value to destroy context of each connection
308 * @pci_did: PCI device ID 313 * @pci_did: PCI device ID
309 * @pci_vid: PCI vendor ID 314 * @pci_vid: PCI vendor ID
310 * @pci_sdid: PCI subsystem device ID 315 * @pci_sdid: PCI subsystem device ID
@@ -369,6 +374,7 @@ struct bnx2i_hba {
369 374
370 rwlock_t ep_rdwr_lock; 375 rwlock_t ep_rdwr_lock;
371 struct list_head ep_ofld_list; 376 struct list_head ep_ofld_list;
377 struct list_head ep_active_list;
372 struct list_head ep_destroy_list; 378 struct list_head ep_destroy_list;
373 379
374 /* 380 /*
@@ -383,6 +389,8 @@ struct bnx2i_hba {
383 struct mutex net_dev_lock;/* sync net device access */ 389 struct mutex net_dev_lock;/* sync net device access */
384 390
385 int hba_shutdown_tmo; 391 int hba_shutdown_tmo;
392 int conn_teardown_tmo;
393 int conn_ctx_destroy_tmo;
386 /* 394 /*
387 * PCI related info. 395 * PCI related info.
388 */ 396 */
@@ -631,6 +639,8 @@ enum {
631 EP_STATE_CLEANUP_CMPL = 0x800, 639 EP_STATE_CLEANUP_CMPL = 0x800,
632 EP_STATE_TCP_FIN_RCVD = 0x1000, 640 EP_STATE_TCP_FIN_RCVD = 0x1000,
633 EP_STATE_TCP_RST_RCVD = 0x2000, 641 EP_STATE_TCP_RST_RCVD = 0x2000,
642 EP_STATE_LOGOUT_SENT = 0x4000,
643 EP_STATE_LOGOUT_RESP_RCVD = 0x8000,
634 EP_STATE_PG_OFLD_FAILED = 0x1000000, 644 EP_STATE_PG_OFLD_FAILED = 0x1000000,
635 EP_STATE_ULP_UPDATE_FAILED = 0x2000000, 645 EP_STATE_ULP_UPDATE_FAILED = 0x2000000,
636 EP_STATE_CLEANUP_FAILED = 0x4000000, 646 EP_STATE_CLEANUP_FAILED = 0x4000000,
@@ -645,6 +655,7 @@ enum {
645 * @link: list head to link elements 655 * @link: list head to link elements
646 * @hba: adapter to which this connection belongs 656 * @hba: adapter to which this connection belongs
647 * @conn: iscsi connection this EP is linked to 657 * @conn: iscsi connection this EP is linked to
658 * @cls_ep: associated iSCSI endpoint pointer
648 * @sess: iscsi session this EP is linked to 659 * @sess: iscsi session this EP is linked to
649 * @cm_sk: cnic sock struct 660 * @cm_sk: cnic sock struct
650 * @hba_age: age to detect if 'iscsid' issues ep_disconnect() 661 * @hba_age: age to detect if 'iscsid' issues ep_disconnect()
@@ -664,6 +675,7 @@ struct bnx2i_endpoint {
664 struct list_head link; 675 struct list_head link;
665 struct bnx2i_hba *hba; 676 struct bnx2i_hba *hba;
666 struct bnx2i_conn *conn; 677 struct bnx2i_conn *conn;
678 struct iscsi_endpoint *cls_ep;
667 struct cnic_sock *cm_sk; 679 struct cnic_sock *cm_sk;
668 u32 hba_age; 680 u32 hba_age;
669 u32 state; 681 u32 state;
@@ -766,6 +778,8 @@ extern struct bnx2i_endpoint *bnx2i_find_ep_in_destroy_list(
766extern int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep); 778extern int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep);
767extern void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action); 779extern void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action);
768 780
781extern int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep);
782
769/* Debug related function prototypes */ 783/* Debug related function prototypes */
770extern void bnx2i_print_pend_cmd_queue(struct bnx2i_conn *conn); 784extern void bnx2i_print_pend_cmd_queue(struct bnx2i_conn *conn);
771extern void bnx2i_print_active_cmd_queue(struct bnx2i_conn *conn); 785extern void bnx2i_print_active_cmd_queue(struct bnx2i_conn *conn);
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 3a66ca24c7bd..d23fc256d585 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -562,6 +562,8 @@ int bnx2i_send_iscsi_logout(struct bnx2i_conn *bnx2i_conn,
562 logout_wqe->num_bds = 1; 562 logout_wqe->num_bds = 1;
563 logout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */ 563 logout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
564 564
565 bnx2i_conn->ep->state = EP_STATE_LOGOUT_SENT;
566
565 bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); 567 bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
566 return 0; 568 return 0;
567} 569}
@@ -1482,6 +1484,8 @@ static int bnx2i_process_logout_resp(struct iscsi_session *session,
1482 resp_hdr->t2retain = cpu_to_be32(logout->time_to_retain); 1484 resp_hdr->t2retain = cpu_to_be32(logout->time_to_retain);
1483 1485
1484 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0); 1486 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
1487
1488 bnx2i_conn->ep->state = EP_STATE_LOGOUT_RESP_RCVD;
1485done: 1489done:
1486 spin_unlock(&session->lock); 1490 spin_unlock(&session->lock);
1487 return 0; 1491 return 0;
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index af6a00a600fb..a796f565f383 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -17,8 +17,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
17static u32 adapter_count; 17static u32 adapter_count;
18 18
19#define DRV_MODULE_NAME "bnx2i" 19#define DRV_MODULE_NAME "bnx2i"
20#define DRV_MODULE_VERSION "2.1.1" 20#define DRV_MODULE_VERSION "2.1.2"
21#define DRV_MODULE_RELDATE "Mar 24, 2010" 21#define DRV_MODULE_RELDATE "Jun 28, 2010"
22 22
23static char version[] __devinitdata = 23static char version[] __devinitdata =
24 "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \ 24 "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
@@ -176,6 +176,9 @@ void bnx2i_start(void *handle)
176void bnx2i_stop(void *handle) 176void bnx2i_stop(void *handle)
177{ 177{
178 struct bnx2i_hba *hba = handle; 178 struct bnx2i_hba *hba = handle;
179 struct list_head *pos, *tmp;
180 struct bnx2i_endpoint *bnx2i_ep;
181 int conns_active;
179 182
180 /* check if cleanup happened in GOING_DOWN context */ 183 /* check if cleanup happened in GOING_DOWN context */
181 if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN, 184 if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN,
@@ -187,9 +190,33 @@ void bnx2i_stop(void *handle)
187 * control returns to network driver. So it is required to cleanup and 190 * control returns to network driver. So it is required to cleanup and
188 * release all connection resources before returning from this routine. 191 * release all connection resources before returning from this routine.
189 */ 192 */
190 wait_event_interruptible_timeout(hba->eh_wait, 193 while (hba->ofld_conns_active) {
191 (hba->ofld_conns_active == 0), 194 conns_active = hba->ofld_conns_active;
192 hba->hba_shutdown_tmo); 195 wait_event_interruptible_timeout(hba->eh_wait,
196 (hba->ofld_conns_active != conns_active),
197 hba->hba_shutdown_tmo);
198 if (hba->ofld_conns_active == conns_active)
199 break;
200 }
201 if (hba->ofld_conns_active) {
202 /* Stage to force the disconnection
203 * This is the case where the daemon is either slow or
204 * not present
205 */
206 printk(KERN_ALERT "bnx2i: Wait timeout, force all eps "
207 "to disconnect (%d)\n", hba->ofld_conns_active);
208 mutex_lock(&hba->net_dev_lock);
209 list_for_each_safe(pos, tmp, &hba->ep_active_list) {
210 bnx2i_ep = list_entry(pos, struct bnx2i_endpoint, link);
211 /* Clean up the chip only */
212 bnx2i_hw_ep_disconnect(bnx2i_ep);
213 }
214 mutex_unlock(&hba->net_dev_lock);
215 if (hba->ofld_conns_active)
216 printk(KERN_ERR "bnx2i: EP disconnect timeout (%d)!\n",
217 hba->ofld_conns_active);
218 }
219
193 /* This flag should be cleared last so that ep_disconnect() gracefully 220 /* This flag should be cleared last so that ep_disconnect() gracefully
194 * cleans up connection context 221 * cleans up connection context
195 */ 222 */
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index fa68ab34b998..a46ccc380ab1 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -386,6 +386,7 @@ static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
386 } 386 }
387 387
388 bnx2i_ep = ep->dd_data; 388 bnx2i_ep = ep->dd_data;
389 bnx2i_ep->cls_ep = ep;
389 INIT_LIST_HEAD(&bnx2i_ep->link); 390 INIT_LIST_HEAD(&bnx2i_ep->link);
390 bnx2i_ep->state = EP_STATE_IDLE; 391 bnx2i_ep->state = EP_STATE_IDLE;
391 bnx2i_ep->ep_iscsi_cid = (u16) -1; 392 bnx2i_ep->ep_iscsi_cid = (u16) -1;
@@ -678,7 +679,6 @@ bnx2i_find_ep_in_ofld_list(struct bnx2i_hba *hba, u32 iscsi_cid)
678 return ep; 679 return ep;
679} 680}
680 681
681
682/** 682/**
683 * bnx2i_find_ep_in_destroy_list - find iscsi_cid in destroy list 683 * bnx2i_find_ep_in_destroy_list - find iscsi_cid in destroy list
684 * @hba: pointer to adapter instance 684 * @hba: pointer to adapter instance
@@ -709,6 +709,38 @@ bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid)
709} 709}
710 710
711/** 711/**
712 * bnx2i_ep_active_list_add - add an entry to ep active list
713 * @hba: pointer to adapter instance
714 * @ep: pointer to endpoint (transport indentifier) structure
715 *
716 * current active conn queue manager
717 */
718static void bnx2i_ep_active_list_add(struct bnx2i_hba *hba,
719 struct bnx2i_endpoint *ep)
720{
721 write_lock_bh(&hba->ep_rdwr_lock);
722 list_add_tail(&ep->link, &hba->ep_active_list);
723 write_unlock_bh(&hba->ep_rdwr_lock);
724}
725
726
727/**
728 * bnx2i_ep_active_list_del - deletes an entry to ep active list
729 * @hba: pointer to adapter instance
730 * @ep: pointer to endpoint (transport indentifier) structure
731 *
732 * current active conn queue manager
733 */
734static void bnx2i_ep_active_list_del(struct bnx2i_hba *hba,
735 struct bnx2i_endpoint *ep)
736{
737 write_lock_bh(&hba->ep_rdwr_lock);
738 list_del_init(&ep->link);
739 write_unlock_bh(&hba->ep_rdwr_lock);
740}
741
742
743/**
712 * bnx2i_setup_host_queue_size - assigns shost->can_queue param 744 * bnx2i_setup_host_queue_size - assigns shost->can_queue param
713 * @hba: pointer to adapter instance 745 * @hba: pointer to adapter instance
714 * @shost: scsi host pointer 746 * @shost: scsi host pointer
@@ -784,6 +816,7 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
784 goto mp_bdt_mem_err; 816 goto mp_bdt_mem_err;
785 817
786 INIT_LIST_HEAD(&hba->ep_ofld_list); 818 INIT_LIST_HEAD(&hba->ep_ofld_list);
819 INIT_LIST_HEAD(&hba->ep_active_list);
787 INIT_LIST_HEAD(&hba->ep_destroy_list); 820 INIT_LIST_HEAD(&hba->ep_destroy_list);
788 rwlock_init(&hba->ep_rdwr_lock); 821 rwlock_init(&hba->ep_rdwr_lock);
789 822
@@ -821,10 +854,15 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
821 spin_lock_init(&hba->lock); 854 spin_lock_init(&hba->lock);
822 mutex_init(&hba->net_dev_lock); 855 mutex_init(&hba->net_dev_lock);
823 init_waitqueue_head(&hba->eh_wait); 856 init_waitqueue_head(&hba->eh_wait);
824 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) 857 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
825 hba->hba_shutdown_tmo = 240 * HZ; 858 hba->hba_shutdown_tmo = 20 * HZ;
826 else /* 5706/5708/5709 */ 859 hba->conn_teardown_tmo = 20 * HZ;
827 hba->hba_shutdown_tmo = 30 * HZ; 860 hba->conn_ctx_destroy_tmo = 6 * HZ;
861 } else { /* 5706/5708/5709 */
862 hba->hba_shutdown_tmo = 20 * HZ;
863 hba->conn_teardown_tmo = 10 * HZ;
864 hba->conn_ctx_destroy_tmo = 2 * HZ;
865 }
828 866
829 if (iscsi_host_add(shost, &hba->pcidev->dev)) 867 if (iscsi_host_add(shost, &hba->pcidev->dev))
830 goto free_dump_mem; 868 goto free_dump_mem;
@@ -857,6 +895,7 @@ void bnx2i_free_hba(struct bnx2i_hba *hba)
857 895
858 iscsi_host_remove(shost); 896 iscsi_host_remove(shost);
859 INIT_LIST_HEAD(&hba->ep_ofld_list); 897 INIT_LIST_HEAD(&hba->ep_ofld_list);
898 INIT_LIST_HEAD(&hba->ep_active_list);
860 INIT_LIST_HEAD(&hba->ep_destroy_list); 899 INIT_LIST_HEAD(&hba->ep_destroy_list);
861 pci_dev_put(hba->pcidev); 900 pci_dev_put(hba->pcidev);
862 901
@@ -1461,6 +1500,26 @@ static int bnx2i_host_get_param(struct Scsi_Host *shost,
1461 case ISCSI_HOST_PARAM_NETDEV_NAME: 1500 case ISCSI_HOST_PARAM_NETDEV_NAME:
1462 len = sprintf(buf, "%s\n", hba->netdev->name); 1501 len = sprintf(buf, "%s\n", hba->netdev->name);
1463 break; 1502 break;
1503 case ISCSI_HOST_PARAM_IPADDRESS: {
1504 struct list_head *active_list = &hba->ep_active_list;
1505
1506 read_lock_bh(&hba->ep_rdwr_lock);
1507 if (!list_empty(&hba->ep_active_list)) {
1508 struct bnx2i_endpoint *bnx2i_ep;
1509 struct cnic_sock *csk;
1510
1511 bnx2i_ep = list_first_entry(active_list,
1512 struct bnx2i_endpoint,
1513 link);
1514 csk = bnx2i_ep->cm_sk;
1515 if (test_bit(SK_F_IPV6, &csk->flags))
1516 len = sprintf(buf, "%pI6\n", csk->src_ip);
1517 else
1518 len = sprintf(buf, "%pI4\n", csk->src_ip);
1519 }
1520 read_unlock_bh(&hba->ep_rdwr_lock);
1521 break;
1522 }
1464 default: 1523 default:
1465 return iscsi_host_get_param(shost, param, buf); 1524 return iscsi_host_get_param(shost, param, buf);
1466 } 1525 }
@@ -1599,7 +1658,7 @@ static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
1599 1658
1600 ep->state = EP_STATE_CLEANUP_START; 1659 ep->state = EP_STATE_CLEANUP_START;
1601 init_timer(&ep->ofld_timer); 1660 init_timer(&ep->ofld_timer);
1602 ep->ofld_timer.expires = 10*HZ + jiffies; 1661 ep->ofld_timer.expires = hba->conn_ctx_destroy_tmo + jiffies;
1603 ep->ofld_timer.function = bnx2i_ep_ofld_timer; 1662 ep->ofld_timer.function = bnx2i_ep_ofld_timer;
1604 ep->ofld_timer.data = (unsigned long) ep; 1663 ep->ofld_timer.data = (unsigned long) ep;
1605 add_timer(&ep->ofld_timer); 1664 add_timer(&ep->ofld_timer);
@@ -1665,10 +1724,11 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
1665 1724
1666 if (!hba || test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state)) { 1725 if (!hba || test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state)) {
1667 rc = -EINVAL; 1726 rc = -EINVAL;
1668 goto check_busy; 1727 goto nohba;
1669 } 1728 }
1670 1729
1671 cnic = hba->cnic; 1730 cnic = hba->cnic;
1731 mutex_lock(&hba->net_dev_lock);
1672 ep = bnx2i_alloc_ep(hba); 1732 ep = bnx2i_alloc_ep(hba);
1673 if (!ep) { 1733 if (!ep) {
1674 rc = -ENOMEM; 1734 rc = -ENOMEM;
@@ -1676,7 +1736,6 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
1676 } 1736 }
1677 bnx2i_ep = ep->dd_data; 1737 bnx2i_ep = ep->dd_data;
1678 1738
1679 mutex_lock(&hba->net_dev_lock);
1680 if (bnx2i_adapter_ready(hba)) { 1739 if (bnx2i_adapter_ready(hba)) {
1681 rc = -EPERM; 1740 rc = -EPERM;
1682 goto net_if_down; 1741 goto net_if_down;
@@ -1754,15 +1813,19 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
1754 goto conn_failed; 1813 goto conn_failed;
1755 } else 1814 } else
1756 rc = cnic->cm_connect(bnx2i_ep->cm_sk, &saddr); 1815 rc = cnic->cm_connect(bnx2i_ep->cm_sk, &saddr);
1757
1758 if (rc) 1816 if (rc)
1759 goto release_ep; 1817 goto release_ep;
1760 1818
1819 bnx2i_ep_active_list_add(hba, bnx2i_ep);
1820
1761 if (bnx2i_map_ep_dbell_regs(bnx2i_ep)) 1821 if (bnx2i_map_ep_dbell_regs(bnx2i_ep))
1762 goto release_ep; 1822 goto del_active_ep;
1823
1763 mutex_unlock(&hba->net_dev_lock); 1824 mutex_unlock(&hba->net_dev_lock);
1764 return ep; 1825 return ep;
1765 1826
1827del_active_ep:
1828 bnx2i_ep_active_list_del(hba, bnx2i_ep);
1766release_ep: 1829release_ep:
1767 if (bnx2i_tear_down_conn(hba, bnx2i_ep)) { 1830 if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
1768 mutex_unlock(&hba->net_dev_lock); 1831 mutex_unlock(&hba->net_dev_lock);
@@ -1774,8 +1837,9 @@ iscsi_cid_err:
1774 bnx2i_free_qp_resc(hba, bnx2i_ep); 1837 bnx2i_free_qp_resc(hba, bnx2i_ep);
1775qp_resc_err: 1838qp_resc_err:
1776 bnx2i_free_ep(ep); 1839 bnx2i_free_ep(ep);
1777 mutex_unlock(&hba->net_dev_lock);
1778check_busy: 1840check_busy:
1841 mutex_unlock(&hba->net_dev_lock);
1842nohba:
1779 bnx2i_unreg_dev_all(); 1843 bnx2i_unreg_dev_all();
1780 return ERR_PTR(rc); 1844 return ERR_PTR(rc);
1781} 1845}
@@ -1846,6 +1910,8 @@ static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
1846 case EP_STATE_ULP_UPDATE_START: 1910 case EP_STATE_ULP_UPDATE_START:
1847 case EP_STATE_ULP_UPDATE_COMPL: 1911 case EP_STATE_ULP_UPDATE_COMPL:
1848 case EP_STATE_TCP_FIN_RCVD: 1912 case EP_STATE_TCP_FIN_RCVD:
1913 case EP_STATE_LOGOUT_SENT:
1914 case EP_STATE_LOGOUT_RESP_RCVD:
1849 case EP_STATE_ULP_UPDATE_FAILED: 1915 case EP_STATE_ULP_UPDATE_FAILED:
1850 ret = 1; 1916 ret = 1;
1851 break; 1917 break;
@@ -1866,9 +1932,96 @@ static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
1866} 1932}
1867 1933
1868 1934
1935/*
1936 * bnx2i_hw_ep_disconnect - executes TCP connection teardown process in the hw
1937 * @ep: TCP connection (bnx2i endpoint) handle
1938 *
1939 * executes TCP connection teardown process
1940 */
1941int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)
1942{
1943 struct bnx2i_hba *hba = bnx2i_ep->hba;
1944 struct cnic_dev *cnic;
1945 struct iscsi_session *session = NULL;
1946 struct iscsi_conn *conn = NULL;
1947 int ret = 0;
1948 int close = 0;
1949 int close_ret = 0;
1950
1951 if (!hba)
1952 return 0;
1953
1954 cnic = hba->cnic;
1955 if (!cnic)
1956 return 0;
1957
1958 if (!bnx2i_ep_tcp_conn_active(bnx2i_ep))
1959 goto destroy_conn;
1960
1961 if (bnx2i_ep->conn) {
1962 conn = bnx2i_ep->conn->cls_conn->dd_data;
1963 session = conn->session;
1964 }
1965
1966 init_timer(&bnx2i_ep->ofld_timer);
1967 bnx2i_ep->ofld_timer.expires = hba->conn_teardown_tmo + jiffies;
1968 bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
1969 bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
1970 add_timer(&bnx2i_ep->ofld_timer);
1971
1972 if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic))
1973 goto out;
1974
1975 if (session) {
1976 spin_lock_bh(&session->lock);
1977 if (bnx2i_ep->state != EP_STATE_TCP_FIN_RCVD) {
1978 if (session->state == ISCSI_STATE_LOGGING_OUT) {
1979 if (bnx2i_ep->state == EP_STATE_LOGOUT_SENT) {
1980 /* Logout sent, but no resp */
1981 printk(KERN_ALERT "bnx2i - WARNING "
1982 "logout response was not "
1983 "received!\n");
1984 } else if (bnx2i_ep->state ==
1985 EP_STATE_LOGOUT_RESP_RCVD)
1986 close = 1;
1987 }
1988 } else
1989 close = 1;
1990
1991 spin_unlock_bh(&session->lock);
1992 }
1993
1994 bnx2i_ep->state = EP_STATE_DISCONN_START;
1995
1996 if (close)
1997 close_ret = cnic->cm_close(bnx2i_ep->cm_sk);
1998 else
1999 close_ret = cnic->cm_abort(bnx2i_ep->cm_sk);
2000
2001 if (close_ret)
2002 bnx2i_ep->state = EP_STATE_DISCONN_COMPL;
2003
2004 /* wait for option-2 conn teardown */
2005 wait_event_interruptible(bnx2i_ep->ofld_wait,
2006 bnx2i_ep->state != EP_STATE_DISCONN_START);
2007
2008 if (signal_pending(current))
2009 flush_signals(current);
2010 del_timer_sync(&bnx2i_ep->ofld_timer);
2011
2012destroy_conn:
2013 bnx2i_ep_active_list_del(hba, bnx2i_ep);
2014 if (bnx2i_tear_down_conn(hba, bnx2i_ep))
2015 ret = -EINVAL;
2016out:
2017 bnx2i_ep->state = EP_STATE_IDLE;
2018 return ret;
2019}
2020
2021
1869/** 2022/**
1870 * bnx2i_ep_disconnect - executes TCP connection teardown process 2023 * bnx2i_ep_disconnect - executes TCP connection teardown process
1871 * @ep: TCP connection (endpoint) handle 2024 * @ep: TCP connection (iscsi endpoint) handle
1872 * 2025 *
1873 * executes TCP connection teardown process 2026 * executes TCP connection teardown process
1874 */ 2027 */
@@ -1876,9 +2029,7 @@ static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
1876{ 2029{
1877 struct bnx2i_endpoint *bnx2i_ep; 2030 struct bnx2i_endpoint *bnx2i_ep;
1878 struct bnx2i_conn *bnx2i_conn = NULL; 2031 struct bnx2i_conn *bnx2i_conn = NULL;
1879 struct iscsi_session *session = NULL; 2032 struct iscsi_conn *conn = NULL;
1880 struct iscsi_conn *conn;
1881 struct cnic_dev *cnic;
1882 struct bnx2i_hba *hba; 2033 struct bnx2i_hba *hba;
1883 2034
1884 bnx2i_ep = ep->dd_data; 2035 bnx2i_ep = ep->dd_data;
@@ -1894,72 +2045,34 @@ static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
1894 if (bnx2i_ep->conn) { 2045 if (bnx2i_ep->conn) {
1895 bnx2i_conn = bnx2i_ep->conn; 2046 bnx2i_conn = bnx2i_ep->conn;
1896 conn = bnx2i_conn->cls_conn->dd_data; 2047 conn = bnx2i_conn->cls_conn->dd_data;
1897 session = conn->session;
1898
1899 iscsi_suspend_queue(conn); 2048 iscsi_suspend_queue(conn);
1900 } 2049 }
1901
1902 hba = bnx2i_ep->hba; 2050 hba = bnx2i_ep->hba;
1903 if (bnx2i_ep->state == EP_STATE_IDLE)
1904 goto return_bnx2i_ep;
1905 cnic = hba->cnic;
1906 2051
1907 mutex_lock(&hba->net_dev_lock); 2052 mutex_lock(&hba->net_dev_lock);
1908 2053
2054 if (bnx2i_ep->state == EP_STATE_IDLE)
2055 goto return_bnx2i_ep;
2056
1909 if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) 2057 if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
1910 goto free_resc; 2058 goto free_resc;
1911 if (bnx2i_ep->hba_age != hba->age)
1912 goto free_resc;
1913
1914 if (!bnx2i_ep_tcp_conn_active(bnx2i_ep))
1915 goto destory_conn;
1916
1917 bnx2i_ep->state = EP_STATE_DISCONN_START;
1918
1919 init_timer(&bnx2i_ep->ofld_timer);
1920 bnx2i_ep->ofld_timer.expires = 10*HZ + jiffies;
1921 bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
1922 bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
1923 add_timer(&bnx2i_ep->ofld_timer);
1924 2059
1925 if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { 2060 if (bnx2i_ep->hba_age != hba->age)
1926 int close = 0;
1927
1928 if (session) {
1929 spin_lock_bh(&session->lock);
1930 if (session->state == ISCSI_STATE_LOGGING_OUT)
1931 close = 1;
1932 spin_unlock_bh(&session->lock);
1933 }
1934 if (close)
1935 cnic->cm_close(bnx2i_ep->cm_sk);
1936 else
1937 cnic->cm_abort(bnx2i_ep->cm_sk);
1938 } else
1939 goto free_resc; 2061 goto free_resc;
1940 2062
1941 /* wait for option-2 conn teardown */ 2063 /* Do all chip cleanup here */
1942 wait_event_interruptible(bnx2i_ep->ofld_wait, 2064 if (bnx2i_hw_ep_disconnect(bnx2i_ep)) {
1943 bnx2i_ep->state != EP_STATE_DISCONN_START);
1944
1945 if (signal_pending(current))
1946 flush_signals(current);
1947 del_timer_sync(&bnx2i_ep->ofld_timer);
1948
1949destory_conn:
1950 if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
1951 mutex_unlock(&hba->net_dev_lock); 2065 mutex_unlock(&hba->net_dev_lock);
1952 return; 2066 return;
1953 } 2067 }
1954free_resc: 2068free_resc:
1955 mutex_unlock(&hba->net_dev_lock);
1956 bnx2i_free_qp_resc(hba, bnx2i_ep); 2069 bnx2i_free_qp_resc(hba, bnx2i_ep);
1957return_bnx2i_ep: 2070return_bnx2i_ep:
1958 if (bnx2i_conn) 2071 if (bnx2i_conn)
1959 bnx2i_conn->ep = NULL; 2072 bnx2i_conn->ep = NULL;
1960 2073
1961 bnx2i_free_ep(ep); 2074 bnx2i_free_ep(ep);
1962 2075 mutex_unlock(&hba->net_dev_lock);
1963 if (!hba->ofld_conns_active) 2076 if (!hba->ofld_conns_active)
1964 bnx2i_unreg_dev_all(); 2077 bnx2i_unreg_dev_all();
1965 2078
@@ -2038,7 +2151,8 @@ struct iscsi_transport bnx2i_iscsi_transport = {
2038 ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO | 2151 ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
2039 ISCSI_PING_TMO | ISCSI_RECV_TMO | 2152 ISCSI_PING_TMO | ISCSI_RECV_TMO |
2040 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, 2153 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
2041 .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_NETDEV_NAME, 2154 .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
2155 ISCSI_HOST_NETDEV_NAME,
2042 .create_session = bnx2i_session_create, 2156 .create_session = bnx2i_session_create,
2043 .destroy_session = bnx2i_session_destroy, 2157 .destroy_session = bnx2i_session_destroy,
2044 .create_conn = bnx2i_conn_create, 2158 .create_conn = bnx2i_conn_create,
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.c b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
index b58d9134ac1b..be0e23042c76 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_ddp.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
@@ -499,6 +499,7 @@ static int setup_conn_pgidx(struct t3cdev *tdev, unsigned int tid, int pg_idx,
499 /* set up ulp submode and page size */ 499 /* set up ulp submode and page size */
500 req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req)); 500 req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
501 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 501 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
502 req->wr.wr_lo = 0;
502 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); 503 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
503 req->reply = V_NO_REPLY(reply ? 0 : 1); 504 req->reply = V_NO_REPLY(reply ? 0 : 1);
504 req->cpu_idx = 0; 505 req->cpu_idx = 0;
@@ -564,6 +565,7 @@ int cxgb3i_setup_conn_digest(struct t3cdev *tdev, unsigned int tid,
564 /* set up ulp submode and page size */ 565 /* set up ulp submode and page size */
565 req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req)); 566 req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
566 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 567 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
568 req->wr.wr_lo = 0;
567 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); 569 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
568 req->reply = V_NO_REPLY(reply ? 0 : 1); 570 req->reply = V_NO_REPLY(reply ? 0 : 1);
569 req->cpu_idx = 0; 571 req->cpu_idx = 0;
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c
index 3b6a06eebf7f..3ee13cf9556b 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c
@@ -264,6 +264,7 @@ static void make_act_open_req(struct s3_conn *c3cn, struct sk_buff *skb,
264 skb->priority = CPL_PRIORITY_SETUP; 264 skb->priority = CPL_PRIORITY_SETUP;
265 req = (struct cpl_act_open_req *)__skb_put(skb, sizeof(*req)); 265 req = (struct cpl_act_open_req *)__skb_put(skb, sizeof(*req));
266 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 266 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
267 req->wr.wr_lo = 0;
267 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, atid)); 268 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, atid));
268 req->local_port = c3cn->saddr.sin_port; 269 req->local_port = c3cn->saddr.sin_port;
269 req->peer_port = c3cn->daddr.sin_port; 270 req->peer_port = c3cn->daddr.sin_port;
@@ -273,6 +274,7 @@ static void make_act_open_req(struct s3_conn *c3cn, struct sk_buff *skb,
273 V_TX_CHANNEL(e->smt_idx)); 274 V_TX_CHANNEL(e->smt_idx));
274 req->opt0l = htonl(calc_opt0l(c3cn)); 275 req->opt0l = htonl(calc_opt0l(c3cn));
275 req->params = 0; 276 req->params = 0;
277 req->opt2 = 0;
276} 278}
277 279
278static void fail_act_open(struct s3_conn *c3cn, int errno) 280static void fail_act_open(struct s3_conn *c3cn, int errno)
@@ -379,6 +381,7 @@ static void send_abort_req(struct s3_conn *c3cn)
379 381
380 c3cn->cpl_abort_req = NULL; 382 c3cn->cpl_abort_req = NULL;
381 req = (struct cpl_abort_req *)skb->head; 383 req = (struct cpl_abort_req *)skb->head;
384 memset(req, 0, sizeof(*req));
382 385
383 skb->priority = CPL_PRIORITY_DATA; 386 skb->priority = CPL_PRIORITY_DATA;
384 set_arp_failure_handler(skb, abort_arp_failure); 387 set_arp_failure_handler(skb, abort_arp_failure);
@@ -406,6 +409,7 @@ static void send_abort_rpl(struct s3_conn *c3cn, int rst_status)
406 c3cn->cpl_abort_rpl = NULL; 409 c3cn->cpl_abort_rpl = NULL;
407 410
408 skb->priority = CPL_PRIORITY_DATA; 411 skb->priority = CPL_PRIORITY_DATA;
412 memset(rpl, 0, sizeof(*rpl));
409 rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL)); 413 rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
410 rpl->wr.wr_lo = htonl(V_WR_TID(c3cn->tid)); 414 rpl->wr.wr_lo = htonl(V_WR_TID(c3cn->tid));
411 OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, c3cn->tid)); 415 OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, c3cn->tid));
@@ -430,6 +434,7 @@ static u32 send_rx_credits(struct s3_conn *c3cn, u32 credits, u32 dack)
430 434
431 req = (struct cpl_rx_data_ack *)__skb_put(skb, sizeof(*req)); 435 req = (struct cpl_rx_data_ack *)__skb_put(skb, sizeof(*req));
432 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 436 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
437 req->wr.wr_lo = 0;
433 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, c3cn->tid)); 438 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, c3cn->tid));
434 req->credit_dack = htonl(dack | V_RX_CREDITS(credits)); 439 req->credit_dack = htonl(dack | V_RX_CREDITS(credits));
435 skb->priority = CPL_PRIORITY_ACK; 440 skb->priority = CPL_PRIORITY_ACK;
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 5b683e429542..b9bcfa4c7d26 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -768,6 +768,7 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
768 {"DELL", "MD3000i"}, 768 {"DELL", "MD3000i"},
769 {"DELL", "MD32xx"}, 769 {"DELL", "MD32xx"},
770 {"DELL", "MD32xxi"}, 770 {"DELL", "MD32xxi"},
771 {"DELL", "MD36xxi"},
771 {"LSI", "INF-01-00"}, 772 {"LSI", "INF-01-00"},
772 {"ENGENIO", "INF-01-00"}, 773 {"ENGENIO", "INF-01-00"},
773 {"STK", "FLEXLINE 380"}, 774 {"STK", "FLEXLINE 380"},
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index b0c576f84b28..ffc1edf5e80d 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -1290,7 +1290,7 @@ static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
1290 ulong flags = 0; 1290 ulong flags = 0;
1291 struct adpt_i2o_post_wait_data *p1, *p2; 1291 struct adpt_i2o_post_wait_data *p1, *p2;
1292 struct adpt_i2o_post_wait_data *wait_data = 1292 struct adpt_i2o_post_wait_data *wait_data =
1293 kmalloc(sizeof(struct adpt_i2o_post_wait_data),GFP_KERNEL); 1293 kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
1294 DECLARE_WAITQUEUE(wait, current); 1294 DECLARE_WAITQUEUE(wait, current);
1295 1295
1296 if (!wait_data) 1296 if (!wait_data)
@@ -2640,6 +2640,13 @@ static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2640 continue; 2640 continue;
2641 } 2641 }
2642 bus_no = buf[0]>>16; 2642 bus_no = buf[0]>>16;
2643 if (bus_no >= MAX_CHANNEL) { /* Something wrong skip it */
2644 printk(KERN_WARNING
2645 "%s: Channel number %d out of range\n",
2646 pHba->name, bus_no);
2647 continue;
2648 }
2649
2643 scsi_id = buf[1]; 2650 scsi_id = buf[1];
2644 scsi_lun = (buf[2]>>8 )&0xff; 2651 scsi_lun = (buf[2]>>8 )&0xff;
2645 pDev = pHba->channel[bus_no].device[scsi_id]; 2652 pDev = pHba->channel[bus_no].device[scsi_id];
@@ -2651,7 +2658,8 @@ static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2651 pDev = pDev->next_lun; 2658 pDev = pDev->next_lun;
2652 } 2659 }
2653 if(!pDev ) { // Something new add it 2660 if(!pDev ) { // Something new add it
2654 d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL); 2661 d = kmalloc(sizeof(struct i2o_device),
2662 GFP_ATOMIC);
2655 if(d==NULL) 2663 if(d==NULL)
2656 { 2664 {
2657 printk(KERN_CRIT "Out of memory for I2O device data.\n"); 2665 printk(KERN_CRIT "Out of memory for I2O device data.\n");
@@ -2667,13 +2675,11 @@ static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2667 adpt_i2o_report_hba_unit(pHba, d); 2675 adpt_i2o_report_hba_unit(pHba, d);
2668 adpt_i2o_install_device(pHba, d); 2676 adpt_i2o_install_device(pHba, d);
2669 2677
2670 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
2671 printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
2672 continue;
2673 }
2674 pDev = pHba->channel[bus_no].device[scsi_id]; 2678 pDev = pHba->channel[bus_no].device[scsi_id];
2675 if( pDev == NULL){ 2679 if( pDev == NULL){
2676 pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL); 2680 pDev =
2681 kzalloc(sizeof(struct adpt_device),
2682 GFP_ATOMIC);
2677 if(pDev == NULL) { 2683 if(pDev == NULL) {
2678 return -ENOMEM; 2684 return -ENOMEM;
2679 } 2685 }
@@ -2682,7 +2688,9 @@ static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2682 while (pDev->next_lun) { 2688 while (pDev->next_lun) {
2683 pDev = pDev->next_lun; 2689 pDev = pDev->next_lun;
2684 } 2690 }
2685 pDev = pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL); 2691 pDev = pDev->next_lun =
2692 kzalloc(sizeof(struct adpt_device),
2693 GFP_ATOMIC);
2686 if(pDev == NULL) { 2694 if(pDev == NULL) {
2687 return -ENOMEM; 2695 return -ENOMEM;
2688 } 2696 }
@@ -3127,7 +3135,7 @@ static int adpt_i2o_lct_get(adpt_hba* pHba)
3127 if (pHba->lct == NULL) { 3135 if (pHba->lct == NULL) {
3128 pHba->lct = dma_alloc_coherent(&pHba->pDev->dev, 3136 pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
3129 pHba->lct_size, &pHba->lct_pa, 3137 pHba->lct_size, &pHba->lct_pa,
3130 GFP_KERNEL); 3138 GFP_ATOMIC);
3131 if(pHba->lct == NULL) { 3139 if(pHba->lct == NULL) {
3132 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n", 3140 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
3133 pHba->name); 3141 pHba->name);
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 1a429ed6da9d..e79605a61155 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -117,9 +117,14 @@ static void fcoe_recv_frame(struct sk_buff *skb);
117 117
118static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *); 118static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
119 119
120module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR); 120module_param_call(create, fcoe_create, NULL, (void *)FIP_MODE_AUTO, S_IWUSR);
121__MODULE_PARM_TYPE(create, "string"); 121__MODULE_PARM_TYPE(create, "string");
122MODULE_PARM_DESC(create, " Creates fcoe instance on a ethernet interface"); 122MODULE_PARM_DESC(create, " Creates fcoe instance on a ethernet interface");
123module_param_call(create_vn2vn, fcoe_create, NULL,
124 (void *)FIP_MODE_VN2VN, S_IWUSR);
125__MODULE_PARM_TYPE(create_vn2vn, "string");
126MODULE_PARM_DESC(create_vn2vn, " Creates a VN_node to VN_node FCoE instance "
127 "on an Ethernet interface");
123module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR); 128module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
124__MODULE_PARM_TYPE(destroy, "string"); 129__MODULE_PARM_TYPE(destroy, "string");
125MODULE_PARM_DESC(destroy, " Destroys fcoe instance on a ethernet interface"); 130MODULE_PARM_DESC(destroy, " Destroys fcoe instance on a ethernet interface");
@@ -315,7 +320,11 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
315 dev_uc_add(netdev, flogi_maddr); 320 dev_uc_add(netdev, flogi_maddr);
316 if (fip->spma) 321 if (fip->spma)
317 dev_uc_add(netdev, fip->ctl_src_addr); 322 dev_uc_add(netdev, fip->ctl_src_addr);
318 dev_mc_add(netdev, FIP_ALL_ENODE_MACS); 323 if (fip->mode == FIP_MODE_VN2VN) {
324 dev_mc_add(netdev, FIP_ALL_VN2VN_MACS);
325 dev_mc_add(netdev, FIP_ALL_P2P_MACS);
326 } else
327 dev_mc_add(netdev, FIP_ALL_ENODE_MACS);
319 328
320 /* 329 /*
321 * setup the receive function from ethernet driver 330 * setup the receive function from ethernet driver
@@ -337,10 +346,12 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
337/** 346/**
338 * fcoe_interface_create() - Create a FCoE interface on a net device 347 * fcoe_interface_create() - Create a FCoE interface on a net device
339 * @netdev: The net device to create the FCoE interface on 348 * @netdev: The net device to create the FCoE interface on
349 * @fip_mode: The mode to use for FIP
340 * 350 *
341 * Returns: pointer to a struct fcoe_interface or NULL on error 351 * Returns: pointer to a struct fcoe_interface or NULL on error
342 */ 352 */
343static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev) 353static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
354 enum fip_state fip_mode)
344{ 355{
345 struct fcoe_interface *fcoe; 356 struct fcoe_interface *fcoe;
346 int err; 357 int err;
@@ -357,7 +368,7 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev)
357 /* 368 /*
358 * Initialize FIP. 369 * Initialize FIP.
359 */ 370 */
360 fcoe_ctlr_init(&fcoe->ctlr); 371 fcoe_ctlr_init(&fcoe->ctlr, fip_mode);
361 fcoe->ctlr.send = fcoe_fip_send; 372 fcoe->ctlr.send = fcoe_fip_send;
362 fcoe->ctlr.update_mac = fcoe_update_src_mac; 373 fcoe->ctlr.update_mac = fcoe_update_src_mac;
363 fcoe->ctlr.get_src_addr = fcoe_get_src_mac; 374 fcoe->ctlr.get_src_addr = fcoe_get_src_mac;
@@ -401,7 +412,11 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
401 dev_uc_del(netdev, flogi_maddr); 412 dev_uc_del(netdev, flogi_maddr);
402 if (fip->spma) 413 if (fip->spma)
403 dev_uc_del(netdev, fip->ctl_src_addr); 414 dev_uc_del(netdev, fip->ctl_src_addr);
404 dev_mc_del(netdev, FIP_ALL_ENODE_MACS); 415 if (fip->mode == FIP_MODE_VN2VN) {
416 dev_mc_del(netdev, FIP_ALL_VN2VN_MACS);
417 dev_mc_del(netdev, FIP_ALL_P2P_MACS);
418 } else
419 dev_mc_del(netdev, FIP_ALL_ENODE_MACS);
405 420
406 /* Tell the LLD we are done w/ FCoE */ 421 /* Tell the LLD we are done w/ FCoE */
407 ops = netdev->netdev_ops; 422 ops = netdev->netdev_ops;
@@ -574,6 +589,50 @@ static int fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
574} 589}
575 590
576/** 591/**
592 * fcoe_netdev_features_change - Updates the lport's offload flags based
593 * on the LLD netdev's FCoE feature flags
594 */
595static void fcoe_netdev_features_change(struct fc_lport *lport,
596 struct net_device *netdev)
597{
598 mutex_lock(&lport->lp_mutex);
599
600 if (netdev->features & NETIF_F_SG)
601 lport->sg_supp = 1;
602 else
603 lport->sg_supp = 0;
604
605 if (netdev->features & NETIF_F_FCOE_CRC) {
606 lport->crc_offload = 1;
607 FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n");
608 } else {
609 lport->crc_offload = 0;
610 }
611
612 if (netdev->features & NETIF_F_FSO) {
613 lport->seq_offload = 1;
614 lport->lso_max = netdev->gso_max_size;
615 FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n",
616 lport->lso_max);
617 } else {
618 lport->seq_offload = 0;
619 lport->lso_max = 0;
620 }
621
622 if (netdev->fcoe_ddp_xid) {
623 lport->lro_enabled = 1;
624 lport->lro_xid = netdev->fcoe_ddp_xid;
625 FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n",
626 lport->lro_xid);
627 } else {
628 lport->lro_enabled = 0;
629 lport->lro_xid = 0;
630 }
631
632 mutex_unlock(&lport->lp_mutex);
633}
634
635/**
577 * fcoe_netdev_config() - Set up net devive for SW FCoE 636 * fcoe_netdev_config() - Set up net devive for SW FCoE
578 * @lport: The local port that is associated with the net device 637 * @lport: The local port that is associated with the net device
579 * @netdev: The associated net device 638 * @netdev: The associated net device
@@ -588,7 +647,6 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
588 u64 wwnn, wwpn; 647 u64 wwnn, wwpn;
589 struct fcoe_interface *fcoe; 648 struct fcoe_interface *fcoe;
590 struct fcoe_port *port; 649 struct fcoe_port *port;
591 int vid = 0;
592 650
593 /* Setup lport private data to point to fcoe softc */ 651 /* Setup lport private data to point to fcoe softc */
594 port = lport_priv(lport); 652 port = lport_priv(lport);
@@ -609,25 +667,8 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
609 return -EINVAL; 667 return -EINVAL;
610 668
611 /* offload features support */ 669 /* offload features support */
612 if (netdev->features & NETIF_F_SG) 670 fcoe_netdev_features_change(lport, netdev);
613 lport->sg_supp = 1;
614 671
615 if (netdev->features & NETIF_F_FCOE_CRC) {
616 lport->crc_offload = 1;
617 FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n");
618 }
619 if (netdev->features & NETIF_F_FSO) {
620 lport->seq_offload = 1;
621 lport->lso_max = netdev->gso_max_size;
622 FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n",
623 lport->lso_max);
624 }
625 if (netdev->fcoe_ddp_xid) {
626 lport->lro_enabled = 1;
627 lport->lro_xid = netdev->fcoe_ddp_xid;
628 FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n",
629 lport->lro_xid);
630 }
631 skb_queue_head_init(&port->fcoe_pending_queue); 672 skb_queue_head_init(&port->fcoe_pending_queue);
632 port->fcoe_pending_queue_active = 0; 673 port->fcoe_pending_queue_active = 0;
633 setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lport); 674 setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lport);
@@ -635,20 +676,12 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
635 fcoe_link_speed_update(lport); 676 fcoe_link_speed_update(lport);
636 677
637 if (!lport->vport) { 678 if (!lport->vport) {
638 /*
639 * Use NAA 1&2 (FC-FS Rev. 2.0, Sec. 15) to generate WWNN/WWPN:
640 * For WWNN, we use NAA 1 w/ bit 27-16 of word 0 as 0.
641 * For WWPN, we use NAA 2 w/ bit 27-16 of word 0 from VLAN ID
642 */
643 if (netdev->priv_flags & IFF_802_1Q_VLAN)
644 vid = vlan_dev_vlan_id(netdev);
645
646 if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN)) 679 if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
647 wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0); 680 wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0);
648 fc_set_wwnn(lport, wwnn); 681 fc_set_wwnn(lport, wwnn);
649 if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN)) 682 if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
650 wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 683 wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr,
651 2, vid); 684 2, 0);
652 fc_set_wwpn(lport, wwpn); 685 fc_set_wwpn(lport, wwpn);
653 } 686 }
654 687
@@ -967,7 +1000,7 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
967 } 1000 }
968 1001
969 /* Initialize the library */ 1002 /* Initialize the library */
970 rc = fcoe_libfc_config(lport, &fcoe_libfc_fcn_templ); 1003 rc = fcoe_libfc_config(lport, &fcoe->ctlr, &fcoe_libfc_fcn_templ, 1);
971 if (rc) { 1004 if (rc) {
972 FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the " 1005 FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the "
973 "interface\n"); 1006 "interface\n");
@@ -1210,6 +1243,8 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1210 struct fcoe_interface *fcoe; 1243 struct fcoe_interface *fcoe;
1211 struct fc_frame_header *fh; 1244 struct fc_frame_header *fh;
1212 struct fcoe_percpu_s *fps; 1245 struct fcoe_percpu_s *fps;
1246 struct fcoe_port *port;
1247 struct ethhdr *eh;
1213 unsigned int cpu; 1248 unsigned int cpu;
1214 1249
1215 fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type); 1250 fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type);
@@ -1227,9 +1262,21 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1227 skb_tail_pointer(skb), skb_end_pointer(skb), 1262 skb_tail_pointer(skb), skb_end_pointer(skb),
1228 skb->csum, skb->dev ? skb->dev->name : "<NULL>"); 1263 skb->csum, skb->dev ? skb->dev->name : "<NULL>");
1229 1264
1230 /* check for FCOE packet type */ 1265 /* check for mac addresses */
1231 if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { 1266 eh = eth_hdr(skb);
1232 FCOE_NETDEV_DBG(netdev, "Wrong FC type frame"); 1267 port = lport_priv(lport);
1268 if (compare_ether_addr(eh->h_dest, port->data_src_addr) &&
1269 compare_ether_addr(eh->h_dest, fcoe->ctlr.ctl_src_addr) &&
1270 compare_ether_addr(eh->h_dest, (u8[6])FC_FCOE_FLOGI_MAC)) {
1271 FCOE_NETDEV_DBG(netdev, "wrong destination mac address:%pM\n",
1272 eh->h_dest);
1273 goto err;
1274 }
1275
1276 if (is_fip_mode(&fcoe->ctlr) &&
1277 compare_ether_addr(eh->h_source, fcoe->ctlr.dest_addr)) {
1278 FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n",
1279 eh->h_source);
1233 goto err; 1280 goto err;
1234 } 1281 }
1235 1282
@@ -1512,11 +1559,9 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1512 /* fill up mac and fcoe headers */ 1559 /* fill up mac and fcoe headers */
1513 eh = eth_hdr(skb); 1560 eh = eth_hdr(skb);
1514 eh->h_proto = htons(ETH_P_FCOE); 1561 eh->h_proto = htons(ETH_P_FCOE);
1562 memcpy(eh->h_dest, fcoe->ctlr.dest_addr, ETH_ALEN);
1515 if (fcoe->ctlr.map_dest) 1563 if (fcoe->ctlr.map_dest)
1516 fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id); 1564 memcpy(eh->h_dest + 3, fh->fh_d_id, 3);
1517 else
1518 /* insert GW address */
1519 memcpy(eh->h_dest, fcoe->ctlr.dest_addr, ETH_ALEN);
1520 1565
1521 if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN)) 1566 if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN))
1522 memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN); 1567 memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN);
@@ -1834,6 +1879,9 @@ static int fcoe_device_notification(struct notifier_block *notifier,
1834 schedule_work(&port->destroy_work); 1879 schedule_work(&port->destroy_work);
1835 goto out; 1880 goto out;
1836 break; 1881 break;
1882 case NETDEV_FEAT_CHANGE:
1883 fcoe_netdev_features_change(lport, netdev);
1884 break;
1837 default: 1885 default:
1838 FCOE_NETDEV_DBG(netdev, "Unknown event %ld " 1886 FCOE_NETDEV_DBG(netdev, "Unknown event %ld "
1839 "from netdev netlink\n", event); 1887 "from netdev netlink\n", event);
@@ -1918,8 +1966,8 @@ static int fcoe_disable(const char *buffer, struct kernel_param *kp)
1918 rtnl_unlock(); 1966 rtnl_unlock();
1919 1967
1920 if (fcoe) { 1968 if (fcoe) {
1921 fc_fabric_logoff(fcoe->ctlr.lp);
1922 fcoe_ctlr_link_down(&fcoe->ctlr); 1969 fcoe_ctlr_link_down(&fcoe->ctlr);
1970 fcoe_clean_pending_queue(fcoe->ctlr.lp);
1923 } else 1971 } else
1924 rc = -ENODEV; 1972 rc = -ENODEV;
1925 1973
@@ -1972,12 +2020,10 @@ static int fcoe_enable(const char *buffer, struct kernel_param *kp)
1972 fcoe = fcoe_hostlist_lookup_port(netdev); 2020 fcoe = fcoe_hostlist_lookup_port(netdev);
1973 rtnl_unlock(); 2021 rtnl_unlock();
1974 2022
1975 if (fcoe) { 2023 if (!fcoe)
1976 if (!fcoe_link_ok(fcoe->ctlr.lp))
1977 fcoe_ctlr_link_up(&fcoe->ctlr);
1978 rc = fc_fabric_login(fcoe->ctlr.lp);
1979 } else
1980 rc = -ENODEV; 2024 rc = -ENODEV;
2025 else if (!fcoe_link_ok(fcoe->ctlr.lp))
2026 fcoe_ctlr_link_up(&fcoe->ctlr);
1981 2027
1982 dev_put(netdev); 2028 dev_put(netdev);
1983out_nodev: 2029out_nodev:
@@ -2031,8 +2077,8 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
2031 rc = -ENODEV; 2077 rc = -ENODEV;
2032 goto out_putdev; 2078 goto out_putdev;
2033 } 2079 }
2034 list_del(&fcoe->list);
2035 fcoe_interface_cleanup(fcoe); 2080 fcoe_interface_cleanup(fcoe);
2081 list_del(&fcoe->list);
2036 /* RTNL mutex is dropped by fcoe_if_destroy */ 2082 /* RTNL mutex is dropped by fcoe_if_destroy */
2037 fcoe_if_destroy(fcoe->ctlr.lp); 2083 fcoe_if_destroy(fcoe->ctlr.lp);
2038 2084
@@ -2070,6 +2116,7 @@ static void fcoe_destroy_work(struct work_struct *work)
2070 */ 2116 */
2071static int fcoe_create(const char *buffer, struct kernel_param *kp) 2117static int fcoe_create(const char *buffer, struct kernel_param *kp)
2072{ 2118{
2119 enum fip_state fip_mode = (enum fip_state)(long)kp->arg;
2073 int rc; 2120 int rc;
2074 struct fcoe_interface *fcoe; 2121 struct fcoe_interface *fcoe;
2075 struct fc_lport *lport; 2122 struct fc_lport *lport;
@@ -2111,7 +2158,7 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
2111 goto out_putdev; 2158 goto out_putdev;
2112 } 2159 }
2113 2160
2114 fcoe = fcoe_interface_create(netdev); 2161 fcoe = fcoe_interface_create(netdev, fip_mode);
2115 if (!fcoe) { 2162 if (!fcoe) {
2116 rc = -ENOMEM; 2163 rc = -ENOMEM;
2117 goto out_putdev; 2164 goto out_putdev;
@@ -2521,6 +2568,8 @@ static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did,
2521 switch (op) { 2568 switch (op) {
2522 case ELS_FLOGI: 2569 case ELS_FLOGI:
2523 case ELS_FDISC: 2570 case ELS_FDISC:
2571 if (lport->point_to_multipoint)
2572 break;
2524 return fc_elsct_send(lport, did, fp, op, fcoe_flogi_resp, 2573 return fc_elsct_send(lport, did, fp, op, fcoe_flogi_resp,
2525 fip, timeout); 2574 fip, timeout);
2526 case ELS_LOGO: 2575 case ELS_LOGO:
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index 50aaa4bcfc50..aa503d83092a 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -39,6 +39,7 @@
39#include <scsi/fc/fc_fip.h> 39#include <scsi/fc/fc_fip.h>
40#include <scsi/fc/fc_encaps.h> 40#include <scsi/fc/fc_encaps.h>
41#include <scsi/fc/fc_fcoe.h> 41#include <scsi/fc/fc_fcoe.h>
42#include <scsi/fc/fc_fcp.h>
42 43
43#include <scsi/libfc.h> 44#include <scsi/libfc.h>
44#include <scsi/libfcoe.h> 45#include <scsi/libfcoe.h>
@@ -54,7 +55,15 @@ static void fcoe_ctlr_timeout(unsigned long);
54static void fcoe_ctlr_timer_work(struct work_struct *); 55static void fcoe_ctlr_timer_work(struct work_struct *);
55static void fcoe_ctlr_recv_work(struct work_struct *); 56static void fcoe_ctlr_recv_work(struct work_struct *);
56 57
58static void fcoe_ctlr_vn_start(struct fcoe_ctlr *);
59static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *, struct sk_buff *);
60static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *);
61static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *, u32, u8 *);
62
57static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS; 63static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
64static u8 fcoe_all_enode[ETH_ALEN] = FIP_ALL_ENODE_MACS;
65static u8 fcoe_all_vn2vn[ETH_ALEN] = FIP_ALL_VN2VN_MACS;
66static u8 fcoe_all_p2p[ETH_ALEN] = FIP_ALL_P2P_MACS;
58 67
59unsigned int libfcoe_debug_logging; 68unsigned int libfcoe_debug_logging;
60module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR); 69module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR);
@@ -80,6 +89,45 @@ do { \
80 printk(KERN_INFO "host%d: fip: " fmt, \ 89 printk(KERN_INFO "host%d: fip: " fmt, \
81 (fip)->lp->host->host_no, ##args);) 90 (fip)->lp->host->host_no, ##args);)
82 91
92static const char *fcoe_ctlr_states[] = {
93 [FIP_ST_DISABLED] = "DISABLED",
94 [FIP_ST_LINK_WAIT] = "LINK_WAIT",
95 [FIP_ST_AUTO] = "AUTO",
96 [FIP_ST_NON_FIP] = "NON_FIP",
97 [FIP_ST_ENABLED] = "ENABLED",
98 [FIP_ST_VNMP_START] = "VNMP_START",
99 [FIP_ST_VNMP_PROBE1] = "VNMP_PROBE1",
100 [FIP_ST_VNMP_PROBE2] = "VNMP_PROBE2",
101 [FIP_ST_VNMP_CLAIM] = "VNMP_CLAIM",
102 [FIP_ST_VNMP_UP] = "VNMP_UP",
103};
104
105static const char *fcoe_ctlr_state(enum fip_state state)
106{
107 const char *cp = "unknown";
108
109 if (state < ARRAY_SIZE(fcoe_ctlr_states))
110 cp = fcoe_ctlr_states[state];
111 if (!cp)
112 cp = "unknown";
113 return cp;
114}
115
116/**
117 * fcoe_ctlr_set_state() - Set and do debug printing for the new FIP state.
118 * @fip: The FCoE controller
119 * @state: The new state
120 */
121static void fcoe_ctlr_set_state(struct fcoe_ctlr *fip, enum fip_state state)
122{
123 if (state == fip->state)
124 return;
125 if (fip->lp)
126 LIBFCOE_FIP_DBG(fip, "state %s -> %s\n",
127 fcoe_ctlr_state(fip->state), fcoe_ctlr_state(state));
128 fip->state = state;
129}
130
83/** 131/**
84 * fcoe_ctlr_mtu_valid() - Check if a FCF's MTU is valid 132 * fcoe_ctlr_mtu_valid() - Check if a FCF's MTU is valid
85 * @fcf: The FCF to check 133 * @fcf: The FCF to check
@@ -105,15 +153,29 @@ static inline int fcoe_ctlr_fcf_usable(struct fcoe_fcf *fcf)
105} 153}
106 154
107/** 155/**
156 * fcoe_ctlr_map_dest() - Set flag and OUI for mapping destination addresses
157 * @fip: The FCoE controller
158 */
159static void fcoe_ctlr_map_dest(struct fcoe_ctlr *fip)
160{
161 if (fip->mode == FIP_MODE_VN2VN)
162 hton24(fip->dest_addr, FIP_VN_FC_MAP);
163 else
164 hton24(fip->dest_addr, FIP_DEF_FC_MAP);
165 hton24(fip->dest_addr + 3, 0);
166 fip->map_dest = 1;
167}
168
169/**
108 * fcoe_ctlr_init() - Initialize the FCoE Controller instance 170 * fcoe_ctlr_init() - Initialize the FCoE Controller instance
109 * @fip: The FCoE controller to initialize 171 * @fip: The FCoE controller to initialize
110 */ 172 */
111void fcoe_ctlr_init(struct fcoe_ctlr *fip) 173void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode)
112{ 174{
113 fip->state = FIP_ST_LINK_WAIT; 175 fcoe_ctlr_set_state(fip, FIP_ST_LINK_WAIT);
114 fip->mode = FIP_ST_AUTO; 176 fip->mode = mode;
115 INIT_LIST_HEAD(&fip->fcfs); 177 INIT_LIST_HEAD(&fip->fcfs);
116 spin_lock_init(&fip->lock); 178 mutex_init(&fip->ctlr_mutex);
117 fip->flogi_oxid = FC_XID_UNKNOWN; 179 fip->flogi_oxid = FC_XID_UNKNOWN;
118 setup_timer(&fip->timer, fcoe_ctlr_timeout, (unsigned long)fip); 180 setup_timer(&fip->timer, fcoe_ctlr_timeout, (unsigned long)fip);
119 INIT_WORK(&fip->timer_work, fcoe_ctlr_timer_work); 181 INIT_WORK(&fip->timer_work, fcoe_ctlr_timer_work);
@@ -159,10 +221,10 @@ void fcoe_ctlr_destroy(struct fcoe_ctlr *fip)
159 cancel_work_sync(&fip->recv_work); 221 cancel_work_sync(&fip->recv_work);
160 skb_queue_purge(&fip->fip_recv_list); 222 skb_queue_purge(&fip->fip_recv_list);
161 223
162 spin_lock_bh(&fip->lock); 224 mutex_lock(&fip->ctlr_mutex);
163 fip->state = FIP_ST_DISABLED; 225 fcoe_ctlr_set_state(fip, FIP_ST_DISABLED);
164 fcoe_ctlr_reset_fcfs(fip); 226 fcoe_ctlr_reset_fcfs(fip);
165 spin_unlock_bh(&fip->lock); 227 mutex_unlock(&fip->ctlr_mutex);
166 del_timer_sync(&fip->timer); 228 del_timer_sync(&fip->timer);
167 cancel_work_sync(&fip->timer_work); 229 cancel_work_sync(&fip->timer_work);
168} 230}
@@ -255,19 +317,33 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
255 */ 317 */
256void fcoe_ctlr_link_up(struct fcoe_ctlr *fip) 318void fcoe_ctlr_link_up(struct fcoe_ctlr *fip)
257{ 319{
258 spin_lock_bh(&fip->lock); 320 mutex_lock(&fip->ctlr_mutex);
259 if (fip->state == FIP_ST_NON_FIP || fip->state == FIP_ST_AUTO) { 321 if (fip->state == FIP_ST_NON_FIP || fip->state == FIP_ST_AUTO) {
260 spin_unlock_bh(&fip->lock); 322 mutex_unlock(&fip->ctlr_mutex);
261 fc_linkup(fip->lp); 323 fc_linkup(fip->lp);
262 } else if (fip->state == FIP_ST_LINK_WAIT) { 324 } else if (fip->state == FIP_ST_LINK_WAIT) {
263 fip->state = fip->mode; 325 fcoe_ctlr_set_state(fip, fip->mode);
264 spin_unlock_bh(&fip->lock); 326 switch (fip->mode) {
265 if (fip->state == FIP_ST_AUTO) 327 default:
328 LIBFCOE_FIP_DBG(fip, "invalid mode %d\n", fip->mode);
329 /* fall-through */
330 case FIP_MODE_AUTO:
266 LIBFCOE_FIP_DBG(fip, "%s", "setting AUTO mode.\n"); 331 LIBFCOE_FIP_DBG(fip, "%s", "setting AUTO mode.\n");
267 fc_linkup(fip->lp); 332 /* fall-through */
268 fcoe_ctlr_solicit(fip, NULL); 333 case FIP_MODE_FABRIC:
334 case FIP_MODE_NON_FIP:
335 mutex_unlock(&fip->ctlr_mutex);
336 fc_linkup(fip->lp);
337 fcoe_ctlr_solicit(fip, NULL);
338 break;
339 case FIP_MODE_VN2VN:
340 fcoe_ctlr_vn_start(fip);
341 mutex_unlock(&fip->ctlr_mutex);
342 fc_linkup(fip->lp);
343 break;
344 }
269 } else 345 } else
270 spin_unlock_bh(&fip->lock); 346 mutex_unlock(&fip->ctlr_mutex);
271} 347}
272EXPORT_SYMBOL(fcoe_ctlr_link_up); 348EXPORT_SYMBOL(fcoe_ctlr_link_up);
273 349
@@ -283,7 +359,7 @@ static void fcoe_ctlr_reset(struct fcoe_ctlr *fip)
283 fip->port_ka_time = 0; 359 fip->port_ka_time = 0;
284 fip->sol_time = 0; 360 fip->sol_time = 0;
285 fip->flogi_oxid = FC_XID_UNKNOWN; 361 fip->flogi_oxid = FC_XID_UNKNOWN;
286 fip->map_dest = 0; 362 fcoe_ctlr_map_dest(fip);
287} 363}
288 364
289/** 365/**
@@ -300,11 +376,11 @@ int fcoe_ctlr_link_down(struct fcoe_ctlr *fip)
300 int link_dropped; 376 int link_dropped;
301 377
302 LIBFCOE_FIP_DBG(fip, "link down.\n"); 378 LIBFCOE_FIP_DBG(fip, "link down.\n");
303 spin_lock_bh(&fip->lock); 379 mutex_lock(&fip->ctlr_mutex);
304 fcoe_ctlr_reset(fip); 380 fcoe_ctlr_reset(fip);
305 link_dropped = fip->state != FIP_ST_LINK_WAIT; 381 link_dropped = fip->state != FIP_ST_LINK_WAIT;
306 fip->state = FIP_ST_LINK_WAIT; 382 fcoe_ctlr_set_state(fip, FIP_ST_LINK_WAIT);
307 spin_unlock_bh(&fip->lock); 383 mutex_unlock(&fip->ctlr_mutex);
308 384
309 if (link_dropped) 385 if (link_dropped)
310 fc_linkdown(fip->lp); 386 fc_linkdown(fip->lp);
@@ -343,7 +419,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip,
343 419
344 fcf = fip->sel_fcf; 420 fcf = fip->sel_fcf;
345 lp = fip->lp; 421 lp = fip->lp;
346 if (!fcf || !lp->port_id) 422 if (!fcf || (ports && !lp->port_id))
347 return; 423 return;
348 424
349 len = sizeof(*kal) + ports * sizeof(*vn); 425 len = sizeof(*kal) + ports * sizeof(*vn);
@@ -389,6 +465,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip,
389 * @fip: The FCoE controller for the ELS frame 465 * @fip: The FCoE controller for the ELS frame
390 * @dtype: The FIP descriptor type for the frame 466 * @dtype: The FIP descriptor type for the frame
391 * @skb: The FCoE ELS frame including FC header but no FCoE headers 467 * @skb: The FCoE ELS frame including FC header but no FCoE headers
468 * @d_id: The destination port ID.
392 * 469 *
393 * Returns non-zero error code on failure. 470 * Returns non-zero error code on failure.
394 * 471 *
@@ -399,58 +476,75 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip,
399 * Ethernet header. The tailroom is for the FIP MAC descriptor. 476 * Ethernet header. The tailroom is for the FIP MAC descriptor.
400 */ 477 */
401static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport, 478static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport,
402 u8 dtype, struct sk_buff *skb) 479 u8 dtype, struct sk_buff *skb, u32 d_id)
403{ 480{
404 struct fip_encaps_head { 481 struct fip_encaps_head {
405 struct ethhdr eth; 482 struct ethhdr eth;
406 struct fip_header fip; 483 struct fip_header fip;
407 struct fip_encaps encaps; 484 struct fip_encaps encaps;
408 } __attribute__((packed)) *cap; 485 } __attribute__((packed)) *cap;
486 struct fc_frame_header *fh;
409 struct fip_mac_desc *mac; 487 struct fip_mac_desc *mac;
410 struct fcoe_fcf *fcf; 488 struct fcoe_fcf *fcf;
411 size_t dlen; 489 size_t dlen;
412 u16 fip_flags; 490 u16 fip_flags;
491 u8 op;
413 492
414 fcf = fip->sel_fcf; 493 fh = (struct fc_frame_header *)skb->data;
415 if (!fcf) 494 op = *(u8 *)(fh + 1);
416 return -ENODEV;
417
418 /* set flags according to both FCF and lport's capability on SPMA */
419 fip_flags = fcf->flags;
420 fip_flags &= fip->spma ? FIP_FL_SPMA | FIP_FL_FPMA : FIP_FL_FPMA;
421 if (!fip_flags)
422 return -ENODEV;
423
424 dlen = sizeof(struct fip_encaps) + skb->len; /* len before push */ 495 dlen = sizeof(struct fip_encaps) + skb->len; /* len before push */
425 cap = (struct fip_encaps_head *)skb_push(skb, sizeof(*cap)); 496 cap = (struct fip_encaps_head *)skb_push(skb, sizeof(*cap));
426
427 memset(cap, 0, sizeof(*cap)); 497 memset(cap, 0, sizeof(*cap));
428 memcpy(cap->eth.h_dest, fcf->fcf_mac, ETH_ALEN); 498
499 if (lport->point_to_multipoint) {
500 if (fcoe_ctlr_vn_lookup(fip, d_id, cap->eth.h_dest))
501 return -ENODEV;
502 fip_flags = 0;
503 } else {
504 fcf = fip->sel_fcf;
505 if (!fcf)
506 return -ENODEV;
507 fip_flags = fcf->flags;
508 fip_flags &= fip->spma ? FIP_FL_SPMA | FIP_FL_FPMA :
509 FIP_FL_FPMA;
510 if (!fip_flags)
511 return -ENODEV;
512 memcpy(cap->eth.h_dest, fcf->fcf_mac, ETH_ALEN);
513 }
429 memcpy(cap->eth.h_source, fip->ctl_src_addr, ETH_ALEN); 514 memcpy(cap->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
430 cap->eth.h_proto = htons(ETH_P_FIP); 515 cap->eth.h_proto = htons(ETH_P_FIP);
431 516
432 cap->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); 517 cap->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
433 cap->fip.fip_op = htons(FIP_OP_LS); 518 cap->fip.fip_op = htons(FIP_OP_LS);
434 cap->fip.fip_subcode = FIP_SC_REQ; 519 if (op == ELS_LS_ACC || op == ELS_LS_RJT)
435 cap->fip.fip_dl_len = htons((dlen + sizeof(*mac)) / FIP_BPW); 520 cap->fip.fip_subcode = FIP_SC_REP;
521 else
522 cap->fip.fip_subcode = FIP_SC_REQ;
436 cap->fip.fip_flags = htons(fip_flags); 523 cap->fip.fip_flags = htons(fip_flags);
437 524
438 cap->encaps.fd_desc.fip_dtype = dtype; 525 cap->encaps.fd_desc.fip_dtype = dtype;
439 cap->encaps.fd_desc.fip_dlen = dlen / FIP_BPW; 526 cap->encaps.fd_desc.fip_dlen = dlen / FIP_BPW;
440 527
441 mac = (struct fip_mac_desc *)skb_put(skb, sizeof(*mac)); 528 if (op != ELS_LS_RJT) {
442 memset(mac, 0, sizeof(*mac)); 529 dlen += sizeof(*mac);
443 mac->fd_desc.fip_dtype = FIP_DT_MAC; 530 mac = (struct fip_mac_desc *)skb_put(skb, sizeof(*mac));
444 mac->fd_desc.fip_dlen = sizeof(*mac) / FIP_BPW; 531 memset(mac, 0, sizeof(*mac));
445 if (dtype != FIP_DT_FLOGI && dtype != FIP_DT_FDISC) { 532 mac->fd_desc.fip_dtype = FIP_DT_MAC;
446 memcpy(mac->fd_mac, fip->get_src_addr(lport), ETH_ALEN); 533 mac->fd_desc.fip_dlen = sizeof(*mac) / FIP_BPW;
447 } else if (fip_flags & FIP_FL_SPMA) { 534 if (dtype != FIP_DT_FLOGI && dtype != FIP_DT_FDISC) {
448 LIBFCOE_FIP_DBG(fip, "FLOGI/FDISC sent with SPMA\n"); 535 memcpy(mac->fd_mac, fip->get_src_addr(lport), ETH_ALEN);
449 memcpy(mac->fd_mac, fip->ctl_src_addr, ETH_ALEN); 536 } else if (fip->mode == FIP_MODE_VN2VN) {
450 } else { 537 hton24(mac->fd_mac, FIP_VN_FC_MAP);
451 LIBFCOE_FIP_DBG(fip, "FLOGI/FDISC sent with FPMA\n"); 538 hton24(mac->fd_mac + 3, fip->port_id);
452 /* FPMA only FLOGI must leave the MAC desc set to all 0s */ 539 } else if (fip_flags & FIP_FL_SPMA) {
540 LIBFCOE_FIP_DBG(fip, "FLOGI/FDISC sent with SPMA\n");
541 memcpy(mac->fd_mac, fip->ctl_src_addr, ETH_ALEN);
542 } else {
543 LIBFCOE_FIP_DBG(fip, "FLOGI/FDISC sent with FPMA\n");
544 /* FPMA only FLOGI. Must leave the MAC desc zeroed. */
545 }
453 } 546 }
547 cap->fip.fip_dl_len = htons(dlen / FIP_BPW);
454 548
455 skb->protocol = htons(ETH_P_FIP); 549 skb->protocol = htons(ETH_P_FIP);
456 skb_reset_mac_header(skb); 550 skb_reset_mac_header(skb);
@@ -469,19 +563,22 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport,
469 * 563 *
470 * The caller must check that the length is a multiple of 4. 564 * The caller must check that the length is a multiple of 4.
471 * The SKB must have enough headroom (28 bytes) and tailroom (8 bytes). 565 * The SKB must have enough headroom (28 bytes) and tailroom (8 bytes).
566 * The the skb must also be an fc_frame.
472 */ 567 */
473int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, 568int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
474 struct sk_buff *skb) 569 struct sk_buff *skb)
475{ 570{
571 struct fc_frame *fp;
476 struct fc_frame_header *fh; 572 struct fc_frame_header *fh;
477 u16 old_xid; 573 u16 old_xid;
478 u8 op; 574 u8 op;
479 u8 mac[ETH_ALEN]; 575 u8 mac[ETH_ALEN];
480 576
577 fp = container_of(skb, struct fc_frame, skb);
481 fh = (struct fc_frame_header *)skb->data; 578 fh = (struct fc_frame_header *)skb->data;
482 op = *(u8 *)(fh + 1); 579 op = *(u8 *)(fh + 1);
483 580
484 if (op == ELS_FLOGI) { 581 if (op == ELS_FLOGI && fip->mode != FIP_MODE_VN2VN) {
485 old_xid = fip->flogi_oxid; 582 old_xid = fip->flogi_oxid;
486 fip->flogi_oxid = ntohs(fh->fh_ox_id); 583 fip->flogi_oxid = ntohs(fh->fh_ox_id);
487 if (fip->state == FIP_ST_AUTO) { 584 if (fip->state == FIP_ST_AUTO) {
@@ -490,18 +587,17 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
490 fip->flogi_count++; 587 fip->flogi_count++;
491 if (fip->flogi_count < 3) 588 if (fip->flogi_count < 3)
492 goto drop; 589 goto drop;
493 fip->map_dest = 1; 590 fcoe_ctlr_map_dest(fip);
494 return 0; 591 return 0;
495 } 592 }
496 if (fip->state == FIP_ST_NON_FIP) 593 if (fip->state == FIP_ST_NON_FIP)
497 fip->map_dest = 1; 594 fcoe_ctlr_map_dest(fip);
498 } 595 }
499 596
500 if (fip->state == FIP_ST_NON_FIP) 597 if (fip->state == FIP_ST_NON_FIP)
501 return 0; 598 return 0;
502 if (!fip->sel_fcf) 599 if (!fip->sel_fcf && fip->mode != FIP_MODE_VN2VN)
503 goto drop; 600 goto drop;
504
505 switch (op) { 601 switch (op) {
506 case ELS_FLOGI: 602 case ELS_FLOGI:
507 op = FIP_DT_FLOGI; 603 op = FIP_DT_FLOGI;
@@ -512,36 +608,49 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
512 op = FIP_DT_FDISC; 608 op = FIP_DT_FDISC;
513 break; 609 break;
514 case ELS_LOGO: 610 case ELS_LOGO:
515 if (fip->state != FIP_ST_ENABLED) 611 if (fip->mode == FIP_MODE_VN2VN) {
516 return 0; 612 if (fip->state != FIP_ST_VNMP_UP)
517 if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI) 613 return -EINVAL;
518 return 0; 614 if (ntoh24(fh->fh_d_id) == FC_FID_FLOGI)
615 return -EINVAL;
616 } else {
617 if (fip->state != FIP_ST_ENABLED)
618 return 0;
619 if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
620 return 0;
621 }
519 op = FIP_DT_LOGO; 622 op = FIP_DT_LOGO;
520 break; 623 break;
521 case ELS_LS_ACC: 624 case ELS_LS_ACC:
522 if (fip->flogi_oxid == FC_XID_UNKNOWN)
523 return 0;
524 if (!ntoh24(fh->fh_s_id))
525 return 0;
526 if (fip->state == FIP_ST_AUTO)
527 return 0;
528 /* 625 /*
529 * Here we must've gotten an SID by accepting an FLOGI 626 * If non-FIP, we may have gotten an SID by accepting an FLOGI
530 * from a point-to-point connection. Switch to using 627 * from a point-to-point connection. Switch to using
531 * the source mac based on the SID. The destination 628 * the source mac based on the SID. The destination
532 * MAC in this case would have been set by receving the 629 * MAC in this case would have been set by receving the
533 * FLOGI. 630 * FLOGI.
534 */ 631 */
535 fip->flogi_oxid = FC_XID_UNKNOWN; 632 if (fip->state == FIP_ST_NON_FIP) {
536 fc_fcoe_set_mac(mac, fh->fh_d_id); 633 if (fip->flogi_oxid == FC_XID_UNKNOWN)
537 fip->update_mac(lport, mac); 634 return 0;
635 fip->flogi_oxid = FC_XID_UNKNOWN;
636 fc_fcoe_set_mac(mac, fh->fh_d_id);
637 fip->update_mac(lport, mac);
638 }
639 /* fall through */
640 case ELS_LS_RJT:
641 op = fr_encaps(fp);
642 if (op)
643 break;
538 return 0; 644 return 0;
539 default: 645 default:
540 if (fip->state != FIP_ST_ENABLED) 646 if (fip->state != FIP_ST_ENABLED &&
647 fip->state != FIP_ST_VNMP_UP)
541 goto drop; 648 goto drop;
542 return 0; 649 return 0;
543 } 650 }
544 if (fcoe_ctlr_encaps(fip, lport, op, skb)) 651 LIBFCOE_FIP_DBG(fip, "els_send op %u d_id %x\n",
652 op, ntoh24(fh->fh_d_id));
653 if (fcoe_ctlr_encaps(fip, lport, op, skb, ntoh24(fh->fh_d_id)))
545 goto drop; 654 goto drop;
546 fip->send(fip, skb); 655 fip->send(fip, skb);
547 return -EINPROGRESS; 656 return -EINPROGRESS;
@@ -557,60 +666,66 @@ EXPORT_SYMBOL(fcoe_ctlr_els_send);
557 * 666 *
558 * Called with lock held and preemption disabled. 667 * Called with lock held and preemption disabled.
559 * 668 *
560 * An FCF is considered old if we have missed three advertisements. 669 * An FCF is considered old if we have missed two advertisements.
561 * That is, there have been no valid advertisement from it for three 670 * That is, there have been no valid advertisement from it for 2.5
562 * times its keep-alive period including fuzz. 671 * times its keep-alive period.
563 * 672 *
564 * In addition, determine the time when an FCF selection can occur. 673 * In addition, determine the time when an FCF selection can occur.
565 * 674 *
566 * Also, increment the MissDiscAdvCount when no advertisement is received 675 * Also, increment the MissDiscAdvCount when no advertisement is received
567 * for the corresponding FCF for 1.5 * FKA_ADV_PERIOD (FC-BB-5 LESB). 676 * for the corresponding FCF for 1.5 * FKA_ADV_PERIOD (FC-BB-5 LESB).
677 *
678 * Returns the time in jiffies for the next call.
568 */ 679 */
569static void fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) 680static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
570{ 681{
571 struct fcoe_fcf *fcf; 682 struct fcoe_fcf *fcf;
572 struct fcoe_fcf *next; 683 struct fcoe_fcf *next;
684 unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD);
685 unsigned long deadline;
573 unsigned long sel_time = 0; 686 unsigned long sel_time = 0;
574 unsigned long mda_time = 0;
575 struct fcoe_dev_stats *stats; 687 struct fcoe_dev_stats *stats;
576 688
689 stats = per_cpu_ptr(fip->lp->dev_stats, get_cpu());
690
577 list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { 691 list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
578 mda_time = fcf->fka_period + (fcf->fka_period >> 1); 692 deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2;
579 if ((fip->sel_fcf == fcf) && 693 if (fip->sel_fcf == fcf) {
580 (time_after(jiffies, fcf->time + mda_time))) { 694 if (time_after(jiffies, deadline)) {
581 mod_timer(&fip->timer, jiffies + mda_time); 695 stats->MissDiscAdvCount++;
582 stats = per_cpu_ptr(fip->lp->dev_stats, 696 printk(KERN_INFO "libfcoe: host%d: "
583 smp_processor_id()); 697 "Missing Discovery Advertisement "
584 stats->MissDiscAdvCount++; 698 "for fab %16.16llx count %lld\n",
585 printk(KERN_INFO "libfcoe: host%d: Missing Discovery " 699 fip->lp->host->host_no, fcf->fabric_name,
586 "Advertisement for fab %16.16llx count %lld\n", 700 stats->MissDiscAdvCount);
587 fip->lp->host->host_no, fcf->fabric_name, 701 } else if (time_after(next_timer, deadline))
588 stats->MissDiscAdvCount); 702 next_timer = deadline;
589 } 703 }
590 if (time_after(jiffies, fcf->time + fcf->fka_period * 3 + 704
591 msecs_to_jiffies(FIP_FCF_FUZZ * 3))) { 705 deadline += fcf->fka_period;
706 if (time_after_eq(jiffies, deadline)) {
592 if (fip->sel_fcf == fcf) 707 if (fip->sel_fcf == fcf)
593 fip->sel_fcf = NULL; 708 fip->sel_fcf = NULL;
594 list_del(&fcf->list); 709 list_del(&fcf->list);
595 WARN_ON(!fip->fcf_count); 710 WARN_ON(!fip->fcf_count);
596 fip->fcf_count--; 711 fip->fcf_count--;
597 kfree(fcf); 712 kfree(fcf);
598 stats = per_cpu_ptr(fip->lp->dev_stats,
599 smp_processor_id());
600 stats->VLinkFailureCount++; 713 stats->VLinkFailureCount++;
601 } else if (fcoe_ctlr_mtu_valid(fcf) && 714 } else {
602 (!sel_time || time_before(sel_time, fcf->time))) { 715 if (time_after(next_timer, deadline))
603 sel_time = fcf->time; 716 next_timer = deadline;
717 if (fcoe_ctlr_mtu_valid(fcf) &&
718 (!sel_time || time_before(sel_time, fcf->time)))
719 sel_time = fcf->time;
604 } 720 }
605 } 721 }
606 if (sel_time) { 722 put_cpu();
723 if (sel_time && !fip->sel_fcf && !fip->sel_time) {
607 sel_time += msecs_to_jiffies(FCOE_CTLR_START_DELAY); 724 sel_time += msecs_to_jiffies(FCOE_CTLR_START_DELAY);
608 fip->sel_time = sel_time; 725 fip->sel_time = sel_time;
609 if (time_before(sel_time, fip->timer.expires))
610 mod_timer(&fip->timer, sel_time);
611 } else {
612 fip->sel_time = 0;
613 } 726 }
727
728 return next_timer;
614} 729}
615 730
616/** 731/**
@@ -633,6 +748,7 @@ static int fcoe_ctlr_parse_adv(struct fcoe_ctlr *fip,
633 unsigned long t; 748 unsigned long t;
634 size_t rlen; 749 size_t rlen;
635 size_t dlen; 750 size_t dlen;
751 u32 desc_mask;
636 752
637 memset(fcf, 0, sizeof(*fcf)); 753 memset(fcf, 0, sizeof(*fcf));
638 fcf->fka_period = msecs_to_jiffies(FCOE_CTLR_DEF_FKA); 754 fcf->fka_period = msecs_to_jiffies(FCOE_CTLR_DEF_FKA);
@@ -640,6 +756,12 @@ static int fcoe_ctlr_parse_adv(struct fcoe_ctlr *fip,
640 fiph = (struct fip_header *)skb->data; 756 fiph = (struct fip_header *)skb->data;
641 fcf->flags = ntohs(fiph->fip_flags); 757 fcf->flags = ntohs(fiph->fip_flags);
642 758
759 /*
760 * mask of required descriptors. validating each one clears its bit.
761 */
762 desc_mask = BIT(FIP_DT_PRI) | BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) |
763 BIT(FIP_DT_FAB) | BIT(FIP_DT_FKA);
764
643 rlen = ntohs(fiph->fip_dl_len) * 4; 765 rlen = ntohs(fiph->fip_dl_len) * 4;
644 if (rlen + sizeof(*fiph) > skb->len) 766 if (rlen + sizeof(*fiph) > skb->len)
645 return -EINVAL; 767 return -EINVAL;
@@ -649,11 +771,19 @@ static int fcoe_ctlr_parse_adv(struct fcoe_ctlr *fip,
649 dlen = desc->fip_dlen * FIP_BPW; 771 dlen = desc->fip_dlen * FIP_BPW;
650 if (dlen < sizeof(*desc) || dlen > rlen) 772 if (dlen < sizeof(*desc) || dlen > rlen)
651 return -EINVAL; 773 return -EINVAL;
774 /* Drop Adv if there are duplicate critical descriptors */
775 if ((desc->fip_dtype < 32) &&
776 !(desc_mask & 1U << desc->fip_dtype)) {
777 LIBFCOE_FIP_DBG(fip, "Duplicate Critical "
778 "Descriptors in FIP adv\n");
779 return -EINVAL;
780 }
652 switch (desc->fip_dtype) { 781 switch (desc->fip_dtype) {
653 case FIP_DT_PRI: 782 case FIP_DT_PRI:
654 if (dlen != sizeof(struct fip_pri_desc)) 783 if (dlen != sizeof(struct fip_pri_desc))
655 goto len_err; 784 goto len_err;
656 fcf->pri = ((struct fip_pri_desc *)desc)->fd_pri; 785 fcf->pri = ((struct fip_pri_desc *)desc)->fd_pri;
786 desc_mask &= ~BIT(FIP_DT_PRI);
657 break; 787 break;
658 case FIP_DT_MAC: 788 case FIP_DT_MAC:
659 if (dlen != sizeof(struct fip_mac_desc)) 789 if (dlen != sizeof(struct fip_mac_desc))
@@ -662,16 +792,19 @@ static int fcoe_ctlr_parse_adv(struct fcoe_ctlr *fip,
662 ((struct fip_mac_desc *)desc)->fd_mac, 792 ((struct fip_mac_desc *)desc)->fd_mac,
663 ETH_ALEN); 793 ETH_ALEN);
664 if (!is_valid_ether_addr(fcf->fcf_mac)) { 794 if (!is_valid_ether_addr(fcf->fcf_mac)) {
665 LIBFCOE_FIP_DBG(fip, "Invalid MAC address " 795 LIBFCOE_FIP_DBG(fip,
666 "in FIP adv\n"); 796 "Invalid MAC addr %pM in FIP adv\n",
797 fcf->fcf_mac);
667 return -EINVAL; 798 return -EINVAL;
668 } 799 }
800 desc_mask &= ~BIT(FIP_DT_MAC);
669 break; 801 break;
670 case FIP_DT_NAME: 802 case FIP_DT_NAME:
671 if (dlen != sizeof(struct fip_wwn_desc)) 803 if (dlen != sizeof(struct fip_wwn_desc))
672 goto len_err; 804 goto len_err;
673 wwn = (struct fip_wwn_desc *)desc; 805 wwn = (struct fip_wwn_desc *)desc;
674 fcf->switch_name = get_unaligned_be64(&wwn->fd_wwn); 806 fcf->switch_name = get_unaligned_be64(&wwn->fd_wwn);
807 desc_mask &= ~BIT(FIP_DT_NAME);
675 break; 808 break;
676 case FIP_DT_FAB: 809 case FIP_DT_FAB:
677 if (dlen != sizeof(struct fip_fab_desc)) 810 if (dlen != sizeof(struct fip_fab_desc))
@@ -680,6 +813,7 @@ static int fcoe_ctlr_parse_adv(struct fcoe_ctlr *fip,
680 fcf->fabric_name = get_unaligned_be64(&fab->fd_wwn); 813 fcf->fabric_name = get_unaligned_be64(&fab->fd_wwn);
681 fcf->vfid = ntohs(fab->fd_vfid); 814 fcf->vfid = ntohs(fab->fd_vfid);
682 fcf->fc_map = ntoh24(fab->fd_map); 815 fcf->fc_map = ntoh24(fab->fd_map);
816 desc_mask &= ~BIT(FIP_DT_FAB);
683 break; 817 break;
684 case FIP_DT_FKA: 818 case FIP_DT_FKA:
685 if (dlen != sizeof(struct fip_fka_desc)) 819 if (dlen != sizeof(struct fip_fka_desc))
@@ -690,6 +824,7 @@ static int fcoe_ctlr_parse_adv(struct fcoe_ctlr *fip,
690 t = ntohl(fka->fd_fka_period); 824 t = ntohl(fka->fd_fka_period);
691 if (t >= FCOE_CTLR_MIN_FKA) 825 if (t >= FCOE_CTLR_MIN_FKA)
692 fcf->fka_period = msecs_to_jiffies(t); 826 fcf->fka_period = msecs_to_jiffies(t);
827 desc_mask &= ~BIT(FIP_DT_FKA);
693 break; 828 break;
694 case FIP_DT_MAP_OUI: 829 case FIP_DT_MAP_OUI:
695 case FIP_DT_FCOE_SIZE: 830 case FIP_DT_FCOE_SIZE:
@@ -703,15 +838,20 @@ static int fcoe_ctlr_parse_adv(struct fcoe_ctlr *fip,
703 /* standard says ignore unknown descriptors >= 128 */ 838 /* standard says ignore unknown descriptors >= 128 */
704 if (desc->fip_dtype < FIP_DT_VENDOR_BASE) 839 if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
705 return -EINVAL; 840 return -EINVAL;
706 continue; 841 break;
707 } 842 }
708 desc = (struct fip_desc *)((char *)desc + dlen); 843 desc = (struct fip_desc *)((char *)desc + dlen);
709 rlen -= dlen; 844 rlen -= dlen;
710 } 845 }
711 if (!fcf->fc_map || (fcf->fc_map & 0x10000)) 846 if (!fcf->fc_map || (fcf->fc_map & 0x10000))
712 return -EINVAL; 847 return -EINVAL;
713 if (!fcf->switch_name || !fcf->fabric_name) 848 if (!fcf->switch_name)
849 return -EINVAL;
850 if (desc_mask) {
851 LIBFCOE_FIP_DBG(fip, "adv missing descriptors mask %x\n",
852 desc_mask);
714 return -EINVAL; 853 return -EINVAL;
854 }
715 return 0; 855 return 0;
716 856
717len_err: 857len_err:
@@ -737,7 +877,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
737 if (fcoe_ctlr_parse_adv(fip, skb, &new)) 877 if (fcoe_ctlr_parse_adv(fip, skb, &new))
738 return; 878 return;
739 879
740 spin_lock_bh(&fip->lock); 880 mutex_lock(&fip->ctlr_mutex);
741 first = list_empty(&fip->fcfs); 881 first = list_empty(&fip->fcfs);
742 found = NULL; 882 found = NULL;
743 list_for_each_entry(fcf, &fip->fcfs, list) { 883 list_for_each_entry(fcf, &fip->fcfs, list) {
@@ -762,18 +902,21 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
762 list_add(&fcf->list, &fip->fcfs); 902 list_add(&fcf->list, &fip->fcfs);
763 } else { 903 } else {
764 /* 904 /*
765 * Flags in advertisements are ignored once the FCF is 905 * Update the FCF's keep-alive descriptor flags.
766 * selected. Flags in unsolicited advertisements are 906 * Other flag changes from new advertisements are
767 * ignored after a usable solicited advertisement 907 * ignored after a solicited advertisement is
768 * has been received. 908 * received and the FCF is selectable (usable).
769 */ 909 */
770 if (fcf == fip->sel_fcf) { 910 fcf->fd_flags = new.fd_flags;
911 if (!fcoe_ctlr_fcf_usable(fcf))
912 fcf->flags = new.flags;
913
914 if (fcf == fip->sel_fcf && !fcf->fd_flags) {
771 fip->ctlr_ka_time -= fcf->fka_period; 915 fip->ctlr_ka_time -= fcf->fka_period;
772 fip->ctlr_ka_time += new.fka_period; 916 fip->ctlr_ka_time += new.fka_period;
773 if (time_before(fip->ctlr_ka_time, fip->timer.expires)) 917 if (time_before(fip->ctlr_ka_time, fip->timer.expires))
774 mod_timer(&fip->timer, fip->ctlr_ka_time); 918 mod_timer(&fip->timer, fip->ctlr_ka_time);
775 } else if (!fcoe_ctlr_fcf_usable(fcf)) 919 }
776 fcf->flags = new.flags;
777 fcf->fka_period = new.fka_period; 920 fcf->fka_period = new.fka_period;
778 memcpy(fcf->fcf_mac, new.fcf_mac, ETH_ALEN); 921 memcpy(fcf->fcf_mac, new.fcf_mac, ETH_ALEN);
779 } 922 }
@@ -805,7 +948,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
805 * If this is the first validated FCF, note the time and 948 * If this is the first validated FCF, note the time and
806 * set a timer to trigger selection. 949 * set a timer to trigger selection.
807 */ 950 */
808 if (mtu_valid && !fip->sel_time && fcoe_ctlr_fcf_usable(fcf)) { 951 if (mtu_valid && !fip->sel_fcf && fcoe_ctlr_fcf_usable(fcf)) {
809 fip->sel_time = jiffies + 952 fip->sel_time = jiffies +
810 msecs_to_jiffies(FCOE_CTLR_START_DELAY); 953 msecs_to_jiffies(FCOE_CTLR_START_DELAY);
811 if (!timer_pending(&fip->timer) || 954 if (!timer_pending(&fip->timer) ||
@@ -813,7 +956,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
813 mod_timer(&fip->timer, fip->sel_time); 956 mod_timer(&fip->timer, fip->sel_time);
814 } 957 }
815out: 958out:
816 spin_unlock_bh(&fip->lock); 959 mutex_unlock(&fip->ctlr_mutex);
817} 960}
818 961
819/** 962/**
@@ -837,6 +980,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
837 size_t els_len = 0; 980 size_t els_len = 0;
838 size_t rlen; 981 size_t rlen;
839 size_t dlen; 982 size_t dlen;
983 u32 desc_mask = 0;
984 u32 desc_cnt = 0;
840 985
841 fiph = (struct fip_header *)skb->data; 986 fiph = (struct fip_header *)skb->data;
842 sub = fiph->fip_subcode; 987 sub = fiph->fip_subcode;
@@ -849,27 +994,42 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
849 994
850 desc = (struct fip_desc *)(fiph + 1); 995 desc = (struct fip_desc *)(fiph + 1);
851 while (rlen > 0) { 996 while (rlen > 0) {
997 desc_cnt++;
852 dlen = desc->fip_dlen * FIP_BPW; 998 dlen = desc->fip_dlen * FIP_BPW;
853 if (dlen < sizeof(*desc) || dlen > rlen) 999 if (dlen < sizeof(*desc) || dlen > rlen)
854 goto drop; 1000 goto drop;
1001 /* Drop ELS if there are duplicate critical descriptors */
1002 if (desc->fip_dtype < 32) {
1003 if (desc_mask & 1U << desc->fip_dtype) {
1004 LIBFCOE_FIP_DBG(fip, "Duplicate Critical "
1005 "Descriptors in FIP ELS\n");
1006 goto drop;
1007 }
1008 desc_mask |= (1 << desc->fip_dtype);
1009 }
855 switch (desc->fip_dtype) { 1010 switch (desc->fip_dtype) {
856 case FIP_DT_MAC: 1011 case FIP_DT_MAC:
1012 if (desc_cnt == 1) {
1013 LIBFCOE_FIP_DBG(fip, "FIP descriptors "
1014 "received out of order\n");
1015 goto drop;
1016 }
1017
857 if (dlen != sizeof(struct fip_mac_desc)) 1018 if (dlen != sizeof(struct fip_mac_desc))
858 goto len_err; 1019 goto len_err;
859 memcpy(granted_mac, 1020 memcpy(granted_mac,
860 ((struct fip_mac_desc *)desc)->fd_mac, 1021 ((struct fip_mac_desc *)desc)->fd_mac,
861 ETH_ALEN); 1022 ETH_ALEN);
862 if (!is_valid_ether_addr(granted_mac)) {
863 LIBFCOE_FIP_DBG(fip, "Invalid MAC address "
864 "in FIP ELS\n");
865 goto drop;
866 }
867 memcpy(fr_cb(fp)->granted_mac, granted_mac, ETH_ALEN);
868 break; 1023 break;
869 case FIP_DT_FLOGI: 1024 case FIP_DT_FLOGI:
870 case FIP_DT_FDISC: 1025 case FIP_DT_FDISC:
871 case FIP_DT_LOGO: 1026 case FIP_DT_LOGO:
872 case FIP_DT_ELP: 1027 case FIP_DT_ELP:
1028 if (desc_cnt != 1) {
1029 LIBFCOE_FIP_DBG(fip, "FIP descriptors "
1030 "received out of order\n");
1031 goto drop;
1032 }
873 if (fh) 1033 if (fh)
874 goto drop; 1034 goto drop;
875 if (dlen < sizeof(*els) + sizeof(*fh) + 1) 1035 if (dlen < sizeof(*els) + sizeof(*fh) + 1)
@@ -885,7 +1045,12 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
885 /* standard says ignore unknown descriptors >= 128 */ 1045 /* standard says ignore unknown descriptors >= 128 */
886 if (desc->fip_dtype < FIP_DT_VENDOR_BASE) 1046 if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
887 goto drop; 1047 goto drop;
888 continue; 1048 if (desc_cnt <= 2) {
1049 LIBFCOE_FIP_DBG(fip, "FIP descriptors "
1050 "received out of order\n");
1051 goto drop;
1052 }
1053 break;
889 } 1054 }
890 desc = (struct fip_desc *)((char *)desc + dlen); 1055 desc = (struct fip_desc *)((char *)desc + dlen);
891 rlen -= dlen; 1056 rlen -= dlen;
@@ -895,10 +1060,27 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
895 goto drop; 1060 goto drop;
896 els_op = *(u8 *)(fh + 1); 1061 els_op = *(u8 *)(fh + 1);
897 1062
898 if (els_dtype == FIP_DT_FLOGI && sub == FIP_SC_REP && 1063 if ((els_dtype == FIP_DT_FLOGI || els_dtype == FIP_DT_FDISC) &&
899 fip->flogi_oxid == ntohs(fh->fh_ox_id) && 1064 sub == FIP_SC_REP && els_op == ELS_LS_ACC &&
900 els_op == ELS_LS_ACC && is_valid_ether_addr(granted_mac)) 1065 fip->mode != FIP_MODE_VN2VN) {
901 fip->flogi_oxid = FC_XID_UNKNOWN; 1066 if (!is_valid_ether_addr(granted_mac)) {
1067 LIBFCOE_FIP_DBG(fip,
1068 "Invalid MAC address %pM in FIP ELS\n",
1069 granted_mac);
1070 goto drop;
1071 }
1072 memcpy(fr_cb(fp)->granted_mac, granted_mac, ETH_ALEN);
1073
1074 if (fip->flogi_oxid == ntohs(fh->fh_ox_id))
1075 fip->flogi_oxid = FC_XID_UNKNOWN;
1076 }
1077
1078 if ((desc_cnt == 0) || ((els_op != ELS_LS_RJT) &&
1079 (!(1U << FIP_DT_MAC & desc_mask)))) {
1080 LIBFCOE_FIP_DBG(fip, "Missing critical descriptors "
1081 "in FIP ELS\n");
1082 goto drop;
1083 }
902 1084
903 /* 1085 /*
904 * Convert skb into an fc_frame containing only the ELS. 1086 * Convert skb into an fc_frame containing only the ELS.
@@ -910,6 +1092,7 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
910 fr_sof(fp) = FC_SOF_I3; 1092 fr_sof(fp) = FC_SOF_I3;
911 fr_eof(fp) = FC_EOF_T; 1093 fr_eof(fp) = FC_EOF_T;
912 fr_dev(fp) = lport; 1094 fr_dev(fp) = lport;
1095 fr_encaps(fp) = els_dtype;
913 1096
914 stats = per_cpu_ptr(lport->dev_stats, get_cpu()); 1097 stats = per_cpu_ptr(lport->dev_stats, get_cpu());
915 stats->RxFrames++; 1098 stats->RxFrames++;
@@ -945,7 +1128,9 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
945 size_t dlen; 1128 size_t dlen;
946 struct fcoe_fcf *fcf = fip->sel_fcf; 1129 struct fcoe_fcf *fcf = fip->sel_fcf;
947 struct fc_lport *lport = fip->lp; 1130 struct fc_lport *lport = fip->lp;
948 u32 desc_mask; 1131 struct fc_lport *vn_port = NULL;
1132 u32 desc_mask;
1133 int is_vn_port = 0;
949 1134
950 LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n"); 1135 LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n");
951 1136
@@ -963,6 +1148,13 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
963 dlen = desc->fip_dlen * FIP_BPW; 1148 dlen = desc->fip_dlen * FIP_BPW;
964 if (dlen > rlen) 1149 if (dlen > rlen)
965 return; 1150 return;
1151 /* Drop CVL if there are duplicate critical descriptors */
1152 if ((desc->fip_dtype < 32) &&
1153 !(desc_mask & 1U << desc->fip_dtype)) {
1154 LIBFCOE_FIP_DBG(fip, "Duplicate Critical "
1155 "Descriptors in FIP CVL\n");
1156 return;
1157 }
966 switch (desc->fip_dtype) { 1158 switch (desc->fip_dtype) {
967 case FIP_DT_MAC: 1159 case FIP_DT_MAC:
968 mp = (struct fip_mac_desc *)desc; 1160 mp = (struct fip_mac_desc *)desc;
@@ -987,8 +1179,26 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
987 if (compare_ether_addr(vp->fd_mac, 1179 if (compare_ether_addr(vp->fd_mac,
988 fip->get_src_addr(lport)) == 0 && 1180 fip->get_src_addr(lport)) == 0 &&
989 get_unaligned_be64(&vp->fd_wwpn) == lport->wwpn && 1181 get_unaligned_be64(&vp->fd_wwpn) == lport->wwpn &&
990 ntoh24(vp->fd_fc_id) == lport->port_id) 1182 ntoh24(vp->fd_fc_id) == lport->port_id) {
991 desc_mask &= ~BIT(FIP_DT_VN_ID); 1183 desc_mask &= ~BIT(FIP_DT_VN_ID);
1184 break;
1185 }
1186 /* check if clr_vlink is for NPIV port */
1187 mutex_lock(&lport->lp_mutex);
1188 list_for_each_entry(vn_port, &lport->vports, list) {
1189 if (compare_ether_addr(vp->fd_mac,
1190 fip->get_src_addr(vn_port)) == 0 &&
1191 (get_unaligned_be64(&vp->fd_wwpn)
1192 == vn_port->wwpn) &&
1193 (ntoh24(vp->fd_fc_id) ==
1194 fc_host_port_id(vn_port->host))) {
1195 desc_mask &= ~BIT(FIP_DT_VN_ID);
1196 is_vn_port = 1;
1197 break;
1198 }
1199 }
1200 mutex_unlock(&lport->lp_mutex);
1201
992 break; 1202 break;
993 default: 1203 default:
994 /* standard says ignore unknown descriptors >= 128 */ 1204 /* standard says ignore unknown descriptors >= 128 */
@@ -1009,14 +1219,19 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
1009 } else { 1219 } else {
1010 LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n"); 1220 LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n");
1011 1221
1012 spin_lock_bh(&fip->lock); 1222 if (is_vn_port)
1013 per_cpu_ptr(lport->dev_stats, 1223 fc_lport_reset(vn_port);
1014 smp_processor_id())->VLinkFailureCount++; 1224 else {
1015 fcoe_ctlr_reset(fip); 1225 mutex_lock(&fip->ctlr_mutex);
1016 spin_unlock_bh(&fip->lock); 1226 per_cpu_ptr(lport->dev_stats,
1017 1227 get_cpu())->VLinkFailureCount++;
1018 fc_lport_reset(fip->lp); 1228 put_cpu();
1019 fcoe_ctlr_solicit(fip, NULL); 1229 fcoe_ctlr_reset(fip);
1230 mutex_unlock(&fip->ctlr_mutex);
1231
1232 fc_lport_reset(fip->lp);
1233 fcoe_ctlr_solicit(fip, NULL);
1234 }
1020 } 1235 }
1021} 1236}
1022 1237
@@ -1054,8 +1269,13 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb)
1054 if (skb->len < sizeof(*fiph)) 1269 if (skb->len < sizeof(*fiph))
1055 goto drop; 1270 goto drop;
1056 eh = eth_hdr(skb); 1271 eh = eth_hdr(skb);
1057 if (compare_ether_addr(eh->h_dest, fip->ctl_src_addr) && 1272 if (fip->mode == FIP_MODE_VN2VN) {
1058 compare_ether_addr(eh->h_dest, FIP_ALL_ENODE_MACS)) 1273 if (compare_ether_addr(eh->h_dest, fip->ctl_src_addr) &&
1274 compare_ether_addr(eh->h_dest, fcoe_all_vn2vn) &&
1275 compare_ether_addr(eh->h_dest, fcoe_all_p2p))
1276 goto drop;
1277 } else if (compare_ether_addr(eh->h_dest, fip->ctl_src_addr) &&
1278 compare_ether_addr(eh->h_dest, fcoe_all_enode))
1059 goto drop; 1279 goto drop;
1060 fiph = (struct fip_header *)skb->data; 1280 fiph = (struct fip_header *)skb->data;
1061 op = ntohs(fiph->fip_op); 1281 op = ntohs(fiph->fip_op);
@@ -1066,22 +1286,31 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb)
1066 if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len) 1286 if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
1067 goto drop; 1287 goto drop;
1068 1288
1069 spin_lock_bh(&fip->lock); 1289 mutex_lock(&fip->ctlr_mutex);
1070 state = fip->state; 1290 state = fip->state;
1071 if (state == FIP_ST_AUTO) { 1291 if (state == FIP_ST_AUTO) {
1072 fip->map_dest = 0; 1292 fip->map_dest = 0;
1073 fip->state = FIP_ST_ENABLED; 1293 fcoe_ctlr_set_state(fip, FIP_ST_ENABLED);
1074 state = FIP_ST_ENABLED; 1294 state = FIP_ST_ENABLED;
1075 LIBFCOE_FIP_DBG(fip, "Using FIP mode\n"); 1295 LIBFCOE_FIP_DBG(fip, "Using FIP mode\n");
1076 } 1296 }
1077 spin_unlock_bh(&fip->lock); 1297 mutex_unlock(&fip->ctlr_mutex);
1078 if (state != FIP_ST_ENABLED) 1298
1299 if (fip->mode == FIP_MODE_VN2VN && op == FIP_OP_VN2VN)
1300 return fcoe_ctlr_vn_recv(fip, skb);
1301
1302 if (state != FIP_ST_ENABLED && state != FIP_ST_VNMP_UP &&
1303 state != FIP_ST_VNMP_CLAIM)
1079 goto drop; 1304 goto drop;
1080 1305
1081 if (op == FIP_OP_LS) { 1306 if (op == FIP_OP_LS) {
1082 fcoe_ctlr_recv_els(fip, skb); /* consumes skb */ 1307 fcoe_ctlr_recv_els(fip, skb); /* consumes skb */
1083 return 0; 1308 return 0;
1084 } 1309 }
1310
1311 if (state != FIP_ST_ENABLED)
1312 goto drop;
1313
1085 if (op == FIP_OP_DISC && sub == FIP_SC_ADV) 1314 if (op == FIP_OP_DISC && sub == FIP_SC_ADV)
1086 fcoe_ctlr_recv_adv(fip, skb); 1315 fcoe_ctlr_recv_adv(fip, skb);
1087 else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) 1316 else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK)
@@ -1140,30 +1369,53 @@ static void fcoe_ctlr_select(struct fcoe_ctlr *fip)
1140/** 1369/**
1141 * fcoe_ctlr_timeout() - FIP timeout handler 1370 * fcoe_ctlr_timeout() - FIP timeout handler
1142 * @arg: The FCoE controller that timed out 1371 * @arg: The FCoE controller that timed out
1143 *
1144 * Ages FCFs. Triggers FCF selection if possible. Sends keep-alives.
1145 */ 1372 */
1146static void fcoe_ctlr_timeout(unsigned long arg) 1373static void fcoe_ctlr_timeout(unsigned long arg)
1147{ 1374{
1148 struct fcoe_ctlr *fip = (struct fcoe_ctlr *)arg; 1375 struct fcoe_ctlr *fip = (struct fcoe_ctlr *)arg;
1376
1377 schedule_work(&fip->timer_work);
1378}
1379
1380/**
1381 * fcoe_ctlr_timer_work() - Worker thread function for timer work
1382 * @work: Handle to a FCoE controller
1383 *
1384 * Ages FCFs. Triggers FCF selection if possible.
1385 * Sends keep-alives and resets.
1386 */
1387static void fcoe_ctlr_timer_work(struct work_struct *work)
1388{
1389 struct fcoe_ctlr *fip;
1390 struct fc_lport *vport;
1391 u8 *mac;
1392 u8 reset = 0;
1393 u8 send_ctlr_ka = 0;
1394 u8 send_port_ka = 0;
1149 struct fcoe_fcf *sel; 1395 struct fcoe_fcf *sel;
1150 struct fcoe_fcf *fcf; 1396 struct fcoe_fcf *fcf;
1151 unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD); 1397 unsigned long next_timer;
1152 1398
1153 spin_lock_bh(&fip->lock); 1399 fip = container_of(work, struct fcoe_ctlr, timer_work);
1400 if (fip->mode == FIP_MODE_VN2VN)
1401 return fcoe_ctlr_vn_timeout(fip);
1402 mutex_lock(&fip->ctlr_mutex);
1154 if (fip->state == FIP_ST_DISABLED) { 1403 if (fip->state == FIP_ST_DISABLED) {
1155 spin_unlock_bh(&fip->lock); 1404 mutex_unlock(&fip->ctlr_mutex);
1156 return; 1405 return;
1157 } 1406 }
1158 1407
1159 fcf = fip->sel_fcf; 1408 fcf = fip->sel_fcf;
1160 fcoe_ctlr_age_fcfs(fip); 1409 next_timer = fcoe_ctlr_age_fcfs(fip);
1161 1410
1162 sel = fip->sel_fcf; 1411 sel = fip->sel_fcf;
1163 if (!sel && fip->sel_time && time_after_eq(jiffies, fip->sel_time)) { 1412 if (!sel && fip->sel_time) {
1164 fcoe_ctlr_select(fip); 1413 if (time_after_eq(jiffies, fip->sel_time)) {
1165 sel = fip->sel_fcf; 1414 fcoe_ctlr_select(fip);
1166 fip->sel_time = 0; 1415 sel = fip->sel_fcf;
1416 fip->sel_time = 0;
1417 } else if (time_after(next_timer, fip->sel_time))
1418 next_timer = fip->sel_time;
1167 } 1419 }
1168 1420
1169 if (sel != fcf) { 1421 if (sel != fcf) {
@@ -1173,23 +1425,25 @@ static void fcoe_ctlr_timeout(unsigned long arg)
1173 "Fibre-Channel Forwarder MAC %pM\n", 1425 "Fibre-Channel Forwarder MAC %pM\n",
1174 fip->lp->host->host_no, sel->fcf_mac); 1426 fip->lp->host->host_no, sel->fcf_mac);
1175 memcpy(fip->dest_addr, sel->fcf_mac, ETH_ALEN); 1427 memcpy(fip->dest_addr, sel->fcf_mac, ETH_ALEN);
1428 fip->map_dest = 0;
1176 fip->port_ka_time = jiffies + 1429 fip->port_ka_time = jiffies +
1177 msecs_to_jiffies(FIP_VN_KA_PERIOD); 1430 msecs_to_jiffies(FIP_VN_KA_PERIOD);
1178 fip->ctlr_ka_time = jiffies + sel->fka_period; 1431 fip->ctlr_ka_time = jiffies + sel->fka_period;
1432 if (time_after(next_timer, fip->ctlr_ka_time))
1433 next_timer = fip->ctlr_ka_time;
1179 } else { 1434 } else {
1180 printk(KERN_NOTICE "libfcoe: host%d: " 1435 printk(KERN_NOTICE "libfcoe: host%d: "
1181 "FIP Fibre-Channel Forwarder timed out. " 1436 "FIP Fibre-Channel Forwarder timed out. "
1182 "Starting FCF discovery.\n", 1437 "Starting FCF discovery.\n",
1183 fip->lp->host->host_no); 1438 fip->lp->host->host_no);
1184 fip->reset_req = 1; 1439 reset = 1;
1185 schedule_work(&fip->timer_work);
1186 } 1440 }
1187 } 1441 }
1188 1442
1189 if (sel && !sel->fd_flags) { 1443 if (sel && !sel->fd_flags) {
1190 if (time_after_eq(jiffies, fip->ctlr_ka_time)) { 1444 if (time_after_eq(jiffies, fip->ctlr_ka_time)) {
1191 fip->ctlr_ka_time = jiffies + sel->fka_period; 1445 fip->ctlr_ka_time = jiffies + sel->fka_period;
1192 fip->send_ctlr_ka = 1; 1446 send_ctlr_ka = 1;
1193 } 1447 }
1194 if (time_after(next_timer, fip->ctlr_ka_time)) 1448 if (time_after(next_timer, fip->ctlr_ka_time))
1195 next_timer = fip->ctlr_ka_time; 1449 next_timer = fip->ctlr_ka_time;
@@ -1197,50 +1451,25 @@ static void fcoe_ctlr_timeout(unsigned long arg)
1197 if (time_after_eq(jiffies, fip->port_ka_time)) { 1451 if (time_after_eq(jiffies, fip->port_ka_time)) {
1198 fip->port_ka_time = jiffies + 1452 fip->port_ka_time = jiffies +
1199 msecs_to_jiffies(FIP_VN_KA_PERIOD); 1453 msecs_to_jiffies(FIP_VN_KA_PERIOD);
1200 fip->send_port_ka = 1; 1454 send_port_ka = 1;
1201 } 1455 }
1202 if (time_after(next_timer, fip->port_ka_time)) 1456 if (time_after(next_timer, fip->port_ka_time))
1203 next_timer = fip->port_ka_time; 1457 next_timer = fip->port_ka_time;
1204 mod_timer(&fip->timer, next_timer);
1205 } else if (fip->sel_time) {
1206 next_timer = fip->sel_time +
1207 msecs_to_jiffies(FCOE_CTLR_START_DELAY);
1208 mod_timer(&fip->timer, next_timer);
1209 } 1458 }
1210 if (fip->send_ctlr_ka || fip->send_port_ka) 1459 if (!list_empty(&fip->fcfs))
1211 schedule_work(&fip->timer_work); 1460 mod_timer(&fip->timer, next_timer);
1212 spin_unlock_bh(&fip->lock); 1461 mutex_unlock(&fip->ctlr_mutex);
1213}
1214
1215/**
1216 * fcoe_ctlr_timer_work() - Worker thread function for timer work
1217 * @work: Handle to a FCoE controller
1218 *
1219 * Sends keep-alives and resets which must not
1220 * be called from the timer directly, since they use a mutex.
1221 */
1222static void fcoe_ctlr_timer_work(struct work_struct *work)
1223{
1224 struct fcoe_ctlr *fip;
1225 struct fc_lport *vport;
1226 u8 *mac;
1227 int reset;
1228
1229 fip = container_of(work, struct fcoe_ctlr, timer_work);
1230 spin_lock_bh(&fip->lock);
1231 reset = fip->reset_req;
1232 fip->reset_req = 0;
1233 spin_unlock_bh(&fip->lock);
1234 1462
1235 if (reset) 1463 if (reset) {
1236 fc_lport_reset(fip->lp); 1464 fc_lport_reset(fip->lp);
1465 /* restart things with a solicitation */
1466 fcoe_ctlr_solicit(fip, NULL);
1467 }
1237 1468
1238 if (fip->send_ctlr_ka) { 1469 if (send_ctlr_ka)
1239 fip->send_ctlr_ka = 0;
1240 fcoe_ctlr_send_keep_alive(fip, NULL, 0, fip->ctl_src_addr); 1470 fcoe_ctlr_send_keep_alive(fip, NULL, 0, fip->ctl_src_addr);
1241 } 1471
1242 if (fip->send_port_ka) { 1472 if (send_port_ka) {
1243 fip->send_port_ka = 0;
1244 mutex_lock(&fip->lp->lp_mutex); 1473 mutex_lock(&fip->lp->lp_mutex);
1245 mac = fip->get_src_addr(fip->lp); 1474 mac = fip->get_src_addr(fip->lp);
1246 fcoe_ctlr_send_keep_alive(fip, fip->lp, 1, mac); 1475 fcoe_ctlr_send_keep_alive(fip, fip->lp, 1, mac);
@@ -1297,12 +1526,12 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_lport *lport,
1297 if (op == ELS_LS_ACC && fh->fh_r_ctl == FC_RCTL_ELS_REP && 1526 if (op == ELS_LS_ACC && fh->fh_r_ctl == FC_RCTL_ELS_REP &&
1298 fip->flogi_oxid == ntohs(fh->fh_ox_id)) { 1527 fip->flogi_oxid == ntohs(fh->fh_ox_id)) {
1299 1528
1300 spin_lock_bh(&fip->lock); 1529 mutex_lock(&fip->ctlr_mutex);
1301 if (fip->state != FIP_ST_AUTO && fip->state != FIP_ST_NON_FIP) { 1530 if (fip->state != FIP_ST_AUTO && fip->state != FIP_ST_NON_FIP) {
1302 spin_unlock_bh(&fip->lock); 1531 mutex_unlock(&fip->ctlr_mutex);
1303 return -EINVAL; 1532 return -EINVAL;
1304 } 1533 }
1305 fip->state = FIP_ST_NON_FIP; 1534 fcoe_ctlr_set_state(fip, FIP_ST_NON_FIP);
1306 LIBFCOE_FIP_DBG(fip, 1535 LIBFCOE_FIP_DBG(fip,
1307 "received FLOGI LS_ACC using non-FIP mode\n"); 1536 "received FLOGI LS_ACC using non-FIP mode\n");
1308 1537
@@ -1313,28 +1542,28 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_lport *lport,
1313 * Otherwise we use the FCoE gateway addr 1542 * Otherwise we use the FCoE gateway addr
1314 */ 1543 */
1315 if (!compare_ether_addr(sa, (u8[6])FC_FCOE_FLOGI_MAC)) { 1544 if (!compare_ether_addr(sa, (u8[6])FC_FCOE_FLOGI_MAC)) {
1316 fip->map_dest = 1; 1545 fcoe_ctlr_map_dest(fip);
1317 } else { 1546 } else {
1318 memcpy(fip->dest_addr, sa, ETH_ALEN); 1547 memcpy(fip->dest_addr, sa, ETH_ALEN);
1319 fip->map_dest = 0; 1548 fip->map_dest = 0;
1320 } 1549 }
1321 fip->flogi_oxid = FC_XID_UNKNOWN; 1550 fip->flogi_oxid = FC_XID_UNKNOWN;
1322 spin_unlock_bh(&fip->lock); 1551 mutex_unlock(&fip->ctlr_mutex);
1323 fc_fcoe_set_mac(fr_cb(fp)->granted_mac, fh->fh_d_id); 1552 fc_fcoe_set_mac(fr_cb(fp)->granted_mac, fh->fh_d_id);
1324 } else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) { 1553 } else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) {
1325 /* 1554 /*
1326 * Save source MAC for point-to-point responses. 1555 * Save source MAC for point-to-point responses.
1327 */ 1556 */
1328 spin_lock_bh(&fip->lock); 1557 mutex_lock(&fip->ctlr_mutex);
1329 if (fip->state == FIP_ST_AUTO || fip->state == FIP_ST_NON_FIP) { 1558 if (fip->state == FIP_ST_AUTO || fip->state == FIP_ST_NON_FIP) {
1330 memcpy(fip->dest_addr, sa, ETH_ALEN); 1559 memcpy(fip->dest_addr, sa, ETH_ALEN);
1331 fip->map_dest = 0; 1560 fip->map_dest = 0;
1332 if (fip->state == FIP_ST_AUTO) 1561 if (fip->state == FIP_ST_AUTO)
1333 LIBFCOE_FIP_DBG(fip, "received non-FIP FLOGI. " 1562 LIBFCOE_FIP_DBG(fip, "received non-FIP FLOGI. "
1334 "Setting non-FIP mode\n"); 1563 "Setting non-FIP mode\n");
1335 fip->state = FIP_ST_NON_FIP; 1564 fcoe_ctlr_set_state(fip, FIP_ST_NON_FIP);
1336 } 1565 }
1337 spin_unlock_bh(&fip->lock); 1566 mutex_unlock(&fip->ctlr_mutex);
1338 } 1567 }
1339 return 0; 1568 return 0;
1340} 1569}
@@ -1382,26 +1611,916 @@ u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN],
1382EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac); 1611EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac);
1383 1612
1384/** 1613/**
1614 * fcoe_ctlr_rport() - return the fcoe_rport for a given fc_rport_priv
1615 * @rdata: libfc remote port
1616 */
1617static inline struct fcoe_rport *fcoe_ctlr_rport(struct fc_rport_priv *rdata)
1618{
1619 return (struct fcoe_rport *)(rdata + 1);
1620}
1621
1622/**
1623 * fcoe_ctlr_vn_send() - Send a FIP VN2VN Probe Request or Reply.
1624 * @fip: The FCoE controller
1625 * @sub: sub-opcode for probe request, reply, or advertisement.
1626 * @dest: The destination Ethernet MAC address
1627 * @min_len: minimum size of the Ethernet payload to be sent
1628 */
1629static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip,
1630 enum fip_vn2vn_subcode sub,
1631 const u8 *dest, size_t min_len)
1632{
1633 struct sk_buff *skb;
1634 struct fip_frame {
1635 struct ethhdr eth;
1636 struct fip_header fip;
1637 struct fip_mac_desc mac;
1638 struct fip_wwn_desc wwnn;
1639 struct fip_vn_desc vn;
1640 } __attribute__((packed)) *frame;
1641 struct fip_fc4_feat *ff;
1642 struct fip_size_desc *size;
1643 u32 fcp_feat;
1644 size_t len;
1645 size_t dlen;
1646
1647 len = sizeof(*frame);
1648 dlen = 0;
1649 if (sub == FIP_SC_VN_CLAIM_NOTIFY || sub == FIP_SC_VN_CLAIM_REP) {
1650 dlen = sizeof(struct fip_fc4_feat) +
1651 sizeof(struct fip_size_desc);
1652 len += dlen;
1653 }
1654 dlen += sizeof(frame->mac) + sizeof(frame->wwnn) + sizeof(frame->vn);
1655 len = max(len, min_len + sizeof(struct ethhdr));
1656
1657 skb = dev_alloc_skb(len);
1658 if (!skb)
1659 return;
1660
1661 frame = (struct fip_frame *)skb->data;
1662 memset(frame, 0, len);
1663 memcpy(frame->eth.h_dest, dest, ETH_ALEN);
1664 memcpy(frame->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
1665 frame->eth.h_proto = htons(ETH_P_FIP);
1666
1667 frame->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
1668 frame->fip.fip_op = htons(FIP_OP_VN2VN);
1669 frame->fip.fip_subcode = sub;
1670 frame->fip.fip_dl_len = htons(dlen / FIP_BPW);
1671
1672 frame->mac.fd_desc.fip_dtype = FIP_DT_MAC;
1673 frame->mac.fd_desc.fip_dlen = sizeof(frame->mac) / FIP_BPW;
1674 memcpy(frame->mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);
1675
1676 frame->wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
1677 frame->wwnn.fd_desc.fip_dlen = sizeof(frame->wwnn) / FIP_BPW;
1678 put_unaligned_be64(fip->lp->wwnn, &frame->wwnn.fd_wwn);
1679
1680 frame->vn.fd_desc.fip_dtype = FIP_DT_VN_ID;
1681 frame->vn.fd_desc.fip_dlen = sizeof(frame->vn) / FIP_BPW;
1682 hton24(frame->vn.fd_mac, FIP_VN_FC_MAP);
1683 hton24(frame->vn.fd_mac + 3, fip->port_id);
1684 hton24(frame->vn.fd_fc_id, fip->port_id);
1685 put_unaligned_be64(fip->lp->wwpn, &frame->vn.fd_wwpn);
1686
1687 /*
1688 * For claims, add FC-4 features.
1689 * TBD: Add interface to get fc-4 types and features from libfc.
1690 */
1691 if (sub == FIP_SC_VN_CLAIM_NOTIFY || sub == FIP_SC_VN_CLAIM_REP) {
1692 ff = (struct fip_fc4_feat *)(frame + 1);
1693 ff->fd_desc.fip_dtype = FIP_DT_FC4F;
1694 ff->fd_desc.fip_dlen = sizeof(*ff) / FIP_BPW;
1695 ff->fd_fts = fip->lp->fcts;
1696
1697 fcp_feat = 0;
1698 if (fip->lp->service_params & FCP_SPPF_INIT_FCN)
1699 fcp_feat |= FCP_FEAT_INIT;
1700 if (fip->lp->service_params & FCP_SPPF_TARG_FCN)
1701 fcp_feat |= FCP_FEAT_TARG;
1702 fcp_feat <<= (FC_TYPE_FCP * 4) % 32;
1703 ff->fd_ff.fd_feat[FC_TYPE_FCP * 4 / 32] = htonl(fcp_feat);
1704
1705 size = (struct fip_size_desc *)(ff + 1);
1706 size->fd_desc.fip_dtype = FIP_DT_FCOE_SIZE;
1707 size->fd_desc.fip_dlen = sizeof(*size) / FIP_BPW;
1708 size->fd_size = htons(fcoe_ctlr_fcoe_size(fip));
1709 }
1710
1711 skb_put(skb, len);
1712 skb->protocol = htons(ETH_P_FIP);
1713 skb_reset_mac_header(skb);
1714 skb_reset_network_header(skb);
1715
1716 fip->send(fip, skb);
1717}
1718
1719/**
1720 * fcoe_ctlr_vn_rport_callback - Event handler for rport events.
1721 * @lport: The lport which is receiving the event
1722 * @rdata: remote port private data
1723 * @event: The event that occured
1724 *
1725 * Locking Note: The rport lock must not be held when calling this function.
1726 */
1727static void fcoe_ctlr_vn_rport_callback(struct fc_lport *lport,
1728 struct fc_rport_priv *rdata,
1729 enum fc_rport_event event)
1730{
1731 struct fcoe_ctlr *fip = lport->disc.priv;
1732 struct fcoe_rport *frport = fcoe_ctlr_rport(rdata);
1733
1734 LIBFCOE_FIP_DBG(fip, "vn_rport_callback %x event %d\n",
1735 rdata->ids.port_id, event);
1736
1737 mutex_lock(&fip->ctlr_mutex);
1738 switch (event) {
1739 case RPORT_EV_READY:
1740 frport->login_count = 0;
1741 break;
1742 case RPORT_EV_LOGO:
1743 case RPORT_EV_FAILED:
1744 case RPORT_EV_STOP:
1745 frport->login_count++;
1746 if (frport->login_count > FCOE_CTLR_VN2VN_LOGIN_LIMIT) {
1747 LIBFCOE_FIP_DBG(fip,
1748 "rport FLOGI limited port_id %6.6x\n",
1749 rdata->ids.port_id);
1750 lport->tt.rport_logoff(rdata);
1751 }
1752 break;
1753 default:
1754 break;
1755 }
1756 mutex_unlock(&fip->ctlr_mutex);
1757}
1758
1759static struct fc_rport_operations fcoe_ctlr_vn_rport_ops = {
1760 .event_callback = fcoe_ctlr_vn_rport_callback,
1761};
1762
1763/**
1764 * fcoe_ctlr_disc_stop_locked() - stop discovery in VN2VN mode
1765 * @fip: The FCoE controller
1766 *
1767 * Called with ctlr_mutex held.
1768 */
1769static void fcoe_ctlr_disc_stop_locked(struct fc_lport *lport)
1770{
1771 mutex_lock(&lport->disc.disc_mutex);
1772 lport->disc.disc_callback = NULL;
1773 mutex_unlock(&lport->disc.disc_mutex);
1774}
1775
1776/**
1777 * fcoe_ctlr_disc_stop() - stop discovery in VN2VN mode
1778 * @fip: The FCoE controller
1779 *
1780 * Called through the local port template for discovery.
1781 * Called without the ctlr_mutex held.
1782 */
1783static void fcoe_ctlr_disc_stop(struct fc_lport *lport)
1784{
1785 struct fcoe_ctlr *fip = lport->disc.priv;
1786
1787 mutex_lock(&fip->ctlr_mutex);
1788 fcoe_ctlr_disc_stop_locked(lport);
1789 mutex_unlock(&fip->ctlr_mutex);
1790}
1791
1792/**
1793 * fcoe_ctlr_disc_stop_final() - stop discovery for shutdown in VN2VN mode
1794 * @fip: The FCoE controller
1795 *
1796 * Called through the local port template for discovery.
1797 * Called without the ctlr_mutex held.
1798 */
1799static void fcoe_ctlr_disc_stop_final(struct fc_lport *lport)
1800{
1801 fcoe_ctlr_disc_stop(lport);
1802 lport->tt.rport_flush_queue();
1803 synchronize_rcu();
1804}
1805
1806/**
1807 * fcoe_ctlr_vn_restart() - VN2VN probe restart with new port_id
1808 * @fip: The FCoE controller
1809 *
1810 * Called with fcoe_ctlr lock held.
1811 */
1812static void fcoe_ctlr_vn_restart(struct fcoe_ctlr *fip)
1813{
1814 unsigned long wait;
1815 u32 port_id;
1816
1817 fcoe_ctlr_disc_stop_locked(fip->lp);
1818
1819 /*
1820 * Get proposed port ID.
1821 * If this is the first try after link up, use any previous port_id.
1822 * If there was none, use the low bits of the port_name.
1823 * On subsequent tries, get the next random one.
1824 * Don't use reserved IDs, use another non-zero value, just as random.
1825 */
1826 port_id = fip->port_id;
1827 if (fip->probe_tries)
1828 port_id = prandom32(&fip->rnd_state) & 0xffff;
1829 else if (!port_id)
1830 port_id = fip->lp->wwpn & 0xffff;
1831 if (!port_id || port_id == 0xffff)
1832 port_id = 1;
1833 fip->port_id = port_id;
1834
1835 if (fip->probe_tries < FIP_VN_RLIM_COUNT) {
1836 fip->probe_tries++;
1837 wait = random32() % FIP_VN_PROBE_WAIT;
1838 } else
1839 wait = FIP_VN_RLIM_INT;
1840 mod_timer(&fip->timer, jiffies + msecs_to_jiffies(wait));
1841 fcoe_ctlr_set_state(fip, FIP_ST_VNMP_START);
1842}
1843
1844/**
1845 * fcoe_ctlr_vn_start() - Start in VN2VN mode
1846 * @fip: The FCoE controller
1847 *
1848 * Called with fcoe_ctlr lock held.
1849 */
1850static void fcoe_ctlr_vn_start(struct fcoe_ctlr *fip)
1851{
1852 fip->probe_tries = 0;
1853 prandom32_seed(&fip->rnd_state, fip->lp->wwpn);
1854 fcoe_ctlr_vn_restart(fip);
1855}
1856
/**
 * fcoe_ctlr_vn_parse - parse probe request or response
 * @fip: The FCoE controller
 * @skb: incoming packet
 * @rdata: buffer for resulting parsed VN entry plus fcoe_rport
 *
 * Validates the FIP VN2VN frame's subcode and descriptor list, filling in
 * @rdata (and the fcoe_rport that trails it) from the MAC, WWN, VN_ID and
 * FCoE-size descriptors found.
 *
 * Returns non-zero error number on error.
 * Does not consume the packet.
 */
static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
			      struct sk_buff *skb,
			      struct fc_rport_priv *rdata)
{
	struct fip_header *fiph;
	struct fip_desc *desc = NULL;
	struct fip_mac_desc *macd = NULL;
	struct fip_wwn_desc *wwn = NULL;
	struct fip_vn_desc *vn = NULL;
	struct fip_size_desc *size = NULL;
	struct fcoe_rport *frport;
	size_t rlen;
	size_t dlen;
	u32 desc_mask = 0;
	u32 dtype;
	u8 sub;

	/* rdata is expected to have sizeof(*frport) of storage behind it */
	memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
	frport = fcoe_ctlr_rport(rdata);

	fiph = (struct fip_header *)skb->data;
	frport->flags = ntohs(fiph->fip_flags);

	/*
	 * desc_mask holds one bit per descriptor type that the subcode
	 * requires; bits are cleared as descriptors are seen, so a repeated
	 * descriptor fails the mask test below.
	 */
	sub = fiph->fip_subcode;
	switch (sub) {
	case FIP_SC_VN_PROBE_REQ:
	case FIP_SC_VN_PROBE_REP:
	case FIP_SC_VN_BEACON:
		desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) |
			    BIT(FIP_DT_VN_ID);
		break;
	case FIP_SC_VN_CLAIM_NOTIFY:
	case FIP_SC_VN_CLAIM_REP:
		desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) |
			    BIT(FIP_DT_VN_ID) | BIT(FIP_DT_FC4F) |
			    BIT(FIP_DT_FCOE_SIZE);
		break;
	default:
		LIBFCOE_FIP_DBG(fip, "vn_parse unknown subcode %u\n", sub);
		return -EINVAL;
	}

	/* descriptor list length is in 4-byte words; it must fit in the skb */
	rlen = ntohs(fiph->fip_dl_len) * 4;
	if (rlen + sizeof(*fiph) > skb->len)
		return -EINVAL;

	desc = (struct fip_desc *)(fiph + 1);
	while (rlen > 0) {
		/* each descriptor's own length must be sane and in bounds */
		dlen = desc->fip_dlen * FIP_BPW;
		if (dlen < sizeof(*desc) || dlen > rlen)
			return -EINVAL;

		dtype = desc->fip_dtype;
		if (dtype < 32) {
			if (!(desc_mask & BIT(dtype))) {
				LIBFCOE_FIP_DBG(fip,
						"unexpected or duplicated desc "
						"desc type %u in "
						"FIP VN2VN subtype %u\n",
						dtype, sub);
				return -EINVAL;
			}
			desc_mask &= ~BIT(dtype);
		}

		switch (dtype) {
		case FIP_DT_MAC:
			if (dlen != sizeof(struct fip_mac_desc))
				goto len_err;
			macd = (struct fip_mac_desc *)desc;
			if (!is_valid_ether_addr(macd->fd_mac)) {
				LIBFCOE_FIP_DBG(fip,
					"Invalid MAC addr %pM in FIP VN2VN\n",
					 macd->fd_mac);
				return -EINVAL;
			}
			memcpy(frport->enode_mac, macd->fd_mac, ETH_ALEN);
			break;
		case FIP_DT_NAME:
			if (dlen != sizeof(struct fip_wwn_desc))
				goto len_err;
			wwn = (struct fip_wwn_desc *)desc;
			rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn);
			break;
		case FIP_DT_VN_ID:
			if (dlen != sizeof(struct fip_vn_desc))
				goto len_err;
			vn = (struct fip_vn_desc *)desc;
			memcpy(frport->vn_mac, vn->fd_mac, ETH_ALEN);
			rdata->ids.port_id = ntoh24(vn->fd_fc_id);
			rdata->ids.port_name = get_unaligned_be64(&vn->fd_wwpn);
			break;
		case FIP_DT_FC4F:
			/* FC-4 features: length-checked but otherwise ignored */
			if (dlen != sizeof(struct fip_fc4_feat))
				goto len_err;
			break;
		case FIP_DT_FCOE_SIZE:
			if (dlen != sizeof(struct fip_size_desc))
				goto len_err;
			size = (struct fip_size_desc *)desc;
			frport->fcoe_len = ntohs(size->fd_size);
			break;
		default:
			LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x "
					"in FIP probe\n", dtype);
			/* standard says ignore unknown descriptors >= 128 */
			if (dtype < FIP_DT_VENDOR_BASE)
				return -EINVAL;
			break;
		}
		desc = (struct fip_desc *)((char *)desc + dlen);
		rlen -= dlen;
	}
	/* NOTE(review): mandatory descriptors left in desc_mask are not
	 * rejected here — presumably callers tolerate partial entries;
	 * confirm against the FIP spec. */
	return 0;

len_err:
	LIBFCOE_FIP_DBG(fip, "FIP length error in descriptor type %x len %zu\n",
			dtype, dlen);
	return -EINVAL;
}
1986
/**
 * fcoe_ctlr_vn_send_claim() - send multicast FIP VN2VN Claim Notification.
 * @fip: The FCoE controller
 *
 * Called with ctlr_mutex held.
 */
static void fcoe_ctlr_vn_send_claim(struct fcoe_ctlr *fip)
{
	fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_NOTIFY, fcoe_all_vn2vn, 0);
	/* Remember when we last sent, to rate-limit claim notifications. */
	fip->sol_time = jiffies;
}
1998
1999/**
2000 * fcoe_ctlr_vn_probe_req() - handle incoming VN2VN probe request.
2001 * @fip: The FCoE controller
2002 * @rdata: parsed remote port with frport from the probe request
2003 *
2004 * Called with ctlr_mutex held.
2005 */
2006static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip,
2007 struct fc_rport_priv *rdata)
2008{
2009 struct fcoe_rport *frport = fcoe_ctlr_rport(rdata);
2010
2011 if (rdata->ids.port_id != fip->port_id)
2012 return;
2013
2014 switch (fip->state) {
2015 case FIP_ST_VNMP_CLAIM:
2016 case FIP_ST_VNMP_UP:
2017 fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REP,
2018 frport->enode_mac, 0);
2019 break;
2020 case FIP_ST_VNMP_PROBE1:
2021 case FIP_ST_VNMP_PROBE2:
2022 /*
2023 * Decide whether to reply to the Probe.
2024 * Our selected address is never a "recorded" one, so
2025 * only reply if our WWPN is greater and the
2026 * Probe's REC bit is not set.
2027 * If we don't reply, we will change our address.
2028 */
2029 if (fip->lp->wwpn > rdata->ids.port_name &&
2030 !(frport->flags & FIP_FL_REC_OR_P2P)) {
2031 fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REP,
2032 frport->enode_mac, 0);
2033 break;
2034 }
2035 /* fall through */
2036 case FIP_ST_VNMP_START:
2037 fcoe_ctlr_vn_restart(fip);
2038 break;
2039 default:
2040 break;
2041 }
2042}
2043
2044/**
2045 * fcoe_ctlr_vn_probe_reply() - handle incoming VN2VN probe reply.
2046 * @fip: The FCoE controller
2047 * @rdata: parsed remote port with frport from the probe request
2048 *
2049 * Called with ctlr_mutex held.
2050 */
2051static void fcoe_ctlr_vn_probe_reply(struct fcoe_ctlr *fip,
2052 struct fc_rport_priv *rdata)
2053{
2054 if (rdata->ids.port_id != fip->port_id)
2055 return;
2056 switch (fip->state) {
2057 case FIP_ST_VNMP_START:
2058 case FIP_ST_VNMP_PROBE1:
2059 case FIP_ST_VNMP_PROBE2:
2060 case FIP_ST_VNMP_CLAIM:
2061 fcoe_ctlr_vn_restart(fip);
2062 break;
2063 case FIP_ST_VNMP_UP:
2064 fcoe_ctlr_vn_send_claim(fip);
2065 break;
2066 default:
2067 break;
2068 }
2069}
2070
/**
 * fcoe_ctlr_vn_add() - Add a VN2VN entry to the list, based on a claim reply.
 * @fip: The FCoE controller
 * @new: newly-parsed remote port with frport as a template for new rdata
 *
 * Called with ctlr_mutex held.
 */
static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new)
{
	struct fc_lport *lport = fip->lp;
	struct fc_rport_priv *rdata;
	struct fc_rport_identifiers *ids;
	struct fcoe_rport *frport;
	u32 port_id;

	/* never add an entry for our own port ID */
	port_id = new->ids.port_id;
	if (port_id == fip->port_id)
		return;

	mutex_lock(&lport->disc.disc_mutex);
	/* NOTE(review): rport_create presumably returns an existing rdata
	 * for a known port_id or allocates a new one - confirm in libfc. */
	rdata = lport->tt.rport_create(lport, port_id);
	if (!rdata) {
		mutex_unlock(&lport->disc.disc_mutex);
		return;
	}

	rdata->ops = &fcoe_ctlr_vn_rport_ops;
	rdata->disc_id = lport->disc.disc_id;

	/*
	 * If the existing entry's WWPN/WWNN differ from the newly claimed
	 * ones, the old login is stale: log it off before taking the new IDs.
	 * (-1 means the name was never set.)
	 */
	ids = &rdata->ids;
	if ((ids->port_name != -1 && ids->port_name != new->ids.port_name) ||
	    (ids->node_name != -1 && ids->node_name != new->ids.node_name))
		lport->tt.rport_logoff(rdata);
	ids->port_name = new->ids.port_name;
	ids->node_name = new->ids.node_name;
	mutex_unlock(&lport->disc.disc_mutex);

	frport = fcoe_ctlr_rport(rdata);
	/* a non-zero fcoe_len indicates the entry was already populated */
	LIBFCOE_FIP_DBG(fip, "vn_add rport %6.6x %s\n",
			port_id, frport->fcoe_len ? "old" : "new");
	*frport = *fcoe_ctlr_rport(new);
	/* clear the beacon timestamp: peer must beacon before we log in */
	frport->time = 0;
}
2114
2115/**
2116 * fcoe_ctlr_vn_lookup() - Find VN remote port's MAC address
2117 * @fip: The FCoE controller
2118 * @port_id: The port_id of the remote VN_node
2119 * @mac: buffer which will hold the VN_NODE destination MAC address, if found.
2120 *
2121 * Returns non-zero error if no remote port found.
2122 */
2123static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *fip, u32 port_id, u8 *mac)
2124{
2125 struct fc_lport *lport = fip->lp;
2126 struct fc_rport_priv *rdata;
2127 struct fcoe_rport *frport;
2128 int ret = -1;
2129
2130 rcu_read_lock();
2131 rdata = lport->tt.rport_lookup(lport, port_id);
2132 if (rdata) {
2133 frport = fcoe_ctlr_rport(rdata);
2134 memcpy(mac, frport->enode_mac, ETH_ALEN);
2135 ret = 0;
2136 }
2137 rcu_read_unlock();
2138 return ret;
2139}
2140
2141/**
2142 * fcoe_ctlr_vn_claim_notify() - handle received FIP VN2VN Claim Notification
2143 * @fip: The FCoE controller
2144 * @new: newly-parsed remote port with frport as a template for new rdata
2145 *
2146 * Called with ctlr_mutex held.
2147 */
2148static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
2149 struct fc_rport_priv *new)
2150{
2151 struct fcoe_rport *frport = fcoe_ctlr_rport(new);
2152
2153 if (frport->flags & FIP_FL_REC_OR_P2P) {
2154 fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
2155 return;
2156 }
2157 switch (fip->state) {
2158 case FIP_ST_VNMP_START:
2159 case FIP_ST_VNMP_PROBE1:
2160 case FIP_ST_VNMP_PROBE2:
2161 if (new->ids.port_id == fip->port_id)
2162 fcoe_ctlr_vn_restart(fip);
2163 break;
2164 case FIP_ST_VNMP_CLAIM:
2165 case FIP_ST_VNMP_UP:
2166 if (new->ids.port_id == fip->port_id) {
2167 if (new->ids.port_name > fip->lp->wwpn) {
2168 fcoe_ctlr_vn_restart(fip);
2169 break;
2170 }
2171 fcoe_ctlr_vn_send_claim(fip);
2172 break;
2173 }
2174 fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_REP, frport->enode_mac,
2175 min((u32)frport->fcoe_len,
2176 fcoe_ctlr_fcoe_size(fip)));
2177 fcoe_ctlr_vn_add(fip, new);
2178 break;
2179 default:
2180 break;
2181 }
2182}
2183
2184/**
2185 * fcoe_ctlr_vn_claim_resp() - handle received Claim Response
2186 * @fip: The FCoE controller that received the frame
2187 * @new: newly-parsed remote port with frport from the Claim Response
2188 *
2189 * Called with ctlr_mutex held.
2190 */
2191static void fcoe_ctlr_vn_claim_resp(struct fcoe_ctlr *fip,
2192 struct fc_rport_priv *new)
2193{
2194 LIBFCOE_FIP_DBG(fip, "claim resp from from rport %x - state %s\n",
2195 new->ids.port_id, fcoe_ctlr_state(fip->state));
2196 if (fip->state == FIP_ST_VNMP_UP || fip->state == FIP_ST_VNMP_CLAIM)
2197 fcoe_ctlr_vn_add(fip, new);
2198}
2199
/**
 * fcoe_ctlr_vn_beacon() - handle received beacon.
 * @fip: The FCoE controller that received the frame
 * @new: newly-parsed remote port with frport from the Beacon
 *
 * Called with ctlr_mutex held.
 */
static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip,
				struct fc_rport_priv *new)
{
	struct fc_lport *lport = fip->lp;
	struct fc_rport_priv *rdata;
	struct fcoe_rport *frport;

	/* REC or point-to-point set: respond with a multicast probe instead */
	frport = fcoe_ctlr_rport(new);
	if (frport->flags & FIP_FL_REC_OR_P2P) {
		fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
		return;
	}
	/* take a reference under the disc mutex so rdata outlives the lookup */
	mutex_lock(&lport->disc.disc_mutex);
	rdata = lport->tt.rport_lookup(lport, new->ids.port_id);
	if (rdata)
		kref_get(&rdata->kref);
	mutex_unlock(&lport->disc.disc_mutex);
	if (rdata) {
		if (rdata->ids.node_name == new->ids.node_name &&
		    rdata->ids.port_name == new->ids.port_name) {
			frport = fcoe_ctlr_rport(rdata);
			/* first beacon from a known peer while up: log in */
			if (!frport->time && fip->state == FIP_ST_VNMP_UP)
				lport->tt.rport_login(rdata);
			/* refresh the beacon timestamp used for aging */
			frport->time = jiffies;
		}
		kref_put(&rdata->kref, lport->tt.rport_destroy);
		return;
	}
	if (fip->state != FIP_ST_VNMP_UP)
		return;

	/*
	 * Beacon from a new neighbor.
	 * Send a claim notify if one hasn't been sent recently.
	 * Don't add the neighbor yet.
	 */
	LIBFCOE_FIP_DBG(fip, "beacon from new rport %x. sending claim notify\n",
			new->ids.port_id);
	if (time_after(jiffies,
		       fip->sol_time + msecs_to_jiffies(FIP_VN_ANN_WAIT)))
		fcoe_ctlr_vn_send_claim(fip);
}
2249
/**
 * fcoe_ctlr_vn_age() - Check for VN_ports without recent beacons
 * @fip: The FCoE controller
 *
 * Logs off any remote port whose last beacon is older than 2.5 beacon
 * intervals.
 *
 * Called with ctlr_mutex held.
 * Called only in state FIP_ST_VNMP_UP.
 * Returns the soonest time for next age-out or a time far in the future.
 */
static unsigned long fcoe_ctlr_vn_age(struct fcoe_ctlr *fip)
{
	struct fc_lport *lport = fip->lp;
	struct fc_rport_priv *rdata;
	struct fcoe_rport *frport;
	unsigned long next_time;
	unsigned long deadline;

	/* default: re-check far in the future (10 beacon intervals) */
	next_time = jiffies + msecs_to_jiffies(FIP_VN_BEACON_INT * 10);
	/* NOTE(review): _rcu iteration under disc_mutex, not rcu_read_lock -
	 * presumably the mutex protects writers here; confirm in libfc. */
	mutex_lock(&lport->disc.disc_mutex);
	list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
		frport = fcoe_ctlr_rport(rdata);
		/* time == 0 means no beacon seen yet: nothing to age */
		if (!frport->time)
			continue;
		/* expire after 2.5 beacon intervals without a beacon */
		deadline = frport->time +
			   msecs_to_jiffies(FIP_VN_BEACON_INT * 25 / 10);
		if (time_after_eq(jiffies, deadline)) {
			frport->time = 0;
			LIBFCOE_FIP_DBG(fip,
				"port %16.16llx fc_id %6.6x beacon expired\n",
				rdata->ids.port_name, rdata->ids.port_id);
			lport->tt.rport_logoff(rdata);
		} else if (time_before(deadline, next_time))
			next_time = deadline;
	}
	mutex_unlock(&lport->disc.disc_mutex);
	return next_time;
}
2286
2287/**
2288 * fcoe_ctlr_vn_recv() - Receive a FIP frame
2289 * @fip: The FCoE controller that received the frame
2290 * @skb: The received FIP frame
2291 *
2292 * Returns non-zero if the frame is dropped.
2293 * Always consumes the frame.
2294 */
2295static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
2296{
2297 struct fip_header *fiph;
2298 enum fip_vn2vn_subcode sub;
2299 union {
2300 struct fc_rport_priv rdata;
2301 struct fcoe_rport frport;
2302 } buf;
2303 int rc;
2304
2305 fiph = (struct fip_header *)skb->data;
2306 sub = fiph->fip_subcode;
2307
2308 rc = fcoe_ctlr_vn_parse(fip, skb, &buf.rdata);
2309 if (rc) {
2310 LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc);
2311 goto drop;
2312 }
2313
2314 mutex_lock(&fip->ctlr_mutex);
2315 switch (sub) {
2316 case FIP_SC_VN_PROBE_REQ:
2317 fcoe_ctlr_vn_probe_req(fip, &buf.rdata);
2318 break;
2319 case FIP_SC_VN_PROBE_REP:
2320 fcoe_ctlr_vn_probe_reply(fip, &buf.rdata);
2321 break;
2322 case FIP_SC_VN_CLAIM_NOTIFY:
2323 fcoe_ctlr_vn_claim_notify(fip, &buf.rdata);
2324 break;
2325 case FIP_SC_VN_CLAIM_REP:
2326 fcoe_ctlr_vn_claim_resp(fip, &buf.rdata);
2327 break;
2328 case FIP_SC_VN_BEACON:
2329 fcoe_ctlr_vn_beacon(fip, &buf.rdata);
2330 break;
2331 default:
2332 LIBFCOE_FIP_DBG(fip, "vn_recv unknown subcode %d\n", sub);
2333 rc = -1;
2334 break;
2335 }
2336 mutex_unlock(&fip->ctlr_mutex);
2337drop:
2338 kfree_skb(skb);
2339 return rc;
2340}
2341
2342/**
2343 * fcoe_ctlr_disc_recv - discovery receive handler for VN2VN mode.
2344 * @lport: The local port
2345 * @fp: The received frame
2346 *
2347 * This should never be called since we don't see RSCNs or other
2348 * fabric-generated ELSes.
2349 */
2350static void fcoe_ctlr_disc_recv(struct fc_lport *lport, struct fc_frame *fp)
2351{
2352 struct fc_seq_els_data rjt_data;
2353
2354 rjt_data.reason = ELS_RJT_UNSUP;
2355 rjt_data.explan = ELS_EXPL_NONE;
2356 lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
2357 fc_frame_free(fp);
2358}
2359
/**
 * fcoe_ctlr_disc_start - start discovery for VN2VN mode.
 * @callback: The callback to report discovery completion with
 * @lport: The local port
 *
 * This sets a flag indicating that remote ports should be created
 * and started for the peers we discover. We use the disc_callback
 * pointer as that flag. Peers already discovered are created here.
 *
 * The lport lock is held during this call. The callback must be done
 * later, without holding either the lport or discovery locks.
 * The fcoe_ctlr lock may also be held during this call.
 */
static void fcoe_ctlr_disc_start(void (*callback)(struct fc_lport *,
						  enum fc_disc_event),
				 struct fc_lport *lport)
{
	struct fc_disc *disc = &lport->disc;
	struct fcoe_ctlr *fip = disc->priv;

	mutex_lock(&disc->disc_mutex);
	disc->disc_callback = callback;
	/* bump disc_id, keeping it odd so 0 never marks a valid pass */
	disc->disc_id = (disc->disc_id + 2) | 1;
	disc->pending = 1;
	/* defer the actual work (and callback) to the timer workqueue */
	schedule_work(&fip->timer_work);
	mutex_unlock(&disc->disc_mutex);
}
2386
2387/**
2388 * fcoe_ctlr_vn_disc() - report FIP VN_port discovery results after claim state.
2389 * @fip: The FCoE controller
2390 *
2391 * Starts the FLOGI and PLOGI login process to each discovered rport for which
2392 * we've received at least one beacon.
2393 * Performs the discovery complete callback.
2394 */
2395static void fcoe_ctlr_vn_disc(struct fcoe_ctlr *fip)
2396{
2397 struct fc_lport *lport = fip->lp;
2398 struct fc_disc *disc = &lport->disc;
2399 struct fc_rport_priv *rdata;
2400 struct fcoe_rport *frport;
2401 void (*callback)(struct fc_lport *, enum fc_disc_event);
2402
2403 mutex_lock(&disc->disc_mutex);
2404 callback = disc->pending ? disc->disc_callback : NULL;
2405 disc->pending = 0;
2406 list_for_each_entry_rcu(rdata, &disc->rports, peers) {
2407 frport = fcoe_ctlr_rport(rdata);
2408 if (frport->time)
2409 lport->tt.rport_login(rdata);
2410 }
2411 mutex_unlock(&disc->disc_mutex);
2412 if (callback)
2413 callback(lport, DISC_EV_SUCCESS);
2414}
2415
/**
 * fcoe_ctlr_vn_timeout - timer work function for VN2VN mode.
 * @fip: The FCoE controller
 *
 * Drives the VN2VN state machine: START -> PROBE1 -> PROBE2 -> CLAIM -> UP,
 * sending probes, claims, and periodic beacons, and re-arming the timer for
 * the next transition.
 */
static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
{
	unsigned long next_time;
	u8 mac[ETH_ALEN];
	u32 new_port_id = 0;

	mutex_lock(&fip->ctlr_mutex);
	switch (fip->state) {
	case FIP_ST_VNMP_START:
		/* first probe for the proposed address */
		fcoe_ctlr_set_state(fip, FIP_ST_VNMP_PROBE1);
		fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
		next_time = jiffies + msecs_to_jiffies(FIP_VN_PROBE_WAIT);
		break;
	case FIP_ST_VNMP_PROBE1:
		/* second probe; no one objected to the first */
		fcoe_ctlr_set_state(fip, FIP_ST_VNMP_PROBE2);
		fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
		next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT);
		break;
	case FIP_ST_VNMP_PROBE2:
		/*
		 * Both probes went unanswered: the address is ours.
		 * Build the FC_MAP-derived MAC and announce the claim.
		 */
		fcoe_ctlr_set_state(fip, FIP_ST_VNMP_CLAIM);
		new_port_id = fip->port_id;
		hton24(mac, FIP_VN_FC_MAP);
		hton24(mac + 3, new_port_id);
		fcoe_ctlr_map_dest(fip);
		fip->update_mac(fip->lp, mac);
		fcoe_ctlr_vn_send_claim(fip);
		next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT);
		break;
	case FIP_ST_VNMP_CLAIM:
		/*
		 * This may be invoked either by the timer or by starting
		 * discovery, so don't go to the next state unless it's been
		 * long enough since the claim was announced.
		 */
		next_time = fip->sol_time + msecs_to_jiffies(FIP_VN_ANN_WAIT);
		if (time_after_eq(jiffies, next_time)) {
			fcoe_ctlr_set_state(fip, FIP_ST_VNMP_UP);
			fcoe_ctlr_vn_send(fip, FIP_SC_VN_BEACON,
					  fcoe_all_vn2vn, 0);
			next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT);
			fip->port_ka_time = next_time;
		}
		fcoe_ctlr_vn_disc(fip);
		break;
	case FIP_ST_VNMP_UP:
		/* age out silent peers and send a periodic (fuzzed) beacon */
		next_time = fcoe_ctlr_vn_age(fip);
		if (time_after_eq(jiffies, fip->port_ka_time)) {
			fcoe_ctlr_vn_send(fip, FIP_SC_VN_BEACON,
					  fcoe_all_vn2vn, 0);
			fip->port_ka_time = jiffies +
					 msecs_to_jiffies(FIP_VN_BEACON_INT +
					(random32() % FIP_VN_BEACON_FUZZ));
		}
		if (time_before(fip->port_ka_time, next_time))
			next_time = fip->port_ka_time;
		break;
	case FIP_ST_LINK_WAIT:
		/* link is down: leave the timer unarmed */
		goto unlock;
	default:
		WARN(1, "unexpected state %d", fip->state);
		goto unlock;
	}
	mod_timer(&fip->timer, next_time);
unlock:
	mutex_unlock(&fip->ctlr_mutex);

	/* If port ID is new, notify local port after dropping ctlr_mutex */
	if (new_port_id)
		fc_lport_set_local_id(fip->lp, new_port_id);
}
2489
2490/**
1385 * fcoe_libfc_config() - Sets up libfc related properties for local port 2491 * fcoe_libfc_config() - Sets up libfc related properties for local port
1386 * @lp: The local port to configure libfc for 2492 * @lp: The local port to configure libfc for
2493 * @fip: The FCoE controller in use by the local port
1387 * @tt: The libfc function template 2494 * @tt: The libfc function template
2495 * @init_fcp: If non-zero, the FCP portion of libfc should be initialized
1388 * 2496 *
1389 * Returns : 0 for success 2497 * Returns : 0 for success
1390 */ 2498 */
1391int fcoe_libfc_config(struct fc_lport *lport, 2499int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip,
1392 struct libfc_function_template *tt) 2500 const struct libfc_function_template *tt, int init_fcp)
1393{ 2501{
1394 /* Set the function pointers set by the LLDD */ 2502 /* Set the function pointers set by the LLDD */
1395 memcpy(&lport->tt, tt, sizeof(*tt)); 2503 memcpy(&lport->tt, tt, sizeof(*tt));
1396 if (fc_fcp_init(lport)) 2504 if (init_fcp && fc_fcp_init(lport))
1397 return -ENOMEM; 2505 return -ENOMEM;
1398 fc_exch_init(lport); 2506 fc_exch_init(lport);
1399 fc_elsct_init(lport); 2507 fc_elsct_init(lport);
1400 fc_lport_init(lport); 2508 fc_lport_init(lport);
2509 if (fip->mode == FIP_MODE_VN2VN)
2510 lport->rport_priv_size = sizeof(struct fcoe_rport);
1401 fc_rport_init(lport); 2511 fc_rport_init(lport);
1402 fc_disc_init(lport); 2512 if (fip->mode == FIP_MODE_VN2VN) {
1403 2513 lport->point_to_multipoint = 1;
2514 lport->tt.disc_recv_req = fcoe_ctlr_disc_recv;
2515 lport->tt.disc_start = fcoe_ctlr_disc_start;
2516 lport->tt.disc_stop = fcoe_ctlr_disc_stop;
2517 lport->tt.disc_stop_final = fcoe_ctlr_disc_stop_final;
2518 mutex_init(&lport->disc.disc_mutex);
2519 INIT_LIST_HEAD(&lport->disc.rports);
2520 lport->disc.priv = fip;
2521 } else {
2522 fc_disc_init(lport);
2523 }
1404 return 0; 2524 return 0;
1405} 2525}
1406EXPORT_SYMBOL_GPL(fcoe_libfc_config); 2526EXPORT_SYMBOL_GPL(fcoe_libfc_config);
1407
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 265e73d9cd6f..9eb7a9ebccae 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -617,7 +617,6 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
617 fnic->ctlr.send = fnic_eth_send; 617 fnic->ctlr.send = fnic_eth_send;
618 fnic->ctlr.update_mac = fnic_update_mac; 618 fnic->ctlr.update_mac = fnic_update_mac;
619 fnic->ctlr.get_src_addr = fnic_get_mac; 619 fnic->ctlr.get_src_addr = fnic_get_mac;
620 fcoe_ctlr_init(&fnic->ctlr);
621 if (fnic->config.flags & VFCF_FIP_CAPABLE) { 620 if (fnic->config.flags & VFCF_FIP_CAPABLE) {
622 shost_printk(KERN_INFO, fnic->lport->host, 621 shost_printk(KERN_INFO, fnic->lport->host,
623 "firmware supports FIP\n"); 622 "firmware supports FIP\n");
@@ -625,10 +624,11 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
625 vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0); 624 vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0);
626 vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS); 625 vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
627 vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); 626 vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
627 fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO);
628 } else { 628 } else {
629 shost_printk(KERN_INFO, fnic->lport->host, 629 shost_printk(KERN_INFO, fnic->lport->host,
630 "firmware uses non-FIP mode\n"); 630 "firmware uses non-FIP mode\n");
631 fnic->ctlr.mode = FIP_ST_NON_FIP; 631 fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP);
632 } 632 }
633 fnic->state = FNIC_IN_FC_MODE; 633 fnic->state = FNIC_IN_FC_MODE;
634 634
@@ -673,7 +673,6 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
673 /* Start local port initiatialization */ 673 /* Start local port initiatialization */
674 674
675 lp->link_up = 0; 675 lp->link_up = 0;
676 lp->tt = fnic_transport_template;
677 676
678 lp->max_retry_count = fnic->config.flogi_retries; 677 lp->max_retry_count = fnic->config.flogi_retries;
679 lp->max_rport_retry_count = fnic->config.plogi_retries; 678 lp->max_rport_retry_count = fnic->config.plogi_retries;
@@ -689,11 +688,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
689 fc_set_wwnn(lp, fnic->config.node_wwn); 688 fc_set_wwnn(lp, fnic->config.node_wwn);
690 fc_set_wwpn(lp, fnic->config.port_wwn); 689 fc_set_wwpn(lp, fnic->config.port_wwn);
691 690
692 fc_lport_init(lp); 691 fcoe_libfc_config(lp, &fnic->ctlr, &fnic_transport_template, 0);
693 fc_exch_init(lp);
694 fc_elsct_init(lp);
695 fc_rport_init(lp);
696 fc_disc_init(lp);
697 692
698 if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START, 693 if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START,
699 FCPIO_HOST_EXCH_RANGE_END, NULL)) { 694 FCPIO_HOST_EXCH_RANGE_END, NULL)) {
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 3cc47c6e1ada..198cbab3e894 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -1246,11 +1246,10 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
1246 lp = shost_priv(sc->device->host); 1246 lp = shost_priv(sc->device->host);
1247 1247
1248 fnic = lport_priv(lp); 1248 fnic = lport_priv(lp);
1249 FNIC_SCSI_DBG(KERN_DEBUG, 1249 rport = starget_to_rport(scsi_target(sc->device));
1250 fnic->lport->host, 1250 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1251 "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %d\n", 1251 "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %d\n",
1252 (starget_to_rport(scsi_target(sc->device)))->port_id, 1252 rport->port_id, sc->device->lun, sc->request->tag);
1253 sc->device->lun, sc->request->tag);
1254 1253
1255 if (lp->state != LPORT_ST_READY || !(lp->link_up)) { 1254 if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
1256 ret = FAILED; 1255 ret = FAILED;
@@ -1299,7 +1298,6 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
1299 * port is up, then send abts to the remote port to terminate 1298 * port is up, then send abts to the remote port to terminate
1300 * the IO. Else, just locally terminate the IO in the firmware 1299 * the IO. Else, just locally terminate the IO in the firmware
1301 */ 1300 */
1302 rport = starget_to_rport(scsi_target(sc->device));
1303 if (fc_remote_port_chkready(rport) == 0) 1301 if (fc_remote_port_chkready(rport) == 0)
1304 task_req = FCPIO_ITMF_ABT_TASK; 1302 task_req = FCPIO_ITMF_ABT_TASK;
1305 else 1303 else
@@ -1418,7 +1416,6 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
1418 unsigned long flags; 1416 unsigned long flags;
1419 int ret = 0; 1417 int ret = 0;
1420 struct scsi_cmnd *sc; 1418 struct scsi_cmnd *sc;
1421 struct fc_rport *rport;
1422 struct scsi_lun fc_lun; 1419 struct scsi_lun fc_lun;
1423 struct scsi_device *lun_dev = lr_sc->device; 1420 struct scsi_device *lun_dev = lr_sc->device;
1424 DECLARE_COMPLETION_ONSTACK(tm_done); 1421 DECLARE_COMPLETION_ONSTACK(tm_done);
@@ -1458,7 +1455,6 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
1458 1455
1459 /* Now queue the abort command to firmware */ 1456 /* Now queue the abort command to firmware */
1460 int_to_scsilun(sc->device->lun, &fc_lun); 1457 int_to_scsilun(sc->device->lun, &fc_lun);
1461 rport = starget_to_rport(scsi_target(sc->device));
1462 1458
1463 if (fnic_queue_abort_io_req(fnic, tag, 1459 if (fnic_queue_abort_io_req(fnic, tag,
1464 FCPIO_ITMF_ABT_TASK_TERM, 1460 FCPIO_ITMF_ABT_TASK_TERM,
@@ -1528,18 +1524,16 @@ int fnic_device_reset(struct scsi_cmnd *sc)
1528 lp = shost_priv(sc->device->host); 1524 lp = shost_priv(sc->device->host);
1529 1525
1530 fnic = lport_priv(lp); 1526 fnic = lport_priv(lp);
1531 FNIC_SCSI_DBG(KERN_DEBUG,
1532 fnic->lport->host,
1533 "Device reset called FCID 0x%x, LUN 0x%x\n",
1534 (starget_to_rport(scsi_target(sc->device)))->port_id,
1535 sc->device->lun);
1536 1527
1528 rport = starget_to_rport(scsi_target(sc->device));
1529 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1530 "Device reset called FCID 0x%x, LUN 0x%x\n",
1531 rport->port_id, sc->device->lun);
1537 1532
1538 if (lp->state != LPORT_ST_READY || !(lp->link_up)) 1533 if (lp->state != LPORT_ST_READY || !(lp->link_up))
1539 goto fnic_device_reset_end; 1534 goto fnic_device_reset_end;
1540 1535
1541 /* Check if remote port up */ 1536 /* Check if remote port up */
1542 rport = starget_to_rport(scsi_target(sc->device));
1543 if (fc_remote_port_chkready(rport)) 1537 if (fc_remote_port_chkready(rport))
1544 goto fnic_device_reset_end; 1538 goto fnic_device_reset_end;
1545 1539
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 6660fa92ffa1..8a8f803439e1 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -32,6 +32,7 @@
32#include <linux/completion.h> 32#include <linux/completion.h>
33#include <linux/transport_class.h> 33#include <linux/transport_class.h>
34#include <linux/platform_device.h> 34#include <linux/platform_device.h>
35#include <linux/pm_runtime.h>
35 36
36#include <scsi/scsi_device.h> 37#include <scsi/scsi_device.h>
37#include <scsi/scsi_host.h> 38#include <scsi/scsi_host.h>
@@ -156,6 +157,7 @@ EXPORT_SYMBOL(scsi_host_set_state);
156void scsi_remove_host(struct Scsi_Host *shost) 157void scsi_remove_host(struct Scsi_Host *shost)
157{ 158{
158 unsigned long flags; 159 unsigned long flags;
160
159 mutex_lock(&shost->scan_mutex); 161 mutex_lock(&shost->scan_mutex);
160 spin_lock_irqsave(shost->host_lock, flags); 162 spin_lock_irqsave(shost->host_lock, flags);
161 if (scsi_host_set_state(shost, SHOST_CANCEL)) 163 if (scsi_host_set_state(shost, SHOST_CANCEL))
@@ -165,6 +167,8 @@ void scsi_remove_host(struct Scsi_Host *shost)
165 return; 167 return;
166 } 168 }
167 spin_unlock_irqrestore(shost->host_lock, flags); 169 spin_unlock_irqrestore(shost->host_lock, flags);
170
171 scsi_autopm_get_host(shost);
168 scsi_forget_host(shost); 172 scsi_forget_host(shost);
169 mutex_unlock(&shost->scan_mutex); 173 mutex_unlock(&shost->scan_mutex);
170 scsi_proc_host_rm(shost); 174 scsi_proc_host_rm(shost);
@@ -216,12 +220,14 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
216 shost->shost_gendev.parent = dev ? dev : &platform_bus; 220 shost->shost_gendev.parent = dev ? dev : &platform_bus;
217 shost->dma_dev = dma_dev; 221 shost->dma_dev = dma_dev;
218 222
219 device_enable_async_suspend(&shost->shost_gendev);
220
221 error = device_add(&shost->shost_gendev); 223 error = device_add(&shost->shost_gendev);
222 if (error) 224 if (error)
223 goto out; 225 goto out;
224 226
227 pm_runtime_set_active(&shost->shost_gendev);
228 pm_runtime_enable(&shost->shost_gendev);
229 device_enable_async_suspend(&shost->shost_gendev);
230
225 scsi_host_set_state(shost, SHOST_RUNNING); 231 scsi_host_set_state(shost, SHOST_RUNNING);
226 get_device(shost->shost_gendev.parent); 232 get_device(shost->shost_gendev.parent);
227 233
@@ -325,7 +331,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
325{ 331{
326 struct Scsi_Host *shost; 332 struct Scsi_Host *shost;
327 gfp_t gfp_mask = GFP_KERNEL; 333 gfp_t gfp_mask = GFP_KERNEL;
328 int rval;
329 334
330 if (sht->unchecked_isa_dma && privsize) 335 if (sht->unchecked_isa_dma && privsize)
331 gfp_mask |= __GFP_DMA; 336 gfp_mask |= __GFP_DMA;
@@ -420,7 +425,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
420 shost->ehandler = kthread_run(scsi_error_handler, shost, 425 shost->ehandler = kthread_run(scsi_error_handler, shost,
421 "scsi_eh_%d", shost->host_no); 426 "scsi_eh_%d", shost->host_no);
422 if (IS_ERR(shost->ehandler)) { 427 if (IS_ERR(shost->ehandler)) {
423 rval = PTR_ERR(shost->ehandler); 428 printk(KERN_WARNING "scsi%d: error handler thread failed to spawn, error = %ld\n",
429 shost->host_no, PTR_ERR(shost->ehandler));
424 goto fail_kfree; 430 goto fail_kfree;
425 } 431 }
426 432
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index c016426b31b2..4f5551b5fe53 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -86,10 +86,17 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
86 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a}, 86 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
87 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b}, 87 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
88 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233}, 88 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
89 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3250},
90 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3251},
91 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3252},
92 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3253},
93 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3254},
89#define PCI_DEVICE_ID_HP_CISSF 0x333f 94#define PCI_DEVICE_ID_HP_CISSF 0x333f
90 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x333F}, 95 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x333F},
91 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 96 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
92 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, 97 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
98 {PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
99 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
93 {0,} 100 {0,}
94}; 101};
95 102
@@ -109,12 +116,18 @@ static struct board_type products[] = {
109 {0x324b103C, "Smart Array P711m", &SA5_access}, 116 {0x324b103C, "Smart Array P711m", &SA5_access},
110 {0x3233103C, "StorageWorks P1210m", &SA5_access}, 117 {0x3233103C, "StorageWorks P1210m", &SA5_access},
111 {0x333F103C, "StorageWorks P1210m", &SA5_access}, 118 {0x333F103C, "StorageWorks P1210m", &SA5_access},
119 {0x3250103C, "Smart Array", &SA5_access},
120 {0x3250113C, "Smart Array", &SA5_access},
121 {0x3250123C, "Smart Array", &SA5_access},
122 {0x3250133C, "Smart Array", &SA5_access},
123 {0x3250143C, "Smart Array", &SA5_access},
112 {0xFFFF103C, "Unknown Smart Array", &SA5_access}, 124 {0xFFFF103C, "Unknown Smart Array", &SA5_access},
113}; 125};
114 126
115static int number_of_controllers; 127static int number_of_controllers;
116 128
117static irqreturn_t do_hpsa_intr(int irq, void *dev_id); 129static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
130static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
118static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg); 131static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
119static void start_io(struct ctlr_info *h); 132static void start_io(struct ctlr_info *h);
120 133
@@ -148,6 +161,8 @@ static ssize_t lunid_show(struct device *dev,
148 struct device_attribute *attr, char *buf); 161 struct device_attribute *attr, char *buf);
149static ssize_t unique_id_show(struct device *dev, 162static ssize_t unique_id_show(struct device *dev,
150 struct device_attribute *attr, char *buf); 163 struct device_attribute *attr, char *buf);
164static ssize_t host_show_firmware_revision(struct device *dev,
165 struct device_attribute *attr, char *buf);
151static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno); 166static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
152static ssize_t host_store_rescan(struct device *dev, 167static ssize_t host_store_rescan(struct device *dev,
153 struct device_attribute *attr, const char *buf, size_t count); 168 struct device_attribute *attr, const char *buf, size_t count);
@@ -158,13 +173,21 @@ static void check_ioctl_unit_attention(struct ctlr_info *h,
158/* performant mode helper functions */ 173/* performant mode helper functions */
159static void calc_bucket_map(int *bucket, int num_buckets, 174static void calc_bucket_map(int *bucket, int num_buckets,
160 int nsgs, int *bucket_map); 175 int nsgs, int *bucket_map);
161static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h); 176static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
162static inline u32 next_command(struct ctlr_info *h); 177static inline u32 next_command(struct ctlr_info *h);
178static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
179 void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
180 u64 *cfg_offset);
181static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
182 unsigned long *memory_bar);
183static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
163 184
164static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL); 185static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
165static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL); 186static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
166static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL); 187static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
167static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); 188static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
189static DEVICE_ATTR(firmware_revision, S_IRUGO,
190 host_show_firmware_revision, NULL);
168 191
169static struct device_attribute *hpsa_sdev_attrs[] = { 192static struct device_attribute *hpsa_sdev_attrs[] = {
170 &dev_attr_raid_level, 193 &dev_attr_raid_level,
@@ -175,6 +198,7 @@ static struct device_attribute *hpsa_sdev_attrs[] = {
175 198
176static struct device_attribute *hpsa_shost_attrs[] = { 199static struct device_attribute *hpsa_shost_attrs[] = {
177 &dev_attr_rescan, 200 &dev_attr_rescan,
201 &dev_attr_firmware_revision,
178 NULL, 202 NULL,
179}; 203};
180 204
@@ -260,6 +284,21 @@ static ssize_t host_store_rescan(struct device *dev,
260 return count; 284 return count;
261} 285}
262 286
287static ssize_t host_show_firmware_revision(struct device *dev,
288 struct device_attribute *attr, char *buf)
289{
290 struct ctlr_info *h;
291 struct Scsi_Host *shost = class_to_shost(dev);
292 unsigned char *fwrev;
293
294 h = shost_to_hba(shost);
295 if (!h->hba_inquiry_data)
296 return 0;
297 fwrev = &h->hba_inquiry_data[32];
298 return snprintf(buf, 20, "%c%c%c%c\n",
299 fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
300}
301
263/* Enqueuing and dequeuing functions for cmdlists. */ 302/* Enqueuing and dequeuing functions for cmdlists. */
264static inline void addQ(struct hlist_head *list, struct CommandList *c) 303static inline void addQ(struct hlist_head *list, struct CommandList *c)
265{ 304{
@@ -1440,12 +1479,6 @@ static int hpsa_update_device_info(struct ctlr_info *h,
1440 goto bail_out; 1479 goto bail_out;
1441 } 1480 }
1442 1481
1443 /* As a side effect, record the firmware version number
1444 * if we happen to be talking to the RAID controller.
1445 */
1446 if (is_hba_lunid(scsi3addr))
1447 memcpy(h->firm_ver, &inq_buff[32], 4);
1448
1449 this_device->devtype = (inq_buff[0] & 0x1f); 1482 this_device->devtype = (inq_buff[0] & 0x1f);
1450 memcpy(this_device->scsi3addr, scsi3addr, 8); 1483 memcpy(this_device->scsi3addr, scsi3addr, 8);
1451 memcpy(this_device->vendor, &inq_buff[8], 1484 memcpy(this_device->vendor, &inq_buff[8],
@@ -2826,9 +2859,8 @@ static inline bool interrupt_pending(struct ctlr_info *h)
2826 2859
2827static inline long interrupt_not_for_us(struct ctlr_info *h) 2860static inline long interrupt_not_for_us(struct ctlr_info *h)
2828{ 2861{
2829 return !(h->msi_vector || h->msix_vector) && 2862 return (h->access.intr_pending(h) == 0) ||
2830 ((h->access.intr_pending(h) == 0) || 2863 (h->interrupts_enabled == 0);
2831 (h->interrupts_enabled == 0));
2832} 2864}
2833 2865
2834static inline int bad_tag(struct ctlr_info *h, u32 tag_index, 2866static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
@@ -2902,7 +2934,7 @@ static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
2902 return next_command(h); 2934 return next_command(h);
2903} 2935}
2904 2936
2905static irqreturn_t do_hpsa_intr(int irq, void *dev_id) 2937static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id)
2906{ 2938{
2907 struct ctlr_info *h = dev_id; 2939 struct ctlr_info *h = dev_id;
2908 unsigned long flags; 2940 unsigned long flags;
@@ -2911,6 +2943,26 @@ static irqreturn_t do_hpsa_intr(int irq, void *dev_id)
2911 if (interrupt_not_for_us(h)) 2943 if (interrupt_not_for_us(h))
2912 return IRQ_NONE; 2944 return IRQ_NONE;
2913 spin_lock_irqsave(&h->lock, flags); 2945 spin_lock_irqsave(&h->lock, flags);
2946 while (interrupt_pending(h)) {
2947 raw_tag = get_next_completion(h);
2948 while (raw_tag != FIFO_EMPTY) {
2949 if (hpsa_tag_contains_index(raw_tag))
2950 raw_tag = process_indexed_cmd(h, raw_tag);
2951 else
2952 raw_tag = process_nonindexed_cmd(h, raw_tag);
2953 }
2954 }
2955 spin_unlock_irqrestore(&h->lock, flags);
2956 return IRQ_HANDLED;
2957}
2958
2959static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id)
2960{
2961 struct ctlr_info *h = dev_id;
2962 unsigned long flags;
2963 u32 raw_tag;
2964
2965 spin_lock_irqsave(&h->lock, flags);
2914 raw_tag = get_next_completion(h); 2966 raw_tag = get_next_completion(h);
2915 while (raw_tag != FIFO_EMPTY) { 2967 while (raw_tag != FIFO_EMPTY) {
2916 if (hpsa_tag_contains_index(raw_tag)) 2968 if (hpsa_tag_contains_index(raw_tag))
@@ -3052,17 +3104,75 @@ static __devinit int hpsa_reset_msi(struct pci_dev *pdev)
3052 return 0; 3104 return 0;
3053} 3105}
3054 3106
3055/* This does a hard reset of the controller using PCI power management 3107static int hpsa_controller_hard_reset(struct pci_dev *pdev,
3056 * states. 3108 void * __iomem vaddr, bool use_doorbell)
3057 */
3058static __devinit int hpsa_hard_reset_controller(struct pci_dev *pdev)
3059{ 3109{
3060 u16 pmcsr, saved_config_space[32]; 3110 u16 pmcsr;
3061 int i, pos; 3111 int pos;
3062 3112
3063 dev_info(&pdev->dev, "using PCI PM to reset controller\n"); 3113 if (use_doorbell) {
3114 /* For everything after the P600, the PCI power state method
3115 * of resetting the controller doesn't work, so we have this
3116 * other way using the doorbell register.
3117 */
3118 dev_info(&pdev->dev, "using doorbell to reset controller\n");
3119 writel(DOORBELL_CTLR_RESET, vaddr + SA5_DOORBELL);
3120 msleep(1000);
3121 } else { /* Try to do it the PCI power state way */
3122
3123 /* Quoting from the Open CISS Specification: "The Power
3124 * Management Control/Status Register (CSR) controls the power
3125 * state of the device. The normal operating state is D0,
3126 * CSR=00h. The software off state is D3, CSR=03h. To reset
3127 * the controller, place the interface device in D3 then to D0,
3128 * this causes a secondary PCI reset which will reset the
3129 * controller." */
3130
3131 pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
3132 if (pos == 0) {
3133 dev_err(&pdev->dev,
3134 "hpsa_reset_controller: "
3135 "PCI PM not supported\n");
3136 return -ENODEV;
3137 }
3138 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
3139 /* enter the D3hot power management state */
3140 pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
3141 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3142 pmcsr |= PCI_D3hot;
3143 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3064 3144
3065 /* This is very nearly the same thing as 3145 msleep(500);
3146
3147 /* enter the D0 power management state */
3148 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3149 pmcsr |= PCI_D0;
3150 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3151
3152 msleep(500);
3153 }
3154 return 0;
3155}
3156
3157/* This does a hard reset of the controller using PCI power management
3158 * states or the using the doorbell register.
3159 */
3160static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
3161{
3162 u16 saved_config_space[32];
3163 u64 cfg_offset;
3164 u32 cfg_base_addr;
3165 u64 cfg_base_addr_index;
3166 void __iomem *vaddr;
3167 unsigned long paddr;
3168 u32 misc_fw_support, active_transport;
3169 int rc, i;
3170 struct CfgTable __iomem *cfgtable;
3171 bool use_doorbell;
3172 u32 board_id;
3173
3174 /* For controllers as old as the P600, this is very nearly
3175 * the same thing as
3066 * 3176 *
3067 * pci_save_state(pci_dev); 3177 * pci_save_state(pci_dev);
3068 * pci_set_power_state(pci_dev, PCI_D3hot); 3178 * pci_set_power_state(pci_dev, PCI_D3hot);
@@ -3076,41 +3186,54 @@ static __devinit int hpsa_hard_reset_controller(struct pci_dev *pdev)
3076 * violate the ordering requirements for restoring the 3186 * violate the ordering requirements for restoring the
3077 * configuration space from the CCISS document (see the 3187 * configuration space from the CCISS document (see the
3078 * comment below). So we roll our own .... 3188 * comment below). So we roll our own ....
3189 *
3190 * For controllers newer than the P600, the pci power state
3191 * method of resetting doesn't work so we have another way
3192 * using the doorbell register.
3079 */ 3193 */
3080 3194
3195 /* Exclude 640x boards. These are two pci devices in one slot
3196 * which share a battery backed cache module. One controls the
3197 * cache, the other accesses the cache through the one that controls
3198 * it. If we reset the one controlling the cache, the other will
3199 * likely not be happy. Just forbid resetting this conjoined mess.
3200 * The 640x isn't really supported by hpsa anyway.
3201 */
3202 hpsa_lookup_board_id(pdev, &board_id);
3203 if (board_id == 0x409C0E11 || board_id == 0x409D0E11)
3204 return -ENOTSUPP;
3205
3081 for (i = 0; i < 32; i++) 3206 for (i = 0; i < 32; i++)
3082 pci_read_config_word(pdev, 2*i, &saved_config_space[i]); 3207 pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
3083 3208
3084 pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
3085 if (pos == 0) {
3086 dev_err(&pdev->dev,
3087 "hpsa_reset_controller: PCI PM not supported\n");
3088 return -ENODEV;
3089 }
3090
3091 /* Quoting from the Open CISS Specification: "The Power
3092 * Management Control/Status Register (CSR) controls the power
3093 * state of the device. The normal operating state is D0,
3094 * CSR=00h. The software off state is D3, CSR=03h. To reset
3095 * the controller, place the interface device in D3 then to
3096 * D0, this causes a secondary PCI reset which will reset the
3097 * controller."
3098 */
3099 3209
3100 /* enter the D3hot power management state */ 3210 /* find the first memory BAR, so we can find the cfg table */
3101 pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr); 3211 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
3102 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 3212 if (rc)
3103 pmcsr |= PCI_D3hot; 3213 return rc;
3104 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); 3214 vaddr = remap_pci_mem(paddr, 0x250);
3215 if (!vaddr)
3216 return -ENOMEM;
3105 3217
3106 msleep(500); 3218 /* find cfgtable in order to check if reset via doorbell is supported */
3219 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
3220 &cfg_base_addr_index, &cfg_offset);
3221 if (rc)
3222 goto unmap_vaddr;
3223 cfgtable = remap_pci_mem(pci_resource_start(pdev,
3224 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
3225 if (!cfgtable) {
3226 rc = -ENOMEM;
3227 goto unmap_vaddr;
3228 }
3107 3229
3108 /* enter the D0 power management state */ 3230 /* If reset via doorbell register is supported, use that. */
3109 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 3231 misc_fw_support = readl(&cfgtable->misc_fw_support);
3110 pmcsr |= PCI_D0; 3232 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
3111 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3112 3233
3113 msleep(500); 3234 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
3235 if (rc)
3236 goto unmap_cfgtable;
3114 3237
3115 /* Restore the PCI configuration space. The Open CISS 3238 /* Restore the PCI configuration space. The Open CISS
3116 * Specification says, "Restore the PCI Configuration 3239 * Specification says, "Restore the PCI Configuration
@@ -3127,7 +3250,29 @@ static __devinit int hpsa_hard_reset_controller(struct pci_dev *pdev)
3127 wmb(); 3250 wmb();
3128 pci_write_config_word(pdev, 4, saved_config_space[2]); 3251 pci_write_config_word(pdev, 4, saved_config_space[2]);
3129 3252
3130 return 0; 3253 /* Some devices (notably the HP Smart Array 5i Controller)
3254 need a little pause here */
3255 msleep(HPSA_POST_RESET_PAUSE_MSECS);
3256
3257 /* Controller should be in simple mode at this point. If it's not,
3258 * It means we're on one of those controllers which doesn't support
3259 * the doorbell reset method and on which the PCI power management reset
3260 * method doesn't work (P800, for example.)
3261 * In those cases, pretend the reset worked and hope for the best.
3262 */
3263 active_transport = readl(&cfgtable->TransportActive);
3264 if (active_transport & PERFORMANT_MODE) {
3265 dev_warn(&pdev->dev, "Unable to successfully reset controller,"
3266 " proceeding anyway.\n");
3267 rc = -ENOTSUPP;
3268 }
3269
3270unmap_cfgtable:
3271 iounmap(cfgtable);
3272
3273unmap_vaddr:
3274 iounmap(vaddr);
3275 return rc;
3131} 3276}
3132 3277
3133/* 3278/*
@@ -3135,9 +3280,9 @@ static __devinit int hpsa_hard_reset_controller(struct pci_dev *pdev)
3135 * the io functions. 3280 * the io functions.
3136 * This is for debug only. 3281 * This is for debug only.
3137 */ 3282 */
3138#ifdef HPSA_DEBUG
3139static void print_cfg_table(struct device *dev, struct CfgTable *tb) 3283static void print_cfg_table(struct device *dev, struct CfgTable *tb)
3140{ 3284{
3285#ifdef HPSA_DEBUG
3141 int i; 3286 int i;
3142 char temp_name[17]; 3287 char temp_name[17];
3143 3288
@@ -3167,8 +3312,8 @@ static void print_cfg_table(struct device *dev, struct CfgTable *tb)
3167 dev_info(dev, " Server Name = %s\n", temp_name); 3312 dev_info(dev, " Server Name = %s\n", temp_name);
3168 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n", 3313 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
3169 readl(&(tb->HeartBeat))); 3314 readl(&(tb->HeartBeat)));
3170}
3171#endif /* HPSA_DEBUG */ 3315#endif /* HPSA_DEBUG */
3316}
3172 3317
3173static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) 3318static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
3174{ 3319{
@@ -3209,8 +3354,7 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
3209 * controllers that are capable. If not, we use IO-APIC mode. 3354 * controllers that are capable. If not, we use IO-APIC mode.
3210 */ 3355 */
3211 3356
3212static void __devinit hpsa_interrupt_mode(struct ctlr_info *h, 3357static void __devinit hpsa_interrupt_mode(struct ctlr_info *h)
3213 struct pci_dev *pdev, u32 board_id)
3214{ 3358{
3215#ifdef CONFIG_PCI_MSI 3359#ifdef CONFIG_PCI_MSI
3216 int err; 3360 int err;
@@ -3219,13 +3363,12 @@ static void __devinit hpsa_interrupt_mode(struct ctlr_info *h,
3219 }; 3363 };
3220 3364
3221 /* Some boards advertise MSI but don't really support it */ 3365 /* Some boards advertise MSI but don't really support it */
3222 if ((board_id == 0x40700E11) || 3366 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
3223 (board_id == 0x40800E11) || 3367 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
3224 (board_id == 0x40820E11) || (board_id == 0x40830E11))
3225 goto default_int_mode; 3368 goto default_int_mode;
3226 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) { 3369 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
3227 dev_info(&pdev->dev, "MSIX\n"); 3370 dev_info(&h->pdev->dev, "MSIX\n");
3228 err = pci_enable_msix(pdev, hpsa_msix_entries, 4); 3371 err = pci_enable_msix(h->pdev, hpsa_msix_entries, 4);
3229 if (!err) { 3372 if (!err) {
3230 h->intr[0] = hpsa_msix_entries[0].vector; 3373 h->intr[0] = hpsa_msix_entries[0].vector;
3231 h->intr[1] = hpsa_msix_entries[1].vector; 3374 h->intr[1] = hpsa_msix_entries[1].vector;
@@ -3235,144 +3378,158 @@ static void __devinit hpsa_interrupt_mode(struct ctlr_info *h,
3235 return; 3378 return;
3236 } 3379 }
3237 if (err > 0) { 3380 if (err > 0) {
3238 dev_warn(&pdev->dev, "only %d MSI-X vectors " 3381 dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
3239 "available\n", err); 3382 "available\n", err);
3240 goto default_int_mode; 3383 goto default_int_mode;
3241 } else { 3384 } else {
3242 dev_warn(&pdev->dev, "MSI-X init failed %d\n", 3385 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
3243 err); 3386 err);
3244 goto default_int_mode; 3387 goto default_int_mode;
3245 } 3388 }
3246 } 3389 }
3247 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) { 3390 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
3248 dev_info(&pdev->dev, "MSI\n"); 3391 dev_info(&h->pdev->dev, "MSI\n");
3249 if (!pci_enable_msi(pdev)) 3392 if (!pci_enable_msi(h->pdev))
3250 h->msi_vector = 1; 3393 h->msi_vector = 1;
3251 else 3394 else
3252 dev_warn(&pdev->dev, "MSI init failed\n"); 3395 dev_warn(&h->pdev->dev, "MSI init failed\n");
3253 } 3396 }
3254default_int_mode: 3397default_int_mode:
3255#endif /* CONFIG_PCI_MSI */ 3398#endif /* CONFIG_PCI_MSI */
3256 /* if we get here we're going to use the default interrupt mode */ 3399 /* if we get here we're going to use the default interrupt mode */
3257 h->intr[PERF_MODE_INT] = pdev->irq; 3400 h->intr[PERF_MODE_INT] = h->pdev->irq;
3258} 3401}
3259 3402
3260static int __devinit hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev) 3403static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
3261{ 3404{
3262 ushort subsystem_vendor_id, subsystem_device_id, command; 3405 int i;
3263 u32 board_id, scratchpad = 0; 3406 u32 subsystem_vendor_id, subsystem_device_id;
3264 u64 cfg_offset;
3265 u32 cfg_base_addr;
3266 u64 cfg_base_addr_index;
3267 u32 trans_offset;
3268 int i, prod_index, err;
3269 3407
3270 subsystem_vendor_id = pdev->subsystem_vendor; 3408 subsystem_vendor_id = pdev->subsystem_vendor;
3271 subsystem_device_id = pdev->subsystem_device; 3409 subsystem_device_id = pdev->subsystem_device;
3272 board_id = (((u32) (subsystem_device_id << 16) & 0xffff0000) | 3410 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
3273 subsystem_vendor_id); 3411 subsystem_vendor_id;
3274 3412
3275 for (i = 0; i < ARRAY_SIZE(products); i++) 3413 for (i = 0; i < ARRAY_SIZE(products); i++)
3276 if (board_id == products[i].board_id) 3414 if (*board_id == products[i].board_id)
3277 break; 3415 return i;
3278 3416
3279 prod_index = i; 3417 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
3280 3418 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
3281 if (prod_index == ARRAY_SIZE(products)) { 3419 !hpsa_allow_any) {
3282 prod_index--; 3420 dev_warn(&pdev->dev, "unrecognized board ID: "
3283 if (subsystem_vendor_id != PCI_VENDOR_ID_HP || 3421 "0x%08x, ignoring.\n", *board_id);
3284 !hpsa_allow_any) {
3285 dev_warn(&pdev->dev, "unrecognized board ID:"
3286 " 0x%08lx, ignoring.\n",
3287 (unsigned long) board_id);
3288 return -ENODEV; 3422 return -ENODEV;
3289 }
3290 }
3291 /* check to see if controller has been disabled
3292 * BEFORE trying to enable it
3293 */
3294 (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
3295 if (!(command & 0x02)) {
3296 dev_warn(&pdev->dev, "controller appears to be disabled\n");
3297 return -ENODEV;
3298 }
3299
3300 err = pci_enable_device(pdev);
3301 if (err) {
3302 dev_warn(&pdev->dev, "unable to enable PCI device\n");
3303 return err;
3304 } 3423 }
3424 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
3425}
3305 3426
3306 err = pci_request_regions(pdev, "hpsa"); 3427static inline bool hpsa_board_disabled(struct pci_dev *pdev)
3307 if (err) { 3428{
3308 dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n"); 3429 u16 command;
3309 return err;
3310 }
3311 3430
3312 /* If the kernel supports MSI/MSI-X we will try to enable that, 3431 (void) pci_read_config_word(pdev, PCI_COMMAND, &command);
3313 * else we use the IO-APIC interrupt assigned to us by system ROM. 3432 return ((command & PCI_COMMAND_MEMORY) == 0);
3314 */ 3433}
3315 hpsa_interrupt_mode(h, pdev, board_id);
3316 3434
3317 /* find the memory BAR */ 3435static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
3318 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 3436 unsigned long *memory_bar)
3319 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) 3437{
3320 break; 3438 int i;
3321 }
3322 if (i == DEVICE_COUNT_RESOURCE) {
3323 dev_warn(&pdev->dev, "no memory BAR found\n");
3324 err = -ENODEV;
3325 goto err_out_free_res;
3326 }
3327 3439
3328 h->paddr = pci_resource_start(pdev, i); /* addressing mode bits 3440 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
3329 * already removed 3441 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3330 */ 3442 /* addressing mode bits already removed */
3443 *memory_bar = pci_resource_start(pdev, i);
3444 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
3445 *memory_bar);
3446 return 0;
3447 }
3448 dev_warn(&pdev->dev, "no memory BAR found\n");
3449 return -ENODEV;
3450}
3331 3451
3332 h->vaddr = remap_pci_mem(h->paddr, 0x250); 3452static int __devinit hpsa_wait_for_board_ready(struct ctlr_info *h)
3453{
3454 int i;
3455 u32 scratchpad;
3333 3456
3334 /* Wait for the board to become ready. */
3335 for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) { 3457 for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) {
3336 scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); 3458 scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
3337 if (scratchpad == HPSA_FIRMWARE_READY) 3459 if (scratchpad == HPSA_FIRMWARE_READY)
3338 break; 3460 return 0;
3339 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS); 3461 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
3340 } 3462 }
3341 if (scratchpad != HPSA_FIRMWARE_READY) { 3463 dev_warn(&h->pdev->dev, "board not ready, timed out.\n");
3342 dev_warn(&pdev->dev, "board not ready, timed out.\n"); 3464 return -ENODEV;
3343 err = -ENODEV; 3465}
3344 goto err_out_free_res;
3345 }
3346 3466
3347 /* get the address index number */ 3467static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
3348 cfg_base_addr = readl(h->vaddr + SA5_CTCFG_OFFSET); 3468 void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
3349 cfg_base_addr &= (u32) 0x0000ffff; 3469 u64 *cfg_offset)
3350 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr); 3470{
3351 if (cfg_base_addr_index == -1) { 3471 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
3472 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
3473 *cfg_base_addr &= (u32) 0x0000ffff;
3474 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
3475 if (*cfg_base_addr_index == -1) {
3352 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n"); 3476 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
3353 err = -ENODEV; 3477 return -ENODEV;
3354 goto err_out_free_res;
3355 } 3478 }
3479 return 0;
3480}
3356 3481
3357 cfg_offset = readl(h->vaddr + SA5_CTMEM_OFFSET); 3482static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
3358 h->cfgtable = remap_pci_mem(pci_resource_start(pdev, 3483{
3359 cfg_base_addr_index) + cfg_offset, 3484 u64 cfg_offset;
3360 sizeof(h->cfgtable)); 3485 u32 cfg_base_addr;
3486 u64 cfg_base_addr_index;
3487 u32 trans_offset;
3488 int rc;
3489
3490 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
3491 &cfg_base_addr_index, &cfg_offset);
3492 if (rc)
3493 return rc;
3494 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
3495 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
3496 if (!h->cfgtable)
3497 return -ENOMEM;
3361 /* Find performant mode table. */ 3498 /* Find performant mode table. */
3362 trans_offset = readl(&(h->cfgtable->TransMethodOffset)); 3499 trans_offset = readl(&h->cfgtable->TransMethodOffset);
3363 h->transtable = remap_pci_mem(pci_resource_start(pdev, 3500 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
3364 cfg_base_addr_index)+cfg_offset+trans_offset, 3501 cfg_base_addr_index)+cfg_offset+trans_offset,
3365 sizeof(*h->transtable)); 3502 sizeof(*h->transtable));
3503 if (!h->transtable)
3504 return -ENOMEM;
3505 return 0;
3506}
3366 3507
3367 h->board_id = board_id; 3508static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
3509{
3368 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); 3510 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
3369 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements)); 3511 if (h->max_commands < 16) {
3512 dev_warn(&h->pdev->dev, "Controller reports "
3513 "max supported commands of %d, an obvious lie. "
3514 "Using 16. Ensure that firmware is up to date.\n",
3515 h->max_commands);
3516 h->max_commands = 16;
3517 }
3518}
3370 3519
3520/* Interrogate the hardware for some limits:
3521 * max commands, max SG elements without chaining, and with chaining,
3522 * SG chain block size, etc.
3523 */
3524static void __devinit hpsa_find_board_params(struct ctlr_info *h)
3525{
3526 hpsa_get_max_perf_mode_cmds(h);
3527 h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
3528 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
3371 /* 3529 /*
3372 * Limit in-command s/g elements to 32 save dma'able memory. 3530 * Limit in-command s/g elements to 32 save dma'able memory.
3373 * Howvever spec says if 0, use 31 3531 * Howvever spec says if 0, use 31
3374 */ 3532 */
3375
3376 h->max_cmd_sg_entries = 31; 3533 h->max_cmd_sg_entries = 31;
3377 if (h->maxsgentries > 512) { 3534 if (h->maxsgentries > 512) {
3378 h->max_cmd_sg_entries = 32; 3535 h->max_cmd_sg_entries = 32;
@@ -3382,45 +3539,49 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
3382 h->maxsgentries = 31; /* default to traditional values */ 3539 h->maxsgentries = 31; /* default to traditional values */
3383 h->chainsize = 0; 3540 h->chainsize = 0;
3384 } 3541 }
3542}
3385 3543
3386 h->product_name = products[prod_index].product_name; 3544static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
3387 h->access = *(products[prod_index].access); 3545{
3388 /* Allow room for some ioctls */
3389 h->nr_cmds = h->max_commands - 4;
3390
3391 if ((readb(&h->cfgtable->Signature[0]) != 'C') || 3546 if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
3392 (readb(&h->cfgtable->Signature[1]) != 'I') || 3547 (readb(&h->cfgtable->Signature[1]) != 'I') ||
3393 (readb(&h->cfgtable->Signature[2]) != 'S') || 3548 (readb(&h->cfgtable->Signature[2]) != 'S') ||
3394 (readb(&h->cfgtable->Signature[3]) != 'S')) { 3549 (readb(&h->cfgtable->Signature[3]) != 'S')) {
3395 dev_warn(&pdev->dev, "not a valid CISS config table\n"); 3550 dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
3396 err = -ENODEV; 3551 return false;
3397 goto err_out_free_res;
3398 } 3552 }
3553 return true;
3554}
3555
3556/* Need to enable prefetch in the SCSI core for 6400 in x86 */
3557static inline void hpsa_enable_scsi_prefetch(struct ctlr_info *h)
3558{
3399#ifdef CONFIG_X86 3559#ifdef CONFIG_X86
3400 { 3560 u32 prefetch;
3401 /* Need to enable prefetch in the SCSI core for 6400 in x86 */ 3561
3402 u32 prefetch; 3562 prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
3403 prefetch = readl(&(h->cfgtable->SCSI_Prefetch)); 3563 prefetch |= 0x100;
3404 prefetch |= 0x100; 3564 writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
3405 writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
3406 }
3407#endif 3565#endif
3566}
3408 3567
3409 /* Disabling DMA prefetch for the P600 3568/* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
3410 * An ASIC bug may result in a prefetch beyond 3569 * in a prefetch beyond physical memory.
3411 * physical memory. 3570 */
3412 */ 3571static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
3413 if (board_id == 0x3225103C) { 3572{
3414 u32 dma_prefetch; 3573 u32 dma_prefetch;
3415 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
3416 dma_prefetch |= 0x8000;
3417 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
3418 }
3419 3574
3420 h->max_commands = readl(&(h->cfgtable->CmdsOutMax)); 3575 if (h->board_id != 0x3225103C)
3421 /* Update the field, and then ring the doorbell */ 3576 return;
3422 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest)); 3577 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
3423 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 3578 dma_prefetch |= 0x8000;
3579 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
3580}
3581
3582static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
3583{
3584 int i;
3424 3585
3425 /* under certain very rare conditions, this can take awhile. 3586 /* under certain very rare conditions, this can take awhile.
3426 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right 3587 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
@@ -3432,24 +3593,96 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
3432 /* delay and try again */ 3593 /* delay and try again */
3433 msleep(10); 3594 msleep(10);
3434 } 3595 }
3596}
3435 3597
3436#ifdef HPSA_DEBUG 3598static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h)
3437 print_cfg_table(&pdev->dev, h->cfgtable); 3599{
3438#endif /* HPSA_DEBUG */ 3600 u32 trans_support;
3439 3601
3602 trans_support = readl(&(h->cfgtable->TransportSupport));
3603 if (!(trans_support & SIMPLE_MODE))
3604 return -ENOTSUPP;
3605
3606 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
3607 /* Update the field, and then ring the doorbell */
3608 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
3609 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
3610 hpsa_wait_for_mode_change_ack(h);
3611 print_cfg_table(&h->pdev->dev, h->cfgtable);
3440 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) { 3612 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3441 dev_warn(&pdev->dev, "unable to get board into simple mode\n"); 3613 dev_warn(&h->pdev->dev,
3614 "unable to get board into simple mode\n");
3615 return -ENODEV;
3616 }
3617 return 0;
3618}
3619
3620static int __devinit hpsa_pci_init(struct ctlr_info *h)
3621{
3622 int prod_index, err;
3623
3624 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
3625 if (prod_index < 0)
3626 return -ENODEV;
3627 h->product_name = products[prod_index].product_name;
3628 h->access = *(products[prod_index].access);
3629
3630 if (hpsa_board_disabled(h->pdev)) {
3631 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
3632 return -ENODEV;
3633 }
3634 err = pci_enable_device(h->pdev);
3635 if (err) {
3636 dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
3637 return err;
3638 }
3639
3640 err = pci_request_regions(h->pdev, "hpsa");
3641 if (err) {
3642 dev_err(&h->pdev->dev,
3643 "cannot obtain PCI resources, aborting\n");
3644 return err;
3645 }
3646 hpsa_interrupt_mode(h);
3647 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
3648 if (err)
3649 goto err_out_free_res;
3650 h->vaddr = remap_pci_mem(h->paddr, 0x250);
3651 if (!h->vaddr) {
3652 err = -ENOMEM;
3653 goto err_out_free_res;
3654 }
3655 err = hpsa_wait_for_board_ready(h);
3656 if (err)
3657 goto err_out_free_res;
3658 err = hpsa_find_cfgtables(h);
3659 if (err)
3660 goto err_out_free_res;
3661 hpsa_find_board_params(h);
3662
3663 if (!hpsa_CISS_signature_present(h)) {
3442 err = -ENODEV; 3664 err = -ENODEV;
3443 goto err_out_free_res; 3665 goto err_out_free_res;
3444 } 3666 }
3667 hpsa_enable_scsi_prefetch(h);
3668 hpsa_p600_dma_prefetch_quirk(h);
3669 err = hpsa_enter_simple_mode(h);
3670 if (err)
3671 goto err_out_free_res;
3445 return 0; 3672 return 0;
3446 3673
3447err_out_free_res: 3674err_out_free_res:
3675 if (h->transtable)
3676 iounmap(h->transtable);
3677 if (h->cfgtable)
3678 iounmap(h->cfgtable);
3679 if (h->vaddr)
3680 iounmap(h->vaddr);
3448 /* 3681 /*
3449 * Deliberately omit pci_disable_device(): it does something nasty to 3682 * Deliberately omit pci_disable_device(): it does something nasty to
3450 * Smart Array controllers that pci_enable_device does not undo 3683 * Smart Array controllers that pci_enable_device does not undo
3451 */ 3684 */
3452 pci_release_regions(pdev); 3685 pci_release_regions(h->pdev);
3453 return err; 3686 return err;
3454} 3687}
3455 3688
@@ -3469,33 +3702,51 @@ static void __devinit hpsa_hba_inquiry(struct ctlr_info *h)
3469 } 3702 }
3470} 3703}
3471 3704
3705static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
3706{
3707 int rc, i;
3708
3709 if (!reset_devices)
3710 return 0;
3711
3712 /* Reset the controller with a PCI power-cycle or via doorbell */
3713 rc = hpsa_kdump_hard_reset_controller(pdev);
3714
3715 /* -ENOTSUPP here means we cannot reset the controller
3716 * but it's already (and still) up and running in
3717 * "performant mode". Or, it might be 640x, which can't reset
3718 * due to concerns about shared bbwc between 6402/6404 pair.
3719 */
3720 if (rc == -ENOTSUPP)
3721 return 0; /* just try to do the kdump anyhow. */
3722 if (rc)
3723 return -ENODEV;
3724 if (hpsa_reset_msi(pdev))
3725 return -ENODEV;
3726
3727 /* Now try to get the controller to respond to a no-op */
3728 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
3729 if (hpsa_noop(pdev) == 0)
3730 break;
3731 else
3732 dev_warn(&pdev->dev, "no-op failed%s\n",
3733 (i < 11 ? "; re-trying" : ""));
3734 }
3735 return 0;
3736}
3737
3472static int __devinit hpsa_init_one(struct pci_dev *pdev, 3738static int __devinit hpsa_init_one(struct pci_dev *pdev,
3473 const struct pci_device_id *ent) 3739 const struct pci_device_id *ent)
3474{ 3740{
3475 int i, rc; 3741 int dac, rc;
3476 int dac;
3477 struct ctlr_info *h; 3742 struct ctlr_info *h;
3478 3743
3479 if (number_of_controllers == 0) 3744 if (number_of_controllers == 0)
3480 printk(KERN_INFO DRIVER_NAME "\n"); 3745 printk(KERN_INFO DRIVER_NAME "\n");
3481 if (reset_devices) {
3482 /* Reset the controller with a PCI power-cycle */
3483 if (hpsa_hard_reset_controller(pdev) || hpsa_reset_msi(pdev))
3484 return -ENODEV;
3485 3746
3486 /* Some devices (notably the HP Smart Array 5i Controller) 3747 rc = hpsa_init_reset_devices(pdev);
3487 need a little pause here */ 3748 if (rc)
3488 msleep(HPSA_POST_RESET_PAUSE_MSECS); 3749 return rc;
3489
3490 /* Now try to get the controller to respond to a no-op */
3491 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
3492 if (hpsa_noop(pdev) == 0)
3493 break;
3494 else
3495 dev_warn(&pdev->dev, "no-op failed%s\n",
3496 (i < 11 ? "; re-trying" : ""));
3497 }
3498 }
3499 3750
3500 /* Command structures must be aligned on a 32-byte boundary because 3751 /* Command structures must be aligned on a 32-byte boundary because
3501 * the 5 lower bits of the address are used by the hardware. and by 3752 * the 5 lower bits of the address are used by the hardware. and by
@@ -3507,17 +3758,17 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
3507 if (!h) 3758 if (!h)
3508 return -ENOMEM; 3759 return -ENOMEM;
3509 3760
3761 h->pdev = pdev;
3510 h->busy_initializing = 1; 3762 h->busy_initializing = 1;
3511 INIT_HLIST_HEAD(&h->cmpQ); 3763 INIT_HLIST_HEAD(&h->cmpQ);
3512 INIT_HLIST_HEAD(&h->reqQ); 3764 INIT_HLIST_HEAD(&h->reqQ);
3513 rc = hpsa_pci_init(h, pdev); 3765 rc = hpsa_pci_init(h);
3514 if (rc != 0) 3766 if (rc != 0)
3515 goto clean1; 3767 goto clean1;
3516 3768
3517 sprintf(h->devname, "hpsa%d", number_of_controllers); 3769 sprintf(h->devname, "hpsa%d", number_of_controllers);
3518 h->ctlr = number_of_controllers; 3770 h->ctlr = number_of_controllers;
3519 number_of_controllers++; 3771 number_of_controllers++;
3520 h->pdev = pdev;
3521 3772
3522 /* configure PCI DMA stuff */ 3773 /* configure PCI DMA stuff */
3523 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 3774 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
@@ -3535,8 +3786,13 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
3535 3786
3536 /* make sure the board interrupts are off */ 3787 /* make sure the board interrupts are off */
3537 h->access.set_intr_mask(h, HPSA_INTR_OFF); 3788 h->access.set_intr_mask(h, HPSA_INTR_OFF);
3538 rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr, 3789
3539 IRQF_DISABLED, h->devname, h); 3790 if (h->msix_vector || h->msi_vector)
3791 rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr_msi,
3792 IRQF_DISABLED, h->devname, h);
3793 else
3794 rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr_intx,
3795 IRQF_DISABLED, h->devname, h);
3540 if (rc) { 3796 if (rc) {
3541 dev_err(&pdev->dev, "unable to get irq %d for %s\n", 3797 dev_err(&pdev->dev, "unable to get irq %d for %s\n",
3542 h->intr[PERF_MODE_INT], h->devname); 3798 h->intr[PERF_MODE_INT], h->devname);
@@ -3663,6 +3919,8 @@ static void __devexit hpsa_remove_one(struct pci_dev *pdev)
3663 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */ 3919 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */
3664 hpsa_shutdown(pdev); 3920 hpsa_shutdown(pdev);
3665 iounmap(h->vaddr); 3921 iounmap(h->vaddr);
3922 iounmap(h->transtable);
3923 iounmap(h->cfgtable);
3666 hpsa_free_sg_chain_blocks(h); 3924 hpsa_free_sg_chain_blocks(h);
3667 pci_free_consistent(h->pdev, 3925 pci_free_consistent(h->pdev,
3668 h->nr_cmds * sizeof(struct CommandList), 3926 h->nr_cmds * sizeof(struct CommandList),
@@ -3742,38 +4000,35 @@ static void calc_bucket_map(int bucket[], int num_buckets,
3742 } 4000 }
3743} 4001}
3744 4002
3745static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) 4003static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h)
3746{ 4004{
3747 u32 trans_support; 4005 int i;
3748 u64 trans_offset; 4006 unsigned long register_value;
4007
4008 /* This is a bit complicated. There are 8 registers on
4009 * the controller which we write to to tell it 8 different
4010 * sizes of commands which there may be. It's a way of
4011 * reducing the DMA done to fetch each command. Encoded into
4012 * each command's tag are 3 bits which communicate to the controller
4013 * which of the eight sizes that command fits within. The size of
4014 * each command depends on how many scatter gather entries there are.
4015 * Each SG entry requires 16 bytes. The eight registers are programmed
4016 * with the number of 16-byte blocks a command of that size requires.
4017 * The smallest command possible requires 5 such 16 byte blocks.
4018 * the largest command possible requires MAXSGENTRIES + 4 16-byte
4019 * blocks. Note, this only extends to the SG entries contained
4020 * within the command block, and does not extend to chained blocks
4021 * of SG elements. bft[] contains the eight values we write to
4022 * the registers. They are not evenly distributed, but have more
4023 * sizes for small commands, and fewer sizes for larger commands.
4024 */
4025 int bft[8] = {5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4};
4026 BUILD_BUG_ON(28 > MAXSGENTRIES + 4);
3749 /* 5 = 1 s/g entry or 4k 4027 /* 5 = 1 s/g entry or 4k
3750 * 6 = 2 s/g entry or 8k 4028 * 6 = 2 s/g entry or 8k
3751 * 8 = 4 s/g entry or 16k 4029 * 8 = 4 s/g entry or 16k
3752 * 10 = 6 s/g entry or 24k 4030 * 10 = 6 s/g entry or 24k
3753 */ 4031 */
3754 int bft[8] = {5, 6, 8, 10, 12, 20, 28, 35}; /* for scatter/gathers */
3755 int i = 0;
3756 int l = 0;
3757 unsigned long register_value;
3758
3759 trans_support = readl(&(h->cfgtable->TransportSupport));
3760 if (!(trans_support & PERFORMANT_MODE))
3761 return;
3762
3763 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
3764 h->max_sg_entries = 32;
3765 /* Performant mode ring buffer and supporting data structures */
3766 h->reply_pool_size = h->max_commands * sizeof(u64);
3767 h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
3768 &(h->reply_pool_dhandle));
3769
3770 /* Need a block fetch table for performant mode */
3771 h->blockFetchTable = kmalloc(((h->max_sg_entries+1) *
3772 sizeof(u32)), GFP_KERNEL);
3773
3774 if ((h->reply_pool == NULL)
3775 || (h->blockFetchTable == NULL))
3776 goto clean_up;
3777 4032
3778 h->reply_pool_wraparound = 1; /* spec: init to 1 */ 4033 h->reply_pool_wraparound = 1; /* spec: init to 1 */
3779 4034
@@ -3781,7 +4036,6 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
3781 memset(h->reply_pool, 0, h->reply_pool_size); 4036 memset(h->reply_pool, 0, h->reply_pool_size);
3782 h->reply_pool_head = h->reply_pool; 4037 h->reply_pool_head = h->reply_pool;
3783 4038
3784 trans_offset = readl(&(h->cfgtable->TransMethodOffset));
3785 bft[7] = h->max_sg_entries + 4; 4039 bft[7] = h->max_sg_entries + 4;
3786 calc_bucket_map(bft, ARRAY_SIZE(bft), 32, h->blockFetchTable); 4040 calc_bucket_map(bft, ARRAY_SIZE(bft), 32, h->blockFetchTable);
3787 for (i = 0; i < 8; i++) 4041 for (i = 0; i < 8; i++)
@@ -3797,23 +4051,39 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
3797 writel(CFGTBL_Trans_Performant, 4051 writel(CFGTBL_Trans_Performant,
3798 &(h->cfgtable->HostWrite.TransportRequest)); 4052 &(h->cfgtable->HostWrite.TransportRequest));
3799 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 4053 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
3800 /* under certain very rare conditions, this can take awhile. 4054 hpsa_wait_for_mode_change_ack(h);
3801 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
3802 * as we enter this code.) */
3803 for (l = 0; l < MAX_CONFIG_WAIT; l++) {
3804 register_value = readl(h->vaddr + SA5_DOORBELL);
3805 if (!(register_value & CFGTBL_ChangeReq))
3806 break;
3807 /* delay and try again */
3808 set_current_state(TASK_INTERRUPTIBLE);
3809 schedule_timeout(10);
3810 }
3811 register_value = readl(&(h->cfgtable->TransportActive)); 4055 register_value = readl(&(h->cfgtable->TransportActive));
3812 if (!(register_value & CFGTBL_Trans_Performant)) { 4056 if (!(register_value & CFGTBL_Trans_Performant)) {
3813 dev_warn(&h->pdev->dev, "unable to get board into" 4057 dev_warn(&h->pdev->dev, "unable to get board into"
3814 " performant mode\n"); 4058 " performant mode\n");
3815 return; 4059 return;
3816 } 4060 }
4061}
4062
4063static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
4064{
4065 u32 trans_support;
4066
4067 trans_support = readl(&(h->cfgtable->TransportSupport));
4068 if (!(trans_support & PERFORMANT_MODE))
4069 return;
4070
4071 hpsa_get_max_perf_mode_cmds(h);
4072 h->max_sg_entries = 32;
4073 /* Performant mode ring buffer and supporting data structures */
4074 h->reply_pool_size = h->max_commands * sizeof(u64);
4075 h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
4076 &(h->reply_pool_dhandle));
4077
4078 /* Need a block fetch table for performant mode */
4079 h->blockFetchTable = kmalloc(((h->max_sg_entries+1) *
4080 sizeof(u32)), GFP_KERNEL);
4081
4082 if ((h->reply_pool == NULL)
4083 || (h->blockFetchTable == NULL))
4084 goto clean_up;
4085
4086 hpsa_enter_performant_mode(h);
3817 4087
3818 /* Change the access methods to the performant access methods */ 4088 /* Change the access methods to the performant access methods */
3819 h->access = SA5_performant_access; 4089 h->access = SA5_performant_access;
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 1bb5233b09a0..a203ef65cb50 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -53,7 +53,6 @@ struct ctlr_info {
53 int ctlr; 53 int ctlr;
54 char devname[8]; 54 char devname[8];
55 char *product_name; 55 char *product_name;
56 char firm_ver[4]; /* Firmware version */
57 struct pci_dev *pdev; 56 struct pci_dev *pdev;
58 u32 board_id; 57 u32 board_id;
59 void __iomem *vaddr; 58 void __iomem *vaddr;
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 78de9b6d1e0b..f5c4c3cc0530 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -100,6 +100,7 @@
100/* Configuration Table */ 100/* Configuration Table */
101#define CFGTBL_ChangeReq 0x00000001l 101#define CFGTBL_ChangeReq 0x00000001l
102#define CFGTBL_AccCmds 0x00000001l 102#define CFGTBL_AccCmds 0x00000001l
103#define DOORBELL_CTLR_RESET 0x00000004l
103 104
104#define CFGTBL_Trans_Simple 0x00000002l 105#define CFGTBL_Trans_Simple 0x00000002l
105#define CFGTBL_Trans_Performant 0x00000004l 106#define CFGTBL_Trans_Performant 0x00000004l
@@ -339,6 +340,9 @@ struct CfgTable {
339 u32 MaxPhysicalDevices; 340 u32 MaxPhysicalDevices;
340 u32 MaxPhysicalDrivesPerLogicalUnit; 341 u32 MaxPhysicalDrivesPerLogicalUnit;
341 u32 MaxPerformantModeCommands; 342 u32 MaxPerformantModeCommands;
343 u8 reserved[0x78 - 0x58];
344 u32 misc_fw_support; /* offset 0x78 */
345#define MISC_FW_DOORBELL_RESET (0x02)
342}; 346};
343 347
344#define NUM_BLOCKFETCH_ENTRIES 8 348#define NUM_BLOCKFETCH_ENTRIES 8
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index 645f7cdf21ab..0729f150b33a 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1157,7 +1157,7 @@ free_pci_regions:
1157disable_pci_device: 1157disable_pci_device:
1158 pci_disable_device(pcidev); 1158 pci_disable_device(pcidev);
1159 1159
1160 dprintk("scsi%d: hptiop_probe fail\n", host->host_no); 1160 dprintk("scsi%d: hptiop_probe fail\n", host ? host->host_no : 0);
1161 return -ENODEV; 1161 return -ENODEV;
1162} 1162}
1163 1163
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index fef49521cbc3..bd96cecaa619 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -504,12 +504,23 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
504 if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS) 504 if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
505 vhost->action = action; 505 vhost->action = action;
506 break; 506 break;
507 case IBMVFC_HOST_ACTION_LOGO:
508 case IBMVFC_HOST_ACTION_INIT: 507 case IBMVFC_HOST_ACTION_INIT:
509 case IBMVFC_HOST_ACTION_TGT_DEL: 508 case IBMVFC_HOST_ACTION_TGT_DEL:
509 switch (vhost->action) {
510 case IBMVFC_HOST_ACTION_RESET:
511 case IBMVFC_HOST_ACTION_REENABLE:
512 break;
513 default:
514 vhost->action = action;
515 break;
516 };
517 break;
518 case IBMVFC_HOST_ACTION_LOGO:
510 case IBMVFC_HOST_ACTION_QUERY_TGTS: 519 case IBMVFC_HOST_ACTION_QUERY_TGTS:
511 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: 520 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
512 case IBMVFC_HOST_ACTION_NONE: 521 case IBMVFC_HOST_ACTION_NONE:
522 case IBMVFC_HOST_ACTION_RESET:
523 case IBMVFC_HOST_ACTION_REENABLE:
513 default: 524 default:
514 vhost->action = action; 525 vhost->action = action;
515 break; 526 break;
@@ -641,7 +652,7 @@ static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
641 **/ 652 **/
642static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost) 653static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
643{ 654{
644 long rc; 655 long rc = 0;
645 struct vio_dev *vdev = to_vio_dev(vhost->dev); 656 struct vio_dev *vdev = to_vio_dev(vhost->dev);
646 struct ibmvfc_crq_queue *crq = &vhost->crq; 657 struct ibmvfc_crq_queue *crq = &vhost->crq;
647 658
@@ -649,6 +660,8 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
649 free_irq(vdev->irq, vhost); 660 free_irq(vdev->irq, vhost);
650 tasklet_kill(&vhost->tasklet); 661 tasklet_kill(&vhost->tasklet);
651 do { 662 do {
663 if (rc)
664 msleep(100);
652 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); 665 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
653 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 666 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
654 667
@@ -667,11 +680,13 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
667 **/ 680 **/
668static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost) 681static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
669{ 682{
670 int rc; 683 int rc = 0;
671 struct vio_dev *vdev = to_vio_dev(vhost->dev); 684 struct vio_dev *vdev = to_vio_dev(vhost->dev);
672 685
673 /* Re-enable the CRQ */ 686 /* Re-enable the CRQ */
674 do { 687 do {
688 if (rc)
689 msleep(100);
675 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address); 690 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
676 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc)); 691 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
677 692
@@ -690,15 +705,19 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
690 **/ 705 **/
691static int ibmvfc_reset_crq(struct ibmvfc_host *vhost) 706static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
692{ 707{
693 int rc; 708 int rc = 0;
709 unsigned long flags;
694 struct vio_dev *vdev = to_vio_dev(vhost->dev); 710 struct vio_dev *vdev = to_vio_dev(vhost->dev);
695 struct ibmvfc_crq_queue *crq = &vhost->crq; 711 struct ibmvfc_crq_queue *crq = &vhost->crq;
696 712
697 /* Close the CRQ */ 713 /* Close the CRQ */
698 do { 714 do {
715 if (rc)
716 msleep(100);
699 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); 717 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
700 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 718 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
701 719
720 spin_lock_irqsave(vhost->host->host_lock, flags);
702 vhost->state = IBMVFC_NO_CRQ; 721 vhost->state = IBMVFC_NO_CRQ;
703 vhost->logged_in = 0; 722 vhost->logged_in = 0;
704 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); 723 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
@@ -716,6 +735,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
716 dev_warn(vhost->dev, "Partner adapter not ready\n"); 735 dev_warn(vhost->dev, "Partner adapter not ready\n");
717 else if (rc != 0) 736 else if (rc != 0)
718 dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc); 737 dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);
738 spin_unlock_irqrestore(vhost->host->host_lock, flags);
719 739
720 return rc; 740 return rc;
721} 741}
@@ -821,17 +841,9 @@ static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
821 **/ 841 **/
822static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost) 842static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost)
823{ 843{
824 int rc;
825
826 scsi_block_requests(vhost->host);
827 ibmvfc_purge_requests(vhost, DID_ERROR); 844 ibmvfc_purge_requests(vhost, DID_ERROR);
828 if ((rc = ibmvfc_reset_crq(vhost)) || 845 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
829 (rc = ibmvfc_send_crq_init(vhost)) || 846 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
830 (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
831 dev_err(vhost->dev, "Error after reset rc=%d\n", rc);
832 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
833 } else
834 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
835} 847}
836 848
837/** 849/**
@@ -2299,6 +2311,7 @@ static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
2299 int rc = FAILED; 2311 int rc = FAILED;
2300 2312
2301 ENTER; 2313 ENTER;
2314 fc_block_scsi_eh(cmd);
2302 ibmvfc_wait_while_resetting(vhost); 2315 ibmvfc_wait_while_resetting(vhost);
2303 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); 2316 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
2304 abort_rc = ibmvfc_abort_task_set(sdev); 2317 abort_rc = ibmvfc_abort_task_set(sdev);
@@ -2325,6 +2338,7 @@ static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
2325 int rc = FAILED; 2338 int rc = FAILED;
2326 2339
2327 ENTER; 2340 ENTER;
2341 fc_block_scsi_eh(cmd);
2328 ibmvfc_wait_while_resetting(vhost); 2342 ibmvfc_wait_while_resetting(vhost);
2329 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET); 2343 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
2330 reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN"); 2344 reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
@@ -2389,6 +2403,7 @@ static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
2389 unsigned long cancel_rc = 0; 2403 unsigned long cancel_rc = 0;
2390 2404
2391 ENTER; 2405 ENTER;
2406 fc_block_scsi_eh(cmd);
2392 ibmvfc_wait_while_resetting(vhost); 2407 ibmvfc_wait_while_resetting(vhost);
2393 starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset); 2408 starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
2394 reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target"); 2409 reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
@@ -2410,6 +2425,7 @@ static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
2410 int rc; 2425 int rc;
2411 struct ibmvfc_host *vhost = shost_priv(cmd->device->host); 2426 struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
2412 2427
2428 fc_block_scsi_eh(cmd);
2413 dev_err(vhost->dev, "Resetting connection due to error recovery\n"); 2429 dev_err(vhost->dev, "Resetting connection due to error recovery\n");
2414 rc = ibmvfc_issue_fc_host_lip(vhost->host); 2430 rc = ibmvfc_issue_fc_host_lip(vhost->host);
2415 return rc ? FAILED : SUCCESS; 2431 return rc ? FAILED : SUCCESS;
@@ -2606,22 +2622,13 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
2606 dev_info(vhost->dev, "Re-enabling adapter\n"); 2622 dev_info(vhost->dev, "Re-enabling adapter\n");
2607 vhost->client_migrated = 1; 2623 vhost->client_migrated = 1;
2608 ibmvfc_purge_requests(vhost, DID_REQUEUE); 2624 ibmvfc_purge_requests(vhost, DID_REQUEUE);
2609 if ((rc = ibmvfc_reenable_crq_queue(vhost)) || 2625 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2610 (rc = ibmvfc_send_crq_init(vhost))) { 2626 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
2611 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
2612 dev_err(vhost->dev, "Error after enable (rc=%ld)\n", rc);
2613 } else
2614 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2615 } else { 2627 } else {
2616 dev_err(vhost->dev, "Virtual adapter failed (rc=%d)\n", crq->format); 2628 dev_err(vhost->dev, "Virtual adapter failed (rc=%d)\n", crq->format);
2617
2618 ibmvfc_purge_requests(vhost, DID_ERROR); 2629 ibmvfc_purge_requests(vhost, DID_ERROR);
2619 if ((rc = ibmvfc_reset_crq(vhost)) || 2630 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2620 (rc = ibmvfc_send_crq_init(vhost))) { 2631 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
2621 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
2622 dev_err(vhost->dev, "Error after reset (rc=%ld)\n", rc);
2623 } else
2624 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2625 } 2632 }
2626 return; 2633 return;
2627 case IBMVFC_CRQ_CMD_RSP: 2634 case IBMVFC_CRQ_CMD_RSP:
@@ -4123,6 +4130,8 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
4123 case IBMVFC_HOST_ACTION_TGT_DEL: 4130 case IBMVFC_HOST_ACTION_TGT_DEL:
4124 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: 4131 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
4125 case IBMVFC_HOST_ACTION_QUERY: 4132 case IBMVFC_HOST_ACTION_QUERY:
4133 case IBMVFC_HOST_ACTION_RESET:
4134 case IBMVFC_HOST_ACTION_REENABLE:
4126 default: 4135 default:
4127 break; 4136 break;
4128 }; 4137 };
@@ -4220,6 +4229,7 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
4220 struct ibmvfc_target *tgt; 4229 struct ibmvfc_target *tgt;
4221 unsigned long flags; 4230 unsigned long flags;
4222 struct fc_rport *rport; 4231 struct fc_rport *rport;
4232 int rc;
4223 4233
4224 ibmvfc_log_ae(vhost, vhost->events_to_log); 4234 ibmvfc_log_ae(vhost, vhost->events_to_log);
4225 spin_lock_irqsave(vhost->host->host_lock, flags); 4235 spin_lock_irqsave(vhost->host->host_lock, flags);
@@ -4229,6 +4239,27 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
4229 case IBMVFC_HOST_ACTION_LOGO_WAIT: 4239 case IBMVFC_HOST_ACTION_LOGO_WAIT:
4230 case IBMVFC_HOST_ACTION_INIT_WAIT: 4240 case IBMVFC_HOST_ACTION_INIT_WAIT:
4231 break; 4241 break;
4242 case IBMVFC_HOST_ACTION_RESET:
4243 vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
4244 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4245 rc = ibmvfc_reset_crq(vhost);
4246 spin_lock_irqsave(vhost->host->host_lock, flags);
4247 if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
4248 (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
4249 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4250 dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
4251 }
4252 break;
4253 case IBMVFC_HOST_ACTION_REENABLE:
4254 vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
4255 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4256 rc = ibmvfc_reenable_crq_queue(vhost);
4257 spin_lock_irqsave(vhost->host->host_lock, flags);
4258 if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
4259 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4260 dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
4261 }
4262 break;
4232 case IBMVFC_HOST_ACTION_LOGO: 4263 case IBMVFC_HOST_ACTION_LOGO:
4233 vhost->job_step(vhost); 4264 vhost->job_step(vhost);
4234 break; 4265 break;
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 7e9742764e4b..d7e8dcd90650 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -29,8 +29,8 @@
29#include "viosrp.h" 29#include "viosrp.h"
30 30
31#define IBMVFC_NAME "ibmvfc" 31#define IBMVFC_NAME "ibmvfc"
32#define IBMVFC_DRIVER_VERSION "1.0.7" 32#define IBMVFC_DRIVER_VERSION "1.0.8"
33#define IBMVFC_DRIVER_DATE "(October 16, 2009)" 33#define IBMVFC_DRIVER_DATE "(June 17, 2010)"
34 34
35#define IBMVFC_DEFAULT_TIMEOUT 60 35#define IBMVFC_DEFAULT_TIMEOUT 60
36#define IBMVFC_ADISC_CANCEL_TIMEOUT 45 36#define IBMVFC_ADISC_CANCEL_TIMEOUT 45
@@ -649,6 +649,8 @@ struct ibmvfc_event_pool {
649 649
650enum ibmvfc_host_action { 650enum ibmvfc_host_action {
651 IBMVFC_HOST_ACTION_NONE = 0, 651 IBMVFC_HOST_ACTION_NONE = 0,
652 IBMVFC_HOST_ACTION_RESET,
653 IBMVFC_HOST_ACTION_REENABLE,
652 IBMVFC_HOST_ACTION_LOGO, 654 IBMVFC_HOST_ACTION_LOGO,
653 IBMVFC_HOST_ACTION_LOGO_WAIT, 655 IBMVFC_HOST_ACTION_LOGO_WAIT,
654 IBMVFC_HOST_ACTION_INIT, 656 IBMVFC_HOST_ACTION_INIT,
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index aad35cc41e49..67f78a470f5f 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -73,6 +73,7 @@
73#include <linux/slab.h> 73#include <linux/slab.h>
74#include <linux/of.h> 74#include <linux/of.h>
75#include <linux/pm.h> 75#include <linux/pm.h>
76#include <linux/kthread.h>
76#include <asm/firmware.h> 77#include <asm/firmware.h>
77#include <asm/vio.h> 78#include <asm/vio.h>
78#include <scsi/scsi.h> 79#include <scsi/scsi.h>
@@ -101,7 +102,7 @@ static int client_reserve = 1;
101 102
102static struct scsi_transport_template *ibmvscsi_transport_template; 103static struct scsi_transport_template *ibmvscsi_transport_template;
103 104
104#define IBMVSCSI_VERSION "1.5.8" 105#define IBMVSCSI_VERSION "1.5.9"
105 106
106static struct ibmvscsi_ops *ibmvscsi_ops; 107static struct ibmvscsi_ops *ibmvscsi_ops;
107 108
@@ -473,23 +474,26 @@ static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
473 */ 474 */
474static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code) 475static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
475{ 476{
476 struct srp_event_struct *tmp_evt, *pos; 477 struct srp_event_struct *evt;
477 unsigned long flags; 478 unsigned long flags;
478 479
479 spin_lock_irqsave(hostdata->host->host_lock, flags); 480 spin_lock_irqsave(hostdata->host->host_lock, flags);
480 list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) { 481 while (!list_empty(&hostdata->sent)) {
481 list_del(&tmp_evt->list); 482 evt = list_first_entry(&hostdata->sent, struct srp_event_struct, list);
482 del_timer(&tmp_evt->timer); 483 list_del(&evt->list);
483 if (tmp_evt->cmnd) { 484 del_timer(&evt->timer);
484 tmp_evt->cmnd->result = (error_code << 16); 485
485 unmap_cmd_data(&tmp_evt->iu.srp.cmd, 486 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
486 tmp_evt, 487 if (evt->cmnd) {
487 tmp_evt->hostdata->dev); 488 evt->cmnd->result = (error_code << 16);
488 if (tmp_evt->cmnd_done) 489 unmap_cmd_data(&evt->iu.srp.cmd, evt,
489 tmp_evt->cmnd_done(tmp_evt->cmnd); 490 evt->hostdata->dev);
490 } else if (tmp_evt->done) 491 if (evt->cmnd_done)
491 tmp_evt->done(tmp_evt); 492 evt->cmnd_done(evt->cmnd);
492 free_event_struct(&tmp_evt->hostdata->pool, tmp_evt); 493 } else if (evt->done)
494 evt->done(evt);
495 free_event_struct(&evt->hostdata->pool, evt);
496 spin_lock_irqsave(hostdata->host->host_lock, flags);
493 } 497 }
494 spin_unlock_irqrestore(hostdata->host->host_lock, flags); 498 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
495} 499}
@@ -504,14 +508,8 @@ static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata)
504 atomic_set(&hostdata->request_limit, 0); 508 atomic_set(&hostdata->request_limit, 0);
505 509
506 purge_requests(hostdata, DID_ERROR); 510 purge_requests(hostdata, DID_ERROR);
507 if ((ibmvscsi_ops->reset_crq_queue(&hostdata->queue, hostdata)) || 511 hostdata->reset_crq = 1;
508 (ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0)) || 512 wake_up(&hostdata->work_wait_q);
509 (vio_enable_interrupts(to_vio_dev(hostdata->dev)))) {
510 atomic_set(&hostdata->request_limit, -1);
511 dev_err(hostdata->dev, "error after reset\n");
512 }
513
514 scsi_unblock_requests(hostdata->host);
515} 513}
516 514
517/** 515/**
@@ -550,6 +548,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
550 u64 *crq_as_u64 = (u64 *) &evt_struct->crq; 548 u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
551 int request_status = 0; 549 int request_status = 0;
552 int rc; 550 int rc;
551 int srp_req = 0;
553 552
554 /* If we have exhausted our request limit, just fail this request, 553 /* If we have exhausted our request limit, just fail this request,
555 * unless it is for a reset or abort. 554 * unless it is for a reset or abort.
@@ -558,6 +557,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
558 * can handle more requests (can_queue) when we actually can't 557 * can handle more requests (can_queue) when we actually can't
559 */ 558 */
560 if (evt_struct->crq.format == VIOSRP_SRP_FORMAT) { 559 if (evt_struct->crq.format == VIOSRP_SRP_FORMAT) {
560 srp_req = 1;
561 request_status = 561 request_status =
562 atomic_dec_if_positive(&hostdata->request_limit); 562 atomic_dec_if_positive(&hostdata->request_limit);
563 /* If request limit was -1 when we started, it is now even 563 /* If request limit was -1 when we started, it is now even
@@ -632,7 +632,8 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
632 goto send_busy; 632 goto send_busy;
633 } 633 }
634 dev_err(hostdata->dev, "send error %d\n", rc); 634 dev_err(hostdata->dev, "send error %d\n", rc);
635 atomic_inc(&hostdata->request_limit); 635 if (srp_req)
636 atomic_inc(&hostdata->request_limit);
636 goto send_error; 637 goto send_error;
637 } 638 }
638 639
@@ -642,7 +643,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
642 unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev); 643 unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
643 644
644 free_event_struct(&hostdata->pool, evt_struct); 645 free_event_struct(&hostdata->pool, evt_struct);
645 if (request_status != -1) 646 if (srp_req && request_status != -1)
646 atomic_inc(&hostdata->request_limit); 647 atomic_inc(&hostdata->request_limit);
647 return SCSI_MLQUEUE_HOST_BUSY; 648 return SCSI_MLQUEUE_HOST_BUSY;
648 649
@@ -1462,30 +1463,14 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1462 /* We need to re-setup the interpartition connection */ 1463 /* We need to re-setup the interpartition connection */
1463 dev_info(hostdata->dev, "Re-enabling adapter!\n"); 1464 dev_info(hostdata->dev, "Re-enabling adapter!\n");
1464 hostdata->client_migrated = 1; 1465 hostdata->client_migrated = 1;
1466 hostdata->reenable_crq = 1;
1465 purge_requests(hostdata, DID_REQUEUE); 1467 purge_requests(hostdata, DID_REQUEUE);
1466 if ((ibmvscsi_ops->reenable_crq_queue(&hostdata->queue, 1468 wake_up(&hostdata->work_wait_q);
1467 hostdata)) ||
1468 (ibmvscsi_ops->send_crq(hostdata,
1469 0xC001000000000000LL, 0))) {
1470 atomic_set(&hostdata->request_limit,
1471 -1);
1472 dev_err(hostdata->dev, "error after enable\n");
1473 }
1474 } else { 1469 } else {
1475 dev_err(hostdata->dev, "Virtual adapter failed rc %d!\n", 1470 dev_err(hostdata->dev, "Virtual adapter failed rc %d!\n",
1476 crq->format); 1471 crq->format);
1477 1472 ibmvscsi_reset_host(hostdata);
1478 purge_requests(hostdata, DID_ERROR);
1479 if ((ibmvscsi_ops->reset_crq_queue(&hostdata->queue,
1480 hostdata)) ||
1481 (ibmvscsi_ops->send_crq(hostdata,
1482 0xC001000000000000LL, 0))) {
1483 atomic_set(&hostdata->request_limit,
1484 -1);
1485 dev_err(hostdata->dev, "error after reset\n");
1486 }
1487 } 1473 }
1488 scsi_unblock_requests(hostdata->host);
1489 return; 1474 return;
1490 case 0x80: /* real payload */ 1475 case 0x80: /* real payload */
1491 break; 1476 break;
@@ -1850,6 +1835,75 @@ static unsigned long ibmvscsi_get_desired_dma(struct vio_dev *vdev)
1850 return desired_io; 1835 return desired_io;
1851} 1836}
1852 1837
1838static void ibmvscsi_do_work(struct ibmvscsi_host_data *hostdata)
1839{
1840 int rc;
1841 char *action = "reset";
1842
1843 if (hostdata->reset_crq) {
1844 smp_rmb();
1845 hostdata->reset_crq = 0;
1846
1847 rc = ibmvscsi_ops->reset_crq_queue(&hostdata->queue, hostdata);
1848 if (!rc)
1849 rc = ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0);
1850 if (!rc)
1851 rc = vio_enable_interrupts(to_vio_dev(hostdata->dev));
1852 } else if (hostdata->reenable_crq) {
1853 smp_rmb();
1854 action = "enable";
1855 rc = ibmvscsi_ops->reenable_crq_queue(&hostdata->queue, hostdata);
1856 hostdata->reenable_crq = 0;
1857 if (!rc)
1858 rc = ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0);
1859 } else
1860 return;
1861
1862 if (rc) {
1863 atomic_set(&hostdata->request_limit, -1);
1864 dev_err(hostdata->dev, "error after %s\n", action);
1865 }
1866
1867 scsi_unblock_requests(hostdata->host);
1868}
1869
1870static int ibmvscsi_work_to_do(struct ibmvscsi_host_data *hostdata)
1871{
1872 if (kthread_should_stop())
1873 return 1;
1874 else if (hostdata->reset_crq) {
1875 smp_rmb();
1876 return 1;
1877 } else if (hostdata->reenable_crq) {
1878 smp_rmb();
1879 return 1;
1880 }
1881
1882 return 0;
1883}
1884
1885static int ibmvscsi_work(void *data)
1886{
1887 struct ibmvscsi_host_data *hostdata = data;
1888 int rc;
1889
1890 set_user_nice(current, -20);
1891
1892 while (1) {
1893 rc = wait_event_interruptible(hostdata->work_wait_q,
1894 ibmvscsi_work_to_do(hostdata));
1895
1896 BUG_ON(rc);
1897
1898 if (kthread_should_stop())
1899 break;
1900
1901 ibmvscsi_do_work(hostdata);
1902 }
1903
1904 return 0;
1905}
1906
1853/** 1907/**
1854 * Called by bus code for each adapter 1908 * Called by bus code for each adapter
1855 */ 1909 */
@@ -1875,6 +1929,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1875 hostdata = shost_priv(host); 1929 hostdata = shost_priv(host);
1876 memset(hostdata, 0x00, sizeof(*hostdata)); 1930 memset(hostdata, 0x00, sizeof(*hostdata));
1877 INIT_LIST_HEAD(&hostdata->sent); 1931 INIT_LIST_HEAD(&hostdata->sent);
1932 init_waitqueue_head(&hostdata->work_wait_q);
1878 hostdata->host = host; 1933 hostdata->host = host;
1879 hostdata->dev = dev; 1934 hostdata->dev = dev;
1880 atomic_set(&hostdata->request_limit, -1); 1935 atomic_set(&hostdata->request_limit, -1);
@@ -1885,10 +1940,19 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1885 goto persist_bufs_failed; 1940 goto persist_bufs_failed;
1886 } 1941 }
1887 1942
1943 hostdata->work_thread = kthread_run(ibmvscsi_work, hostdata, "%s_%d",
1944 "ibmvscsi", host->host_no);
1945
1946 if (IS_ERR(hostdata->work_thread)) {
1947 dev_err(&vdev->dev, "couldn't initialize kthread. rc=%ld\n",
1948 PTR_ERR(hostdata->work_thread));
1949 goto init_crq_failed;
1950 }
1951
1888 rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events); 1952 rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events);
1889 if (rc != 0 && rc != H_RESOURCE) { 1953 if (rc != 0 && rc != H_RESOURCE) {
1890 dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc); 1954 dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
1891 goto init_crq_failed; 1955 goto kill_kthread;
1892 } 1956 }
1893 if (initialize_event_pool(&hostdata->pool, max_events, hostdata) != 0) { 1957 if (initialize_event_pool(&hostdata->pool, max_events, hostdata) != 0) {
1894 dev_err(&vdev->dev, "couldn't initialize event pool\n"); 1958 dev_err(&vdev->dev, "couldn't initialize event pool\n");
@@ -1944,6 +2008,8 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1944 release_event_pool(&hostdata->pool, hostdata); 2008 release_event_pool(&hostdata->pool, hostdata);
1945 init_pool_failed: 2009 init_pool_failed:
1946 ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events); 2010 ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events);
2011 kill_kthread:
2012 kthread_stop(hostdata->work_thread);
1947 init_crq_failed: 2013 init_crq_failed:
1948 unmap_persist_bufs(hostdata); 2014 unmap_persist_bufs(hostdata);
1949 persist_bufs_failed: 2015 persist_bufs_failed:
@@ -1960,6 +2026,7 @@ static int ibmvscsi_remove(struct vio_dev *vdev)
1960 ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, 2026 ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata,
1961 max_events); 2027 max_events);
1962 2028
2029 kthread_stop(hostdata->work_thread);
1963 srp_remove_host(hostdata->host); 2030 srp_remove_host(hostdata->host);
1964 scsi_remove_host(hostdata->host); 2031 scsi_remove_host(hostdata->host);
1965 scsi_host_put(hostdata->host); 2032 scsi_host_put(hostdata->host);
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 9cb7c6a773e1..02197a2b22b9 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -91,12 +91,16 @@ struct event_pool {
91struct ibmvscsi_host_data { 91struct ibmvscsi_host_data {
92 atomic_t request_limit; 92 atomic_t request_limit;
93 int client_migrated; 93 int client_migrated;
94 int reset_crq;
95 int reenable_crq;
94 struct device *dev; 96 struct device *dev;
95 struct event_pool pool; 97 struct event_pool pool;
96 struct crq_queue queue; 98 struct crq_queue queue;
97 struct tasklet_struct srp_task; 99 struct tasklet_struct srp_task;
98 struct list_head sent; 100 struct list_head sent;
99 struct Scsi_Host *host; 101 struct Scsi_Host *host;
102 struct task_struct *work_thread;
103 wait_queue_head_t work_wait_q;
100 struct mad_adapter_info_data madapter_info; 104 struct mad_adapter_info_data madapter_info;
101 struct capabilities caps; 105 struct capabilities caps;
102 dma_addr_t caps_addr; 106 dma_addr_t caps_addr;
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index e2056d517e99..2256babe0474 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -956,7 +956,7 @@ static struct srp_function_template ibmvstgt_transport_functions = {
956 .it_nexus_response = ibmvstgt_it_nexus_response, 956 .it_nexus_response = ibmvstgt_it_nexus_response,
957}; 957};
958 958
959static int ibmvstgt_init(void) 959static int __init ibmvstgt_init(void)
960{ 960{
961 int err = -ENOMEM; 961 int err = -ENOMEM;
962 962
@@ -987,7 +987,7 @@ release_transport:
987 return err; 987 return err;
988} 988}
989 989
990static void ibmvstgt_exit(void) 990static void __exit ibmvstgt_exit(void)
991{ 991{
992 printk("Unregister IBM virtual SCSI driver\n"); 992 printk("Unregister IBM virtual SCSI driver\n");
993 993
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
index 989b9a8ba72d..f48ae0190d95 100644
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c
@@ -31,6 +31,7 @@
31#include <asm/prom.h> 31#include <asm/prom.h>
32#include <asm/iommu.h> 32#include <asm/iommu.h>
33#include <asm/hvcall.h> 33#include <asm/hvcall.h>
34#include <linux/delay.h>
34#include <linux/dma-mapping.h> 35#include <linux/dma-mapping.h>
35#include <linux/gfp.h> 36#include <linux/gfp.h>
36#include <linux/interrupt.h> 37#include <linux/interrupt.h>
@@ -71,11 +72,13 @@ static void rpavscsi_release_crq_queue(struct crq_queue *queue,
71 struct ibmvscsi_host_data *hostdata, 72 struct ibmvscsi_host_data *hostdata,
72 int max_requests) 73 int max_requests)
73{ 74{
74 long rc; 75 long rc = 0;
75 struct vio_dev *vdev = to_vio_dev(hostdata->dev); 76 struct vio_dev *vdev = to_vio_dev(hostdata->dev);
76 free_irq(vdev->irq, (void *)hostdata); 77 free_irq(vdev->irq, (void *)hostdata);
77 tasklet_kill(&hostdata->srp_task); 78 tasklet_kill(&hostdata->srp_task);
78 do { 79 do {
80 if (rc)
81 msleep(100);
79 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); 82 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
80 } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc))); 83 } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
81 dma_unmap_single(hostdata->dev, 84 dma_unmap_single(hostdata->dev,
@@ -200,11 +203,13 @@ static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
200static int rpavscsi_reset_crq_queue(struct crq_queue *queue, 203static int rpavscsi_reset_crq_queue(struct crq_queue *queue,
201 struct ibmvscsi_host_data *hostdata) 204 struct ibmvscsi_host_data *hostdata)
202{ 205{
203 int rc; 206 int rc = 0;
204 struct vio_dev *vdev = to_vio_dev(hostdata->dev); 207 struct vio_dev *vdev = to_vio_dev(hostdata->dev);
205 208
206 /* Close the CRQ */ 209 /* Close the CRQ */
207 do { 210 do {
211 if (rc)
212 msleep(100);
208 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); 213 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
209 } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc))); 214 } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
210 215
@@ -301,7 +306,10 @@ static int rpavscsi_init_crq_queue(struct crq_queue *queue,
301 306
302 req_irq_failed: 307 req_irq_failed:
303 tasklet_kill(&hostdata->srp_task); 308 tasklet_kill(&hostdata->srp_task);
309 rc = 0;
304 do { 310 do {
311 if (rc)
312 msleep(100);
305 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); 313 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
306 } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc))); 314 } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
307 reg_crq_failed: 315 reg_crq_failed:
@@ -323,11 +331,13 @@ static int rpavscsi_init_crq_queue(struct crq_queue *queue,
323static int rpavscsi_reenable_crq_queue(struct crq_queue *queue, 331static int rpavscsi_reenable_crq_queue(struct crq_queue *queue,
324 struct ibmvscsi_host_data *hostdata) 332 struct ibmvscsi_host_data *hostdata)
325{ 333{
326 int rc; 334 int rc = 0;
327 struct vio_dev *vdev = to_vio_dev(hostdata->dev); 335 struct vio_dev *vdev = to_vio_dev(hostdata->dev);
328 336
329 /* Re-enable the CRQ */ 337 /* Re-enable the CRQ */
330 do { 338 do {
339 if (rc)
340 msleep(100);
331 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address); 341 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
332 } while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc))); 342 } while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
333 343
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index f820cffb7f00..52568588039f 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -167,21 +167,22 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
167 .clr_uproc_interrupt_reg32 = 0x0002C, 167 .clr_uproc_interrupt_reg32 = 0x0002C,
168 .init_feedback_reg = 0x0005C, 168 .init_feedback_reg = 0x0005C,
169 .dump_addr_reg = 0x00064, 169 .dump_addr_reg = 0x00064,
170 .dump_data_reg = 0x00068 170 .dump_data_reg = 0x00068,
171 .endian_swap_reg = 0x00084
171 } 172 }
172 }, 173 },
173}; 174};
174 175
175static const struct ipr_chip_t ipr_chip[] = { 176static const struct ipr_chip_t ipr_chip[] = {
176 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] }, 177 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
177 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] }, 178 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
178 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] }, 179 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
179 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] }, 180 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
180 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, &ipr_chip_cfg[0] }, 181 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
181 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] }, 182 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
182 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] }, 183 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
183 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] }, 184 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
184 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] } 185 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
185}; 186};
186 187
187static int ipr_max_bus_speeds [] = { 188static int ipr_max_bus_speeds [] = {
@@ -1167,7 +1168,7 @@ static void ipr_update_res_entry(struct ipr_resource_entry *res,
1167 if (res->ioa_cfg->sis64) { 1168 if (res->ioa_cfg->sis64) {
1168 res->flags = cfgtew->u.cfgte64->flags; 1169 res->flags = cfgtew->u.cfgte64->flags;
1169 res->res_flags = cfgtew->u.cfgte64->res_flags; 1170 res->res_flags = cfgtew->u.cfgte64->res_flags;
1170 res->type = cfgtew->u.cfgte64->res_type & 0x0f; 1171 res->type = cfgtew->u.cfgte64->res_type;
1171 1172
1172 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data, 1173 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1173 sizeof(struct ipr_std_inq_data)); 1174 sizeof(struct ipr_std_inq_data));
@@ -3761,6 +3762,36 @@ static struct device_attribute ipr_update_fw_attr = {
3761 .store = ipr_store_update_fw 3762 .store = ipr_store_update_fw
3762}; 3763};
3763 3764
3765/**
3766 * ipr_show_fw_type - Show the adapter's firmware type.
3767 * @dev: class device struct
3768 * @buf: buffer
3769 *
3770 * Return value:
3771 * number of bytes printed to buffer
3772 **/
3773static ssize_t ipr_show_fw_type(struct device *dev,
3774 struct device_attribute *attr, char *buf)
3775{
3776 struct Scsi_Host *shost = class_to_shost(dev);
3777 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3778 unsigned long lock_flags = 0;
3779 int len;
3780
3781 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3782 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
3783 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3784 return len;
3785}
3786
3787static struct device_attribute ipr_ioa_fw_type_attr = {
3788 .attr = {
3789 .name = "fw_type",
3790 .mode = S_IRUGO,
3791 },
3792 .show = ipr_show_fw_type
3793};
3794
3764static struct device_attribute *ipr_ioa_attrs[] = { 3795static struct device_attribute *ipr_ioa_attrs[] = {
3765 &ipr_fw_version_attr, 3796 &ipr_fw_version_attr,
3766 &ipr_log_level_attr, 3797 &ipr_log_level_attr,
@@ -3768,6 +3799,7 @@ static struct device_attribute *ipr_ioa_attrs[] = {
3768 &ipr_ioa_state_attr, 3799 &ipr_ioa_state_attr,
3769 &ipr_ioa_reset_attr, 3800 &ipr_ioa_reset_attr,
3770 &ipr_update_fw_attr, 3801 &ipr_update_fw_attr,
3802 &ipr_ioa_fw_type_attr,
3771 NULL, 3803 NULL,
3772}; 3804};
3773 3805
@@ -4121,14 +4153,49 @@ static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribut
4121static struct device_attribute ipr_resource_path_attr = { 4153static struct device_attribute ipr_resource_path_attr = {
4122 .attr = { 4154 .attr = {
4123 .name = "resource_path", 4155 .name = "resource_path",
4124 .mode = S_IRUSR, 4156 .mode = S_IRUGO,
4125 }, 4157 },
4126 .show = ipr_show_resource_path 4158 .show = ipr_show_resource_path
4127}; 4159};
4128 4160
4161/**
4162 * ipr_show_resource_type - Show the resource type for this device.
4163 * @dev: device struct
4164 * @buf: buffer
4165 *
4166 * Return value:
4167 * number of bytes printed to buffer
4168 **/
4169static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4170{
4171 struct scsi_device *sdev = to_scsi_device(dev);
4172 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4173 struct ipr_resource_entry *res;
4174 unsigned long lock_flags = 0;
4175 ssize_t len = -ENXIO;
4176
4177 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4178 res = (struct ipr_resource_entry *)sdev->hostdata;
4179
4180 if (res)
4181 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4182
4183 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4184 return len;
4185}
4186
4187static struct device_attribute ipr_resource_type_attr = {
4188 .attr = {
4189 .name = "resource_type",
4190 .mode = S_IRUGO,
4191 },
4192 .show = ipr_show_resource_type
4193};
4194
4129static struct device_attribute *ipr_dev_attrs[] = { 4195static struct device_attribute *ipr_dev_attrs[] = {
4130 &ipr_adapter_handle_attr, 4196 &ipr_adapter_handle_attr,
4131 &ipr_resource_path_attr, 4197 &ipr_resource_path_attr,
4198 &ipr_resource_type_attr,
4132 NULL, 4199 NULL,
4133}; 4200};
4134 4201
@@ -4352,8 +4419,6 @@ static int ipr_slave_configure(struct scsi_device *sdev)
4352 IPR_VSET_RW_TIMEOUT); 4419 IPR_VSET_RW_TIMEOUT);
4353 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS); 4420 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4354 } 4421 }
4355 if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
4356 sdev->allow_restart = 1;
4357 if (ipr_is_gata(res) && res->sata_port) 4422 if (ipr_is_gata(res) && res->sata_port)
4358 ap = res->sata_port->ap; 4423 ap = res->sata_port->ap;
4359 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4424 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -6770,7 +6835,8 @@ static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
6770 list_move_tail(&res->queue, &ioa_cfg->used_res_q); 6835 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6771 ipr_init_res_entry(res, &cfgtew); 6836 ipr_init_res_entry(res, &cfgtew);
6772 res->add_to_ml = 1; 6837 res->add_to_ml = 1;
6773 } 6838 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
6839 res->sdev->allow_restart = 1;
6774 6840
6775 if (found) 6841 if (found)
6776 ipr_update_res_entry(res, &cfgtew); 6842 ipr_update_res_entry(res, &cfgtew);
@@ -7169,12 +7235,15 @@ static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7169 stage_time = ioa_cfg->transop_timeout; 7235 stage_time = ioa_cfg->transop_timeout;
7170 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; 7236 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7171 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) { 7237 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7172 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; 7238 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7173 maskval = IPR_PCII_IPL_STAGE_CHANGE; 7239 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7174 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER; 7240 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7175 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg); 7241 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7176 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 7242 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7177 return IPR_RC_JOB_CONTINUE; 7243 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7244 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7245 return IPR_RC_JOB_CONTINUE;
7246 }
7178 } 7247 }
7179 7248
7180 ipr_cmd->timer.data = (unsigned long) ipr_cmd; 7249 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
@@ -7208,6 +7277,12 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7208 ipr_init_ioa_mem(ioa_cfg); 7277 ipr_init_ioa_mem(ioa_cfg);
7209 7278
7210 ioa_cfg->allow_interrupts = 1; 7279 ioa_cfg->allow_interrupts = 1;
7280 if (ioa_cfg->sis64) {
7281 /* Set the adapter to the correct endian mode. */
7282 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7283 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7284 }
7285
7211 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); 7286 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7212 7287
7213 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { 7288 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
@@ -7365,6 +7440,7 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7365static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) 7440static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7366{ 7441{
7367 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7442 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7443 volatile u32 int_reg;
7368 int rc; 7444 int rc;
7369 7445
7370 ENTER; 7446 ENTER;
@@ -7383,6 +7459,12 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7383 7459
7384 ipr_fail_all_ops(ioa_cfg); 7460 ipr_fail_all_ops(ioa_cfg);
7385 7461
7462 if (ioa_cfg->sis64) {
7463 /* Set the adapter to the correct endian mode. */
7464 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7465 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7466 }
7467
7386 if (ioa_cfg->ioa_unit_checked) { 7468 if (ioa_cfg->ioa_unit_checked) {
7387 ioa_cfg->ioa_unit_checked = 0; 7469 ioa_cfg->ioa_unit_checked = 0;
7388 ipr_get_unit_check_buffer(ioa_cfg); 7470 ipr_get_unit_check_buffer(ioa_cfg);
@@ -7438,20 +7520,25 @@ static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
7438static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd) 7520static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
7439{ 7521{
7440 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7522 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7441 int rc; 7523 int rc = PCIBIOS_SUCCESSFUL;
7442 7524
7443 ENTER; 7525 ENTER;
7444 pci_block_user_cfg_access(ioa_cfg->pdev); 7526 pci_block_user_cfg_access(ioa_cfg->pdev);
7445 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
7446 7527
7447 if (rc != PCIBIOS_SUCCESSFUL) { 7528 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
7448 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev); 7529 writel(IPR_UPROCI_SIS64_START_BIST,
7449 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); 7530 ioa_cfg->regs.set_uproc_interrupt_reg32);
7450 rc = IPR_RC_JOB_CONTINUE; 7531 else
7451 } else { 7532 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
7533
7534 if (rc == PCIBIOS_SUCCESSFUL) {
7452 ipr_cmd->job_step = ipr_reset_bist_done; 7535 ipr_cmd->job_step = ipr_reset_bist_done;
7453 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT); 7536 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7454 rc = IPR_RC_JOB_RETURN; 7537 rc = IPR_RC_JOB_RETURN;
7538 } else {
7539 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
7540 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7541 rc = IPR_RC_JOB_CONTINUE;
7455 } 7542 }
7456 7543
7457 LEAVE; 7544 LEAVE;
@@ -7547,7 +7634,7 @@ static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
7547} 7634}
7548 7635
7549/** 7636/**
7550 * ipr_reset_alert_part2 - Alert the adapter of a pending reset 7637 * ipr_reset_alert - Alert the adapter of a pending reset
7551 * @ipr_cmd: ipr command struct 7638 * @ipr_cmd: ipr command struct
7552 * 7639 *
7553 * Description: This function alerts the adapter that it will be reset. 7640 * Description: This function alerts the adapter that it will be reset.
@@ -8318,6 +8405,7 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
8318 t->init_feedback_reg = base + p->init_feedback_reg; 8405 t->init_feedback_reg = base + p->init_feedback_reg;
8319 t->dump_addr_reg = base + p->dump_addr_reg; 8406 t->dump_addr_reg = base + p->dump_addr_reg;
8320 t->dump_data_reg = base + p->dump_data_reg; 8407 t->dump_data_reg = base + p->dump_data_reg;
8408 t->endian_swap_reg = base + p->endian_swap_reg;
8321 } 8409 }
8322} 8410}
8323 8411
@@ -8873,6 +8961,8 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
8873 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, 8961 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8874 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 }, 8962 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
8875 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, 8963 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8964 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
8965 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8876 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0, 8966 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
8877 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET }, 8967 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
8878 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, 8968 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index b965f3587c9d..4d31625ab9cf 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -62,12 +62,12 @@
62#define IPR_SUBS_DEV_ID_2780 0x0264 62#define IPR_SUBS_DEV_ID_2780 0x0264
63#define IPR_SUBS_DEV_ID_5702 0x0266 63#define IPR_SUBS_DEV_ID_5702 0x0266
64#define IPR_SUBS_DEV_ID_5703 0x0278 64#define IPR_SUBS_DEV_ID_5703 0x0278
65#define IPR_SUBS_DEV_ID_572E 0x028D 65#define IPR_SUBS_DEV_ID_572E 0x028D
66#define IPR_SUBS_DEV_ID_573E 0x02D3 66#define IPR_SUBS_DEV_ID_573E 0x02D3
67#define IPR_SUBS_DEV_ID_573D 0x02D4 67#define IPR_SUBS_DEV_ID_573D 0x02D4
68#define IPR_SUBS_DEV_ID_571A 0x02C0 68#define IPR_SUBS_DEV_ID_571A 0x02C0
69#define IPR_SUBS_DEV_ID_571B 0x02BE 69#define IPR_SUBS_DEV_ID_571B 0x02BE
70#define IPR_SUBS_DEV_ID_571E 0x02BF 70#define IPR_SUBS_DEV_ID_571E 0x02BF
71#define IPR_SUBS_DEV_ID_571F 0x02D5 71#define IPR_SUBS_DEV_ID_571F 0x02D5
72#define IPR_SUBS_DEV_ID_572A 0x02C1 72#define IPR_SUBS_DEV_ID_572A 0x02C1
73#define IPR_SUBS_DEV_ID_572B 0x02C2 73#define IPR_SUBS_DEV_ID_572B 0x02C2
@@ -82,6 +82,7 @@
82#define IPR_SUBS_DEV_ID_57B4 0x033B 82#define IPR_SUBS_DEV_ID_57B4 0x033B
83#define IPR_SUBS_DEV_ID_57B2 0x035F 83#define IPR_SUBS_DEV_ID_57B2 0x035F
84#define IPR_SUBS_DEV_ID_57C6 0x0357 84#define IPR_SUBS_DEV_ID_57C6 0x0357
85#define IPR_SUBS_DEV_ID_57CC 0x035C
85 86
86#define IPR_SUBS_DEV_ID_57B5 0x033C 87#define IPR_SUBS_DEV_ID_57B5 0x033C
87#define IPR_SUBS_DEV_ID_57CE 0x035E 88#define IPR_SUBS_DEV_ID_57CE 0x035E
@@ -272,6 +273,7 @@ IPR_PCII_NO_HOST_RRQ | IPR_PCII_IOARRIN_LOST | IPR_PCII_MMIO_ERROR)
272 273
273#define IPR_UPROCI_RESET_ALERT (0x80000000 >> 7) 274#define IPR_UPROCI_RESET_ALERT (0x80000000 >> 7)
274#define IPR_UPROCI_IO_DEBUG_ALERT (0x80000000 >> 9) 275#define IPR_UPROCI_IO_DEBUG_ALERT (0x80000000 >> 9)
276#define IPR_UPROCI_SIS64_START_BIST (0x80000000 >> 23)
275 277
276#define IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC 200000 /* 200 ms */ 278#define IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC 200000 /* 200 ms */
277#define IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC 200000 /* 200 ms */ 279#define IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC 200000 /* 200 ms */
@@ -996,7 +998,7 @@ struct ipr_hostrcb64_fabric_desc {
996 __be16 length; 998 __be16 length;
997 u8 descriptor_id; 999 u8 descriptor_id;
998 1000
999 u8 reserved; 1001 u8 reserved[2];
1000 u8 path_state; 1002 u8 path_state;
1001 1003
1002 u8 reserved2[2]; 1004 u8 reserved2[2];
@@ -1054,7 +1056,7 @@ struct ipr_hostrcb64_error {
1054 __be64 fd_lun; 1056 __be64 fd_lun;
1055 u8 fd_res_path[8]; 1057 u8 fd_res_path[8];
1056 __be64 time_stamp; 1058 __be64 time_stamp;
1057 u8 reserved[2]; 1059 u8 reserved[16];
1058 union { 1060 union {
1059 struct ipr_hostrcb_type_ff_error type_ff_error; 1061 struct ipr_hostrcb_type_ff_error type_ff_error;
1060 struct ipr_hostrcb_type_12_error type_12_error; 1062 struct ipr_hostrcb_type_12_error type_12_error;
@@ -1254,6 +1256,9 @@ struct ipr_interrupt_offsets {
1254 1256
1255 unsigned long dump_addr_reg; 1257 unsigned long dump_addr_reg;
1256 unsigned long dump_data_reg; 1258 unsigned long dump_data_reg;
1259
1260#define IPR_ENDIAN_SWAP_KEY 0x00080800
1261 unsigned long endian_swap_reg;
1257}; 1262};
1258 1263
1259struct ipr_interrupts { 1264struct ipr_interrupts {
@@ -1279,6 +1284,8 @@ struct ipr_interrupts {
1279 1284
1280 void __iomem *dump_addr_reg; 1285 void __iomem *dump_addr_reg;
1281 void __iomem *dump_data_reg; 1286 void __iomem *dump_data_reg;
1287
1288 void __iomem *endian_swap_reg;
1282}; 1289};
1283 1290
1284struct ipr_chip_cfg_t { 1291struct ipr_chip_cfg_t {
@@ -1296,6 +1303,9 @@ struct ipr_chip_t {
1296 u16 sis_type; 1303 u16 sis_type;
1297#define IPR_SIS32 0x00 1304#define IPR_SIS32 0x00
1298#define IPR_SIS64 0x01 1305#define IPR_SIS64 0x01
1306 u16 bist_method;
1307#define IPR_PCI_CFG 0x00
1308#define IPR_MMIO 0x01
1299 const struct ipr_chip_cfg_t *cfg; 1309 const struct ipr_chip_cfg_t *cfg;
1300}; 1310};
1301 1311
@@ -1855,4 +1865,12 @@ static inline int ipr_sdt_is_fmt2(u32 sdt_word)
1855 return 0; 1865 return 0;
1856} 1866}
1857 1867
1868#ifndef writeq
1869static inline void writeq(u64 val, void __iomem *addr)
1870{
1871 writel(((u32) (val >> 32)), addr);
1872 writel(((u32) (val)), (addr + 4));
1873}
1858#endif 1874#endif
1875
1876#endif /* _IPR_H */
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index c7985da88099..32f67c4b03fc 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -63,27 +63,25 @@ static void fc_disc_restart(struct fc_disc *);
63void fc_disc_stop_rports(struct fc_disc *disc) 63void fc_disc_stop_rports(struct fc_disc *disc)
64{ 64{
65 struct fc_lport *lport; 65 struct fc_lport *lport;
66 struct fc_rport_priv *rdata, *next; 66 struct fc_rport_priv *rdata;
67 67
68 lport = disc->lport; 68 lport = fc_disc_lport(disc);
69 69
70 mutex_lock(&disc->disc_mutex); 70 mutex_lock(&disc->disc_mutex);
71 list_for_each_entry_safe(rdata, next, &disc->rports, peers) 71 list_for_each_entry_rcu(rdata, &disc->rports, peers)
72 lport->tt.rport_logoff(rdata); 72 lport->tt.rport_logoff(rdata);
73 mutex_unlock(&disc->disc_mutex); 73 mutex_unlock(&disc->disc_mutex);
74} 74}
75 75
76/** 76/**
77 * fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN) 77 * fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN)
78 * @sp: The sequence of the RSCN exchange 78 * @disc: The discovery object to which the RSCN applies
79 * @fp: The RSCN frame 79 * @fp: The RSCN frame
80 * @lport: The local port that the request will be sent on
81 * 80 *
82 * Locking Note: This function expects that the disc_mutex is locked 81 * Locking Note: This function expects that the disc_mutex is locked
83 * before it is called. 82 * before it is called.
84 */ 83 */
85static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp, 84static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp)
86 struct fc_disc *disc)
87{ 85{
88 struct fc_lport *lport; 86 struct fc_lport *lport;
89 struct fc_els_rscn *rp; 87 struct fc_els_rscn *rp;
@@ -96,7 +94,7 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
96 LIST_HEAD(disc_ports); 94 LIST_HEAD(disc_ports);
97 struct fc_disc_port *dp, *next; 95 struct fc_disc_port *dp, *next;
98 96
99 lport = disc->lport; 97 lport = fc_disc_lport(disc);
100 98
101 FC_DISC_DBG(disc, "Received an RSCN event\n"); 99 FC_DISC_DBG(disc, "Received an RSCN event\n");
102 100
@@ -151,7 +149,7 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
151 break; 149 break;
152 } 150 }
153 } 151 }
154 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); 152 lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
155 153
156 /* 154 /*
157 * If not doing a complete rediscovery, do GPN_ID on 155 * If not doing a complete rediscovery, do GPN_ID on
@@ -177,25 +175,22 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
177 return; 175 return;
178reject: 176reject:
179 FC_DISC_DBG(disc, "Received a bad RSCN frame\n"); 177 FC_DISC_DBG(disc, "Received a bad RSCN frame\n");
180 rjt_data.fp = NULL;
181 rjt_data.reason = ELS_RJT_LOGIC; 178 rjt_data.reason = ELS_RJT_LOGIC;
182 rjt_data.explan = ELS_EXPL_NONE; 179 rjt_data.explan = ELS_EXPL_NONE;
183 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); 180 lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
184 fc_frame_free(fp); 181 fc_frame_free(fp);
185} 182}
186 183
187/** 184/**
188 * fc_disc_recv_req() - Handle incoming requests 185 * fc_disc_recv_req() - Handle incoming requests
189 * @sp: The sequence of the request exchange
190 * @fp: The request frame
191 * @lport: The local port receiving the request 186 * @lport: The local port receiving the request
187 * @fp: The request frame
192 * 188 *
193 * Locking Note: This function is called from the EM and will lock 189 * Locking Note: This function is called from the EM and will lock
194 * the disc_mutex before calling the handler for the 190 * the disc_mutex before calling the handler for the
195 * request. 191 * request.
196 */ 192 */
197static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp, 193static void fc_disc_recv_req(struct fc_lport *lport, struct fc_frame *fp)
198 struct fc_lport *lport)
199{ 194{
200 u8 op; 195 u8 op;
201 struct fc_disc *disc = &lport->disc; 196 struct fc_disc *disc = &lport->disc;
@@ -204,7 +199,7 @@ static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp,
204 switch (op) { 199 switch (op) {
205 case ELS_RSCN: 200 case ELS_RSCN:
206 mutex_lock(&disc->disc_mutex); 201 mutex_lock(&disc->disc_mutex);
207 fc_disc_recv_rscn_req(sp, fp, disc); 202 fc_disc_recv_rscn_req(disc, fp);
208 mutex_unlock(&disc->disc_mutex); 203 mutex_unlock(&disc->disc_mutex);
209 break; 204 break;
210 default: 205 default:
@@ -275,7 +270,7 @@ static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
275 */ 270 */
276static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event) 271static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
277{ 272{
278 struct fc_lport *lport = disc->lport; 273 struct fc_lport *lport = fc_disc_lport(disc);
279 struct fc_rport_priv *rdata; 274 struct fc_rport_priv *rdata;
280 275
281 FC_DISC_DBG(disc, "Discovery complete\n"); 276 FC_DISC_DBG(disc, "Discovery complete\n");
@@ -292,7 +287,7 @@ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
292 * Skip ports which were never discovered. These are the dNS port 287 * Skip ports which were never discovered. These are the dNS port
293 * and ports which were created by PLOGI. 288 * and ports which were created by PLOGI.
294 */ 289 */
295 list_for_each_entry(rdata, &disc->rports, peers) { 290 list_for_each_entry_rcu(rdata, &disc->rports, peers) {
296 if (!rdata->disc_id) 291 if (!rdata->disc_id)
297 continue; 292 continue;
298 if (rdata->disc_id == disc->disc_id) 293 if (rdata->disc_id == disc->disc_id)
@@ -313,7 +308,7 @@ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
313 */ 308 */
314static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp) 309static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
315{ 310{
316 struct fc_lport *lport = disc->lport; 311 struct fc_lport *lport = fc_disc_lport(disc);
317 unsigned long delay = 0; 312 unsigned long delay = 0;
318 313
319 FC_DISC_DBG(disc, "Error %ld, retries %d/%d\n", 314 FC_DISC_DBG(disc, "Error %ld, retries %d/%d\n",
@@ -353,7 +348,7 @@ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
353static void fc_disc_gpn_ft_req(struct fc_disc *disc) 348static void fc_disc_gpn_ft_req(struct fc_disc *disc)
354{ 349{
355 struct fc_frame *fp; 350 struct fc_frame *fp;
356 struct fc_lport *lport = disc->lport; 351 struct fc_lport *lport = fc_disc_lport(disc);
357 352
358 WARN_ON(!fc_lport_test_ready(lport)); 353 WARN_ON(!fc_lport_test_ready(lport));
359 354
@@ -396,7 +391,7 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
396 struct fc_rport_identifiers ids; 391 struct fc_rport_identifiers ids;
397 struct fc_rport_priv *rdata; 392 struct fc_rport_priv *rdata;
398 393
399 lport = disc->lport; 394 lport = fc_disc_lport(disc);
400 disc->seq_count++; 395 disc->seq_count++;
401 396
402 /* 397 /*
@@ -733,7 +728,7 @@ int fc_disc_init(struct fc_lport *lport)
733 mutex_init(&disc->disc_mutex); 728 mutex_init(&disc->disc_mutex);
734 INIT_LIST_HEAD(&disc->rports); 729 INIT_LIST_HEAD(&disc->rports);
735 730
736 disc->lport = lport; 731 disc->priv = lport;
737 732
738 return 0; 733 return 0;
739} 734}
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
index e9412b710fab..9b25969e2ad0 100644
--- a/drivers/scsi/libfc/fc_elsct.c
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -64,7 +64,7 @@ struct fc_seq *fc_elsct_send(struct fc_lport *lport, u32 did,
64 } 64 }
65 65
66 fc_fill_fc_hdr(fp, r_ctl, did, lport->port_id, fh_type, 66 fc_fill_fc_hdr(fp, r_ctl, did, lport->port_id, fh_type,
67 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); 67 FC_FCTL_REQ, 0);
68 68
69 return lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec); 69 return lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec);
70} 70}
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 104e0fba7c43..b8560ad8cf66 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -129,11 +129,11 @@ struct fc_exch_mgr_anchor {
129}; 129};
130 130
131static void fc_exch_rrq(struct fc_exch *); 131static void fc_exch_rrq(struct fc_exch *);
132static void fc_seq_ls_acc(struct fc_seq *); 132static void fc_seq_ls_acc(struct fc_frame *);
133static void fc_seq_ls_rjt(struct fc_seq *, enum fc_els_rjt_reason, 133static void fc_seq_ls_rjt(struct fc_frame *, enum fc_els_rjt_reason,
134 enum fc_els_rjt_explan); 134 enum fc_els_rjt_explan);
135static void fc_exch_els_rec(struct fc_seq *, struct fc_frame *); 135static void fc_exch_els_rec(struct fc_frame *);
136static void fc_exch_els_rrq(struct fc_seq *, struct fc_frame *); 136static void fc_exch_els_rrq(struct fc_frame *);
137 137
138/* 138/*
139 * Internal implementation notes. 139 * Internal implementation notes.
@@ -464,6 +464,7 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
464 464
465 f_ctl = ntoh24(fh->fh_f_ctl); 465 f_ctl = ntoh24(fh->fh_f_ctl);
466 fc_exch_setup_hdr(ep, fp, f_ctl); 466 fc_exch_setup_hdr(ep, fp, f_ctl);
467 fr_encaps(fp) = ep->encaps;
467 468
468 /* 469 /*
469 * update sequence count if this frame is carrying 470 * update sequence count if this frame is carrying
@@ -1002,28 +1003,30 @@ static void fc_exch_set_addr(struct fc_exch *ep,
1002/** 1003/**
1003 * fc_seq_els_rsp_send() - Send an ELS response using infomation from 1004 * fc_seq_els_rsp_send() - Send an ELS response using infomation from
1004 * the existing sequence/exchange. 1005 * the existing sequence/exchange.
1005 * @sp: The sequence/exchange to get information from 1006 * @fp: The received frame
1006 * @els_cmd: The ELS command to be sent 1007 * @els_cmd: The ELS command to be sent
1007 * @els_data: The ELS data to be sent 1008 * @els_data: The ELS data to be sent
1009 *
1010 * The received frame is not freed.
1008 */ 1011 */
1009static void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd, 1012static void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd,
1010 struct fc_seq_els_data *els_data) 1013 struct fc_seq_els_data *els_data)
1011{ 1014{
1012 switch (els_cmd) { 1015 switch (els_cmd) {
1013 case ELS_LS_RJT: 1016 case ELS_LS_RJT:
1014 fc_seq_ls_rjt(sp, els_data->reason, els_data->explan); 1017 fc_seq_ls_rjt(fp, els_data->reason, els_data->explan);
1015 break; 1018 break;
1016 case ELS_LS_ACC: 1019 case ELS_LS_ACC:
1017 fc_seq_ls_acc(sp); 1020 fc_seq_ls_acc(fp);
1018 break; 1021 break;
1019 case ELS_RRQ: 1022 case ELS_RRQ:
1020 fc_exch_els_rrq(sp, els_data->fp); 1023 fc_exch_els_rrq(fp);
1021 break; 1024 break;
1022 case ELS_REC: 1025 case ELS_REC:
1023 fc_exch_els_rec(sp, els_data->fp); 1026 fc_exch_els_rec(fp);
1024 break; 1027 break;
1025 default: 1028 default:
1026 FC_EXCH_DBG(fc_seq_exch(sp), "Invalid ELS CMD:%x\n", els_cmd); 1029 FC_LPORT_DBG(fr_dev(fp), "Invalid ELS CMD:%x\n", els_cmd);
1027 } 1030 }
1028} 1031}
1029 1032
@@ -1230,11 +1233,35 @@ free:
1230} 1233}
1231 1234
1232/** 1235/**
1233 * fc_exch_recv_req() - Handler for an incoming request where is other 1236 * fc_seq_assign() - Assign exchange and sequence for incoming request
1234 * end is originating the sequence 1237 * @lport: The local port that received the request
1238 * @fp: The request frame
1239 *
1240 * On success, the sequence pointer will be returned and also in fr_seq(@fp).
1241 */
1242static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
1243{
1244 struct fc_exch_mgr_anchor *ema;
1245
1246 WARN_ON(lport != fr_dev(fp));
1247 WARN_ON(fr_seq(fp));
1248 fr_seq(fp) = NULL;
1249
1250 list_for_each_entry(ema, &lport->ema_list, ema_list)
1251 if ((!ema->match || ema->match(fp)) &&
1252 fc_seq_lookup_recip(lport, ema->mp, fp) != FC_RJT_NONE)
1253 break;
1254 return fr_seq(fp);
1255}
1256
1257/**
1258 * fc_exch_recv_req() - Handler for an incoming request
1235 * @lport: The local port that received the request 1259 * @lport: The local port that received the request
1236 * @mp: The EM that the exchange is on 1260 * @mp: The EM that the exchange is on
1237 * @fp: The request frame 1261 * @fp: The request frame
1262 *
1263 * This is used when the other end is originating the exchange
1264 * and the sequence.
1238 */ 1265 */
1239static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp, 1266static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
1240 struct fc_frame *fp) 1267 struct fc_frame *fp)
@@ -1252,13 +1279,23 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
1252 fc_frame_free(fp); 1279 fc_frame_free(fp);
1253 return; 1280 return;
1254 } 1281 }
1282 fr_dev(fp) = lport;
1283
1284 BUG_ON(fr_seq(fp)); /* XXX remove later */
1285
1286 /*
1287 * If the RX_ID is 0xffff, don't allocate an exchange.
1288 * The upper-level protocol may request one later, if needed.
1289 */
1290 if (fh->fh_rx_id == htons(FC_XID_UNKNOWN))
1291 return lport->tt.lport_recv(lport, fp);
1255 1292
1256 fr_seq(fp) = NULL;
1257 reject = fc_seq_lookup_recip(lport, mp, fp); 1293 reject = fc_seq_lookup_recip(lport, mp, fp);
1258 if (reject == FC_RJT_NONE) { 1294 if (reject == FC_RJT_NONE) {
1259 sp = fr_seq(fp); /* sequence will be held */ 1295 sp = fr_seq(fp); /* sequence will be held */
1260 ep = fc_seq_exch(sp); 1296 ep = fc_seq_exch(sp);
1261 fc_seq_send_ack(sp, fp); 1297 fc_seq_send_ack(sp, fp);
1298 ep->encaps = fr_encaps(fp);
1262 1299
1263 /* 1300 /*
1264 * Call the receive function. 1301 * Call the receive function.
@@ -1274,7 +1311,7 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
1274 if (ep->resp) 1311 if (ep->resp)
1275 ep->resp(sp, fp, ep->arg); 1312 ep->resp(sp, fp, ep->arg);
1276 else 1313 else
1277 lport->tt.lport_recv(lport, sp, fp); 1314 lport->tt.lport_recv(lport, fp);
1278 fc_exch_release(ep); /* release from lookup */ 1315 fc_exch_release(ep); /* release from lookup */
1279 } else { 1316 } else {
1280 FC_LPORT_DBG(lport, "exch/seq lookup failed: reject %x\n", 1317 FC_LPORT_DBG(lport, "exch/seq lookup failed: reject %x\n",
@@ -1542,53 +1579,55 @@ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
1542 1579
1543/** 1580/**
1544 * fc_seq_ls_acc() - Accept sequence with LS_ACC 1581 * fc_seq_ls_acc() - Accept sequence with LS_ACC
1545 * @req_sp: The request sequence 1582 * @rx_fp: The received frame, not freed here.
1546 * 1583 *
1547 * If this fails due to allocation or transmit congestion, assume the 1584 * If this fails due to allocation or transmit congestion, assume the
1548 * originator will repeat the sequence. 1585 * originator will repeat the sequence.
1549 */ 1586 */
1550static void fc_seq_ls_acc(struct fc_seq *req_sp) 1587static void fc_seq_ls_acc(struct fc_frame *rx_fp)
1551{ 1588{
1552 struct fc_seq *sp; 1589 struct fc_lport *lport;
1553 struct fc_els_ls_acc *acc; 1590 struct fc_els_ls_acc *acc;
1554 struct fc_frame *fp; 1591 struct fc_frame *fp;
1555 1592
1556 sp = fc_seq_start_next(req_sp); 1593 lport = fr_dev(rx_fp);
1557 fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*acc)); 1594 fp = fc_frame_alloc(lport, sizeof(*acc));
1558 if (fp) { 1595 if (!fp)
1559 acc = fc_frame_payload_get(fp, sizeof(*acc)); 1596 return;
1560 memset(acc, 0, sizeof(*acc)); 1597 acc = fc_frame_payload_get(fp, sizeof(*acc));
1561 acc->la_cmd = ELS_LS_ACC; 1598 memset(acc, 0, sizeof(*acc));
1562 fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS); 1599 acc->la_cmd = ELS_LS_ACC;
1563 } 1600 fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
1601 lport->tt.frame_send(lport, fp);
1564} 1602}
1565 1603
1566/** 1604/**
1567 * fc_seq_ls_rjt() - Reject a sequence with ELS LS_RJT 1605 * fc_seq_ls_rjt() - Reject a sequence with ELS LS_RJT
1568 * @req_sp: The request sequence 1606 * @rx_fp: The received frame, not freed here.
1569 * @reason: The reason the sequence is being rejected 1607 * @reason: The reason the sequence is being rejected
1570 * @explan: The explaination for the rejection 1608 * @explan: The explanation for the rejection
1571 * 1609 *
1572 * If this fails due to allocation or transmit congestion, assume the 1610 * If this fails due to allocation or transmit congestion, assume the
1573 * originator will repeat the sequence. 1611 * originator will repeat the sequence.
1574 */ 1612 */
1575static void fc_seq_ls_rjt(struct fc_seq *req_sp, enum fc_els_rjt_reason reason, 1613static void fc_seq_ls_rjt(struct fc_frame *rx_fp, enum fc_els_rjt_reason reason,
1576 enum fc_els_rjt_explan explan) 1614 enum fc_els_rjt_explan explan)
1577{ 1615{
1578 struct fc_seq *sp; 1616 struct fc_lport *lport;
1579 struct fc_els_ls_rjt *rjt; 1617 struct fc_els_ls_rjt *rjt;
1580 struct fc_frame *fp; 1618 struct fc_frame *fp;
1581 1619
1582 sp = fc_seq_start_next(req_sp); 1620 lport = fr_dev(rx_fp);
1583 fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*rjt)); 1621 fp = fc_frame_alloc(lport, sizeof(*rjt));
1584 if (fp) { 1622 if (!fp)
1585 rjt = fc_frame_payload_get(fp, sizeof(*rjt)); 1623 return;
1586 memset(rjt, 0, sizeof(*rjt)); 1624 rjt = fc_frame_payload_get(fp, sizeof(*rjt));
1587 rjt->er_cmd = ELS_LS_RJT; 1625 memset(rjt, 0, sizeof(*rjt));
1588 rjt->er_reason = reason; 1626 rjt->er_cmd = ELS_LS_RJT;
1589 rjt->er_explan = explan; 1627 rjt->er_reason = reason;
1590 fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS); 1628 rjt->er_explan = explan;
1591 } 1629 fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
1630 lport->tt.frame_send(lport, fp);
1592} 1631}
1593 1632
1594/** 1633/**
@@ -1691,17 +1730,33 @@ void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did)
1691EXPORT_SYMBOL(fc_exch_mgr_reset); 1730EXPORT_SYMBOL(fc_exch_mgr_reset);
1692 1731
1693/** 1732/**
1733 * fc_exch_lookup() - find an exchange
1734 * @lport: The local port
1735 * @xid: The exchange ID
1736 *
1737 * Returns exchange pointer with hold for caller, or NULL if not found.
1738 */
1739static struct fc_exch *fc_exch_lookup(struct fc_lport *lport, u32 xid)
1740{
1741 struct fc_exch_mgr_anchor *ema;
1742
1743 list_for_each_entry(ema, &lport->ema_list, ema_list)
1744 if (ema->mp->min_xid <= xid && xid <= ema->mp->max_xid)
1745 return fc_exch_find(ema->mp, xid);
1746 return NULL;
1747}
1748
1749/**
1694 * fc_exch_els_rec() - Handler for ELS REC (Read Exchange Concise) requests 1750 * fc_exch_els_rec() - Handler for ELS REC (Read Exchange Concise) requests
1695 * @sp: The sequence the REC is on 1751 * @rfp: The REC frame, not freed here.
1696 * @rfp: The REC frame
1697 * 1752 *
1698 * Note that the requesting port may be different than the S_ID in the request. 1753 * Note that the requesting port may be different than the S_ID in the request.
1699 */ 1754 */
1700static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp) 1755static void fc_exch_els_rec(struct fc_frame *rfp)
1701{ 1756{
1757 struct fc_lport *lport;
1702 struct fc_frame *fp; 1758 struct fc_frame *fp;
1703 struct fc_exch *ep; 1759 struct fc_exch *ep;
1704 struct fc_exch_mgr *em;
1705 struct fc_els_rec *rp; 1760 struct fc_els_rec *rp;
1706 struct fc_els_rec_acc *acc; 1761 struct fc_els_rec_acc *acc;
1707 enum fc_els_rjt_reason reason = ELS_RJT_LOGIC; 1762 enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
@@ -1710,6 +1765,7 @@ static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp)
1710 u16 rxid; 1765 u16 rxid;
1711 u16 oxid; 1766 u16 oxid;
1712 1767
1768 lport = fr_dev(rfp);
1713 rp = fc_frame_payload_get(rfp, sizeof(*rp)); 1769 rp = fc_frame_payload_get(rfp, sizeof(*rp));
1714 explan = ELS_EXPL_INV_LEN; 1770 explan = ELS_EXPL_INV_LEN;
1715 if (!rp) 1771 if (!rp)
@@ -1718,35 +1774,19 @@ static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp)
1718 rxid = ntohs(rp->rec_rx_id); 1774 rxid = ntohs(rp->rec_rx_id);
1719 oxid = ntohs(rp->rec_ox_id); 1775 oxid = ntohs(rp->rec_ox_id);
1720 1776
1721 /* 1777 ep = fc_exch_lookup(lport,
1722 * Currently it's hard to find the local S_ID from the exchange 1778 sid == fc_host_port_id(lport->host) ? oxid : rxid);
1723 * manager. This will eventually be fixed, but for now it's easier
1724 * to lookup the subject exchange twice, once as if we were
1725 * the initiator, and then again if we weren't.
1726 */
1727 em = fc_seq_exch(sp)->em;
1728 ep = fc_exch_find(em, oxid);
1729 explan = ELS_EXPL_OXID_RXID; 1779 explan = ELS_EXPL_OXID_RXID;
1730 if (ep && ep->oid == sid) { 1780 if (!ep)
1731 if (ep->rxid != FC_XID_UNKNOWN && 1781 goto reject;
1732 rxid != FC_XID_UNKNOWN && 1782 if (ep->oid != sid || oxid != ep->oxid)
1733 ep->rxid != rxid) 1783 goto rel;
1734 goto rel; 1784 if (rxid != FC_XID_UNKNOWN && rxid != ep->rxid)
1735 } else { 1785 goto rel;
1736 if (ep) 1786 fp = fc_frame_alloc(lport, sizeof(*acc));
1737 fc_exch_release(ep); 1787 if (!fp)
1738 ep = NULL;
1739 if (rxid != FC_XID_UNKNOWN)
1740 ep = fc_exch_find(em, rxid);
1741 if (!ep)
1742 goto reject;
1743 }
1744
1745 fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*acc));
1746 if (!fp) {
1747 fc_exch_done(sp);
1748 goto out; 1788 goto out;
1749 } 1789
1750 acc = fc_frame_payload_get(fp, sizeof(*acc)); 1790 acc = fc_frame_payload_get(fp, sizeof(*acc));
1751 memset(acc, 0, sizeof(*acc)); 1791 memset(acc, 0, sizeof(*acc));
1752 acc->reca_cmd = ELS_LS_ACC; 1792 acc->reca_cmd = ELS_LS_ACC;
@@ -1761,18 +1801,16 @@ static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp)
1761 acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP | 1801 acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP |
1762 ESB_ST_SEQ_INIT | 1802 ESB_ST_SEQ_INIT |
1763 ESB_ST_COMPLETE)); 1803 ESB_ST_COMPLETE));
1764 sp = fc_seq_start_next(sp); 1804 fc_fill_reply_hdr(fp, rfp, FC_RCTL_ELS_REP, 0);
1765 fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS); 1805 lport->tt.frame_send(lport, fp);
1766out: 1806out:
1767 fc_exch_release(ep); 1807 fc_exch_release(ep);
1768 fc_frame_free(rfp);
1769 return; 1808 return;
1770 1809
1771rel: 1810rel:
1772 fc_exch_release(ep); 1811 fc_exch_release(ep);
1773reject: 1812reject:
1774 fc_seq_ls_rjt(sp, reason, explan); 1813 fc_seq_ls_rjt(rfp, reason, explan);
1775 fc_frame_free(rfp);
1776} 1814}
1777 1815
1778/** 1816/**
@@ -1947,20 +1985,20 @@ retry:
1947 spin_unlock_bh(&ep->ex_lock); 1985 spin_unlock_bh(&ep->ex_lock);
1948} 1986}
1949 1987
1950
1951/** 1988/**
1952 * fc_exch_els_rrq() - Handler for ELS RRQ (Reset Recovery Qualifier) requests 1989 * fc_exch_els_rrq() - Handler for ELS RRQ (Reset Recovery Qualifier) requests
1953 * @sp: The sequence that the RRQ is on 1990 * @fp: The RRQ frame, not freed here.
1954 * @fp: The RRQ frame
1955 */ 1991 */
1956static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp) 1992static void fc_exch_els_rrq(struct fc_frame *fp)
1957{ 1993{
1994 struct fc_lport *lport;
1958 struct fc_exch *ep = NULL; /* request or subject exchange */ 1995 struct fc_exch *ep = NULL; /* request or subject exchange */
1959 struct fc_els_rrq *rp; 1996 struct fc_els_rrq *rp;
1960 u32 sid; 1997 u32 sid;
1961 u16 xid; 1998 u16 xid;
1962 enum fc_els_rjt_explan explan; 1999 enum fc_els_rjt_explan explan;
1963 2000
2001 lport = fr_dev(fp);
1964 rp = fc_frame_payload_get(fp, sizeof(*rp)); 2002 rp = fc_frame_payload_get(fp, sizeof(*rp));
1965 explan = ELS_EXPL_INV_LEN; 2003 explan = ELS_EXPL_INV_LEN;
1966 if (!rp) 2004 if (!rp)
@@ -1969,11 +2007,10 @@ static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp)
1969 /* 2007 /*
1970 * lookup subject exchange. 2008 * lookup subject exchange.
1971 */ 2009 */
1972 ep = fc_seq_exch(sp);
1973 sid = ntoh24(rp->rrq_s_id); /* subject source */ 2010 sid = ntoh24(rp->rrq_s_id); /* subject source */
1974 xid = ep->did == sid ? ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id); 2011 xid = fc_host_port_id(lport->host) == sid ?
1975 ep = fc_exch_find(ep->em, xid); 2012 ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id);
1976 2013 ep = fc_exch_lookup(lport, xid);
1977 explan = ELS_EXPL_OXID_RXID; 2014 explan = ELS_EXPL_OXID_RXID;
1978 if (!ep) 2015 if (!ep)
1979 goto reject; 2016 goto reject;
@@ -2004,15 +2041,14 @@ static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp)
2004 /* 2041 /*
2005 * Send LS_ACC. 2042 * Send LS_ACC.
2006 */ 2043 */
2007 fc_seq_ls_acc(sp); 2044 fc_seq_ls_acc(fp);
2008 goto out; 2045 goto out;
2009 2046
2010unlock_reject: 2047unlock_reject:
2011 spin_unlock_bh(&ep->ex_lock); 2048 spin_unlock_bh(&ep->ex_lock);
2012reject: 2049reject:
2013 fc_seq_ls_rjt(sp, ELS_RJT_LOGIC, explan); 2050 fc_seq_ls_rjt(fp, ELS_RJT_LOGIC, explan);
2014out: 2051out:
2015 fc_frame_free(fp);
2016 if (ep) 2052 if (ep)
2017 fc_exch_release(ep); /* drop hold from fc_exch_find */ 2053 fc_exch_release(ep); /* drop hold from fc_exch_find */
2018} 2054}
@@ -2243,7 +2279,7 @@ void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
2243 fc_exch_recv_seq_resp(ema->mp, fp); 2279 fc_exch_recv_seq_resp(ema->mp, fp);
2244 else if (f_ctl & FC_FC_SEQ_CTX) 2280 else if (f_ctl & FC_FC_SEQ_CTX)
2245 fc_exch_recv_resp(ema->mp, fp); 2281 fc_exch_recv_resp(ema->mp, fp);
2246 else 2282 else /* no EX_CTX and no SEQ_CTX */
2247 fc_exch_recv_req(lport, ema->mp, fp); 2283 fc_exch_recv_req(lport, ema->mp, fp);
2248 break; 2284 break;
2249 default: 2285 default:
@@ -2281,6 +2317,9 @@ int fc_exch_init(struct fc_lport *lport)
2281 if (!lport->tt.seq_exch_abort) 2317 if (!lport->tt.seq_exch_abort)
2282 lport->tt.seq_exch_abort = fc_seq_exch_abort; 2318 lport->tt.seq_exch_abort = fc_seq_exch_abort;
2283 2319
2320 if (!lport->tt.seq_assign)
2321 lport->tt.seq_assign = fc_seq_assign;
2322
2284 return 0; 2323 return 0;
2285} 2324}
2286EXPORT_SYMBOL(fc_exch_init); 2325EXPORT_SYMBOL(fc_exch_init);
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index ec1f66c4a9d4..eac4d09314eb 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -580,10 +580,8 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
580 fsp, seq_blen, lport->lso_max, t_blen); 580 fsp, seq_blen, lport->lso_max, t_blen);
581 } 581 }
582 582
583 WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD);
584 if (t_blen > 512) 583 if (t_blen > 512)
585 t_blen &= ~(512 - 1); /* round down to block size */ 584 t_blen &= ~(512 - 1); /* round down to block size */
586 WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD); /* won't go below 256 */
587 sc = fsp->cmd; 585 sc = fsp->cmd;
588 586
589 remaining = seq_blen; 587 remaining = seq_blen;
@@ -745,7 +743,7 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
745 fh = fc_frame_header_get(fp); 743 fh = fc_frame_header_get(fp);
746 r_ctl = fh->fh_r_ctl; 744 r_ctl = fh->fh_r_ctl;
747 745
748 if (!(lport->state & LPORT_ST_READY)) 746 if (lport->state != LPORT_ST_READY)
749 goto out; 747 goto out;
750 if (fc_fcp_lock_pkt(fsp)) 748 if (fc_fcp_lock_pkt(fsp))
751 goto out; 749 goto out;
@@ -1110,7 +1108,7 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
1110 1108
1111 fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id, 1109 fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id,
1112 rpriv->local_port->port_id, FC_TYPE_FCP, 1110 rpriv->local_port->port_id, FC_TYPE_FCP,
1113 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); 1111 FC_FCTL_REQ, 0);
1114 1112
1115 seq = lport->tt.exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy, 1113 seq = lport->tt.exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy,
1116 fsp, 0); 1114 fsp, 0);
@@ -1383,7 +1381,7 @@ static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
1383 fr_seq(fp) = fsp->seq_ptr; 1381 fr_seq(fp) = fsp->seq_ptr;
1384 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id, 1382 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id,
1385 rpriv->local_port->port_id, FC_TYPE_ELS, 1383 rpriv->local_port->port_id, FC_TYPE_ELS,
1386 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); 1384 FC_FCTL_REQ, 0);
1387 if (lport->tt.elsct_send(lport, rport->port_id, fp, ELS_REC, 1385 if (lport->tt.elsct_send(lport, rport->port_id, fp, ELS_REC,
1388 fc_fcp_rec_resp, fsp, 1386 fc_fcp_rec_resp, fsp,
1389 jiffies_to_msecs(FC_SCSI_REC_TOV))) { 1387 jiffies_to_msecs(FC_SCSI_REC_TOV))) {
@@ -1641,7 +1639,7 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
1641 1639
1642 fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id, 1640 fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id,
1643 rpriv->local_port->port_id, FC_TYPE_FCP, 1641 rpriv->local_port->port_id, FC_TYPE_FCP,
1644 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); 1642 FC_FCTL_REQ, 0);
1645 1643
1646 seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp, NULL, 1644 seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp, NULL,
1647 fsp, jiffies_to_msecs(FC_SCSI_REC_TOV)); 1645 fsp, jiffies_to_msecs(FC_SCSI_REC_TOV));
@@ -1973,6 +1971,11 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
1973 break; 1971 break;
1974 } 1972 }
1975 1973
1974 if (lport->state != LPORT_ST_READY && fsp->status_code != FC_COMPLETE) {
1975 sc_cmd->result = (DID_REQUEUE << 16);
1976 FC_FCP_DBG(fsp, "Returning DID_REQUEUE to scsi-ml\n");
1977 }
1978
1976 spin_lock_irqsave(&si->scsi_queue_lock, flags); 1979 spin_lock_irqsave(&si->scsi_queue_lock, flags);
1977 list_del(&fsp->list); 1980 list_del(&fsp->list);
1978 spin_unlock_irqrestore(&si->scsi_queue_lock, flags); 1981 spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c
index 39f4b6ab04b4..6a48c28e4420 100644
--- a/drivers/scsi/libfc/fc_libfc.c
+++ b/drivers/scsi/libfc/fc_libfc.c
@@ -23,6 +23,7 @@
23#include <linux/crc32.h> 23#include <linux/crc32.h>
24 24
25#include <scsi/libfc.h> 25#include <scsi/libfc.h>
26#include <scsi/fc_encode.h>
26 27
27#include "fc_libfc.h" 28#include "fc_libfc.h"
28 29
@@ -132,3 +133,80 @@ u32 fc_copy_buffer_to_sglist(void *buf, size_t len,
132 } 133 }
133 return copy_len; 134 return copy_len;
134} 135}
136
137/**
138 * fc_fill_hdr() - fill FC header fields based on request
139 * @fp: reply frame containing header to be filled in
140 * @in_fp: request frame containing header to use in filling in reply
141 * @r_ctl: R_CTL value for header
142 * @f_ctl: F_CTL value for header, with 0 pad
143 * @seq_cnt: sequence count for the header, ignored if frame has a sequence
144 * @parm_offset: parameter / offset value
145 */
146void fc_fill_hdr(struct fc_frame *fp, const struct fc_frame *in_fp,
147 enum fc_rctl r_ctl, u32 f_ctl, u16 seq_cnt, u32 parm_offset)
148{
149 struct fc_frame_header *fh;
150 struct fc_frame_header *in_fh;
151 struct fc_seq *sp;
152 u32 fill;
153
154 fh = __fc_frame_header_get(fp);
155 in_fh = __fc_frame_header_get(in_fp);
156
157 if (f_ctl & FC_FC_END_SEQ) {
158 fill = -fr_len(fp) & 3;
159 if (fill) {
160 /* TODO, this may be a problem with fragmented skb */
161 memset(skb_put(fp_skb(fp), fill), 0, fill);
162 f_ctl |= fill;
163 }
164 fr_eof(fp) = FC_EOF_T;
165 } else {
166 WARN_ON(fr_len(fp) % 4 != 0); /* no pad to non last frame */
167 fr_eof(fp) = FC_EOF_N;
168 }
169
170 fh->fh_r_ctl = r_ctl;
171 memcpy(fh->fh_d_id, in_fh->fh_s_id, sizeof(fh->fh_d_id));
172 memcpy(fh->fh_s_id, in_fh->fh_d_id, sizeof(fh->fh_s_id));
173 fh->fh_type = in_fh->fh_type;
174 hton24(fh->fh_f_ctl, f_ctl);
175 fh->fh_ox_id = in_fh->fh_ox_id;
176 fh->fh_rx_id = in_fh->fh_rx_id;
177 fh->fh_cs_ctl = 0;
178 fh->fh_df_ctl = 0;
179 fh->fh_parm_offset = htonl(parm_offset);
180
181 sp = fr_seq(in_fp);
182 if (sp) {
183 fr_seq(fp) = sp;
184 fh->fh_seq_id = sp->id;
185 seq_cnt = sp->cnt;
186 } else {
187 fh->fh_seq_id = 0;
188 }
189 fh->fh_seq_cnt = ntohs(seq_cnt);
190 fr_sof(fp) = seq_cnt ? FC_SOF_N3 : FC_SOF_I3;
191 fr_encaps(fp) = fr_encaps(in_fp);
192}
193EXPORT_SYMBOL(fc_fill_hdr);
194
195/**
196 * fc_fill_reply_hdr() - fill FC reply header fields based on request
197 * @fp: reply frame containing header to be filled in
198 * @in_fp: request frame containing header to use in filling in reply
199 * @r_ctl: R_CTL value for reply
200 * @parm_offset: parameter / offset value
201 */
202void fc_fill_reply_hdr(struct fc_frame *fp, const struct fc_frame *in_fp,
203 enum fc_rctl r_ctl, u32 parm_offset)
204{
205 struct fc_seq *sp;
206
207 sp = fr_seq(in_fp);
208 if (sp)
209 fr_seq(fp) = fr_dev(in_fp)->tt.seq_start_next(sp);
210 fc_fill_hdr(fp, in_fp, r_ctl, FC_FCTL_RESP, 0, parm_offset);
211}
212EXPORT_SYMBOL(fc_fill_reply_hdr);
diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h
index f5c0ca4b6ef8..16d2162dda1f 100644
--- a/drivers/scsi/libfc/fc_libfc.h
+++ b/drivers/scsi/libfc/fc_libfc.h
@@ -52,7 +52,7 @@ extern unsigned int fc_debug_logging;
52#define FC_DISC_DBG(disc, fmt, args...) \ 52#define FC_DISC_DBG(disc, fmt, args...) \
53 FC_CHECK_LOGGING(FC_DISC_LOGGING, \ 53 FC_CHECK_LOGGING(FC_DISC_LOGGING, \
54 printk(KERN_INFO "host%u: disc: " fmt, \ 54 printk(KERN_INFO "host%u: disc: " fmt, \
55 (disc)->lport->host->host_no, \ 55 fc_disc_lport(disc)->host->host_no, \
56 ##args)) 56 ##args))
57 57
58#define FC_RPORT_ID_DBG(lport, port_id, fmt, args...) \ 58#define FC_RPORT_ID_DBG(lport, port_id, fmt, args...) \
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 79c9e3ccd341..6eb334a8a7fa 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -375,41 +375,36 @@ static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
375 375
376/** 376/**
377 * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report. 377 * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report.
378 * @sp: The sequence in the RLIR exchange
379 * @fp: The RLIR request frame
380 * @lport: Fibre Channel local port recieving the RLIR 378 * @lport: Fibre Channel local port recieving the RLIR
379 * @fp: The RLIR request frame
381 * 380 *
382 * Locking Note: The lport lock is expected to be held before calling 381 * Locking Note: The lport lock is expected to be held before calling
383 * this function. 382 * this function.
384 */ 383 */
385static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp, 384static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp)
386 struct fc_lport *lport)
387{ 385{
388 FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n", 386 FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
389 fc_lport_state(lport)); 387 fc_lport_state(lport));
390 388
391 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); 389 lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
392 fc_frame_free(fp); 390 fc_frame_free(fp);
393} 391}
394 392
395/** 393/**
396 * fc_lport_recv_echo_req() - Handle received ECHO request 394 * fc_lport_recv_echo_req() - Handle received ECHO request
397 * @sp: The sequence in the ECHO exchange
398 * @fp: ECHO request frame
399 * @lport: The local port recieving the ECHO 395 * @lport: The local port recieving the ECHO
396 * @fp: ECHO request frame
400 * 397 *
401 * Locking Note: The lport lock is expected to be held before calling 398 * Locking Note: The lport lock is expected to be held before calling
402 * this function. 399 * this function.
403 */ 400 */
404static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, 401static void fc_lport_recv_echo_req(struct fc_lport *lport,
405 struct fc_lport *lport) 402 struct fc_frame *in_fp)
406{ 403{
407 struct fc_frame *fp; 404 struct fc_frame *fp;
408 struct fc_exch *ep = fc_seq_exch(sp);
409 unsigned int len; 405 unsigned int len;
410 void *pp; 406 void *pp;
411 void *dp; 407 void *dp;
412 u32 f_ctl;
413 408
414 FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n", 409 FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
415 fc_lport_state(lport)); 410 fc_lport_state(lport));
@@ -425,29 +420,24 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
425 dp = fc_frame_payload_get(fp, len); 420 dp = fc_frame_payload_get(fp, len);
426 memcpy(dp, pp, len); 421 memcpy(dp, pp, len);
427 *((__be32 *)dp) = htonl(ELS_LS_ACC << 24); 422 *((__be32 *)dp) = htonl(ELS_LS_ACC << 24);
428 sp = lport->tt.seq_start_next(sp); 423 fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
429 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ; 424 lport->tt.frame_send(lport, fp);
430 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
431 FC_TYPE_ELS, f_ctl, 0);
432 lport->tt.seq_send(lport, sp, fp);
433 } 425 }
434 fc_frame_free(in_fp); 426 fc_frame_free(in_fp);
435} 427}
436 428
437/** 429/**
438 * fc_lport_recv_rnid_req() - Handle received Request Node ID data request 430 * fc_lport_recv_rnid_req() - Handle received Request Node ID data request
439 * @sp: The sequence in the RNID exchange
440 * @fp: The RNID request frame
441 * @lport: The local port recieving the RNID 431 * @lport: The local port recieving the RNID
432 * @fp: The RNID request frame
442 * 433 *
443 * Locking Note: The lport lock is expected to be held before calling 434 * Locking Note: The lport lock is expected to be held before calling
444 * this function. 435 * this function.
445 */ 436 */
446static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp, 437static void fc_lport_recv_rnid_req(struct fc_lport *lport,
447 struct fc_lport *lport) 438 struct fc_frame *in_fp)
448{ 439{
449 struct fc_frame *fp; 440 struct fc_frame *fp;
450 struct fc_exch *ep = fc_seq_exch(sp);
451 struct fc_els_rnid *req; 441 struct fc_els_rnid *req;
452 struct { 442 struct {
453 struct fc_els_rnid_resp rnid; 443 struct fc_els_rnid_resp rnid;
@@ -457,17 +447,15 @@ static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
457 struct fc_seq_els_data rjt_data; 447 struct fc_seq_els_data rjt_data;
458 u8 fmt; 448 u8 fmt;
459 size_t len; 449 size_t len;
460 u32 f_ctl;
461 450
462 FC_LPORT_DBG(lport, "Received RNID request while in state %s\n", 451 FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
463 fc_lport_state(lport)); 452 fc_lport_state(lport));
464 453
465 req = fc_frame_payload_get(in_fp, sizeof(*req)); 454 req = fc_frame_payload_get(in_fp, sizeof(*req));
466 if (!req) { 455 if (!req) {
467 rjt_data.fp = NULL;
468 rjt_data.reason = ELS_RJT_LOGIC; 456 rjt_data.reason = ELS_RJT_LOGIC;
469 rjt_data.explan = ELS_EXPL_NONE; 457 rjt_data.explan = ELS_EXPL_NONE;
470 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); 458 lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
471 } else { 459 } else {
472 fmt = req->rnid_fmt; 460 fmt = req->rnid_fmt;
473 len = sizeof(*rp); 461 len = sizeof(*rp);
@@ -490,12 +478,8 @@ static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
490 memcpy(&rp->gen, &lport->rnid_gen, 478 memcpy(&rp->gen, &lport->rnid_gen,
491 sizeof(rp->gen)); 479 sizeof(rp->gen));
492 } 480 }
493 sp = lport->tt.seq_start_next(sp); 481 fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
494 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ; 482 lport->tt.frame_send(lport, fp);
495 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
496 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
497 FC_TYPE_ELS, f_ctl, 0);
498 lport->tt.seq_send(lport, sp, fp);
499 } 483 }
500 } 484 }
501 fc_frame_free(in_fp); 485 fc_frame_free(in_fp);
@@ -503,17 +487,15 @@ static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
503 487
504/** 488/**
505 * fc_lport_recv_logo_req() - Handle received fabric LOGO request 489 * fc_lport_recv_logo_req() - Handle received fabric LOGO request
506 * @sp: The sequence in the LOGO exchange
507 * @fp: The LOGO request frame
508 * @lport: The local port recieving the LOGO 490 * @lport: The local port recieving the LOGO
491 * @fp: The LOGO request frame
509 * 492 *
510 * Locking Note: The lport lock is exected to be held before calling 493 * Locking Note: The lport lock is exected to be held before calling
511 * this function. 494 * this function.
512 */ 495 */
513static void fc_lport_recv_logo_req(struct fc_seq *sp, struct fc_frame *fp, 496static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
514 struct fc_lport *lport)
515{ 497{
516 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); 498 lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
517 fc_lport_enter_reset(lport); 499 fc_lport_enter_reset(lport);
518 fc_frame_free(fp); 500 fc_frame_free(fp);
519} 501}
@@ -755,10 +737,37 @@ static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
755} 737}
756 738
757/** 739/**
740 * fc_lport_set_port_id() - set the local port Port ID for point-to-multipoint
741 * @lport: The local port which will have its Port ID set.
742 * @port_id: The new port ID.
743 *
744 * Called by the lower-level driver when transport sets the local port_id.
745 * This is used in VN_port to VN_port mode for FCoE, and causes FLOGI and
746 * discovery to be skipped.
747 */
748void fc_lport_set_local_id(struct fc_lport *lport, u32 port_id)
749{
750 mutex_lock(&lport->lp_mutex);
751
752 fc_lport_set_port_id(lport, port_id, NULL);
753
754 switch (lport->state) {
755 case LPORT_ST_RESET:
756 case LPORT_ST_FLOGI:
757 if (port_id)
758 fc_lport_enter_ready(lport);
759 break;
760 default:
761 break;
762 }
763 mutex_unlock(&lport->lp_mutex);
764}
765EXPORT_SYMBOL(fc_lport_set_local_id);
766
767/**
758 * fc_lport_recv_flogi_req() - Receive a FLOGI request 768 * fc_lport_recv_flogi_req() - Receive a FLOGI request
759 * @sp_in: The sequence the FLOGI is on
760 * @rx_fp: The FLOGI frame
761 * @lport: The local port that recieved the request 769 * @lport: The local port that recieved the request
770 * @rx_fp: The FLOGI frame
762 * 771 *
763 * A received FLOGI request indicates a point-to-point connection. 772 * A received FLOGI request indicates a point-to-point connection.
764 * Accept it with the common service parameters indicating our N port. 773 * Accept it with the common service parameters indicating our N port.
@@ -767,26 +776,21 @@ static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
767 * Locking Note: The lport lock is expected to be held before calling 776 * Locking Note: The lport lock is expected to be held before calling
768 * this function. 777 * this function.
769 */ 778 */
770static void fc_lport_recv_flogi_req(struct fc_seq *sp_in, 779static void fc_lport_recv_flogi_req(struct fc_lport *lport,
771 struct fc_frame *rx_fp, 780 struct fc_frame *rx_fp)
772 struct fc_lport *lport)
773{ 781{
774 struct fc_frame *fp; 782 struct fc_frame *fp;
775 struct fc_frame_header *fh; 783 struct fc_frame_header *fh;
776 struct fc_seq *sp;
777 struct fc_exch *ep;
778 struct fc_els_flogi *flp; 784 struct fc_els_flogi *flp;
779 struct fc_els_flogi *new_flp; 785 struct fc_els_flogi *new_flp;
780 u64 remote_wwpn; 786 u64 remote_wwpn;
781 u32 remote_fid; 787 u32 remote_fid;
782 u32 local_fid; 788 u32 local_fid;
783 u32 f_ctl;
784 789
785 FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n", 790 FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
786 fc_lport_state(lport)); 791 fc_lport_state(lport));
787 792
788 fh = fc_frame_header_get(rx_fp); 793 remote_fid = fc_frame_sid(rx_fp);
789 remote_fid = ntoh24(fh->fh_s_id);
790 flp = fc_frame_payload_get(rx_fp, sizeof(*flp)); 794 flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
791 if (!flp) 795 if (!flp)
792 goto out; 796 goto out;
@@ -817,7 +821,6 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
817 821
818 fp = fc_frame_alloc(lport, sizeof(*flp)); 822 fp = fc_frame_alloc(lport, sizeof(*flp));
819 if (fp) { 823 if (fp) {
820 sp = lport->tt.seq_start_next(fr_seq(rx_fp));
821 new_flp = fc_frame_payload_get(fp, sizeof(*flp)); 824 new_flp = fc_frame_payload_get(fp, sizeof(*flp));
822 fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI); 825 fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI);
823 new_flp->fl_cmd = (u8) ELS_LS_ACC; 826 new_flp->fl_cmd = (u8) ELS_LS_ACC;
@@ -826,27 +829,24 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
826 * Send the response. If this fails, the originator should 829 * Send the response. If this fails, the originator should
827 * repeat the sequence. 830 * repeat the sequence.
828 */ 831 */
829 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ; 832 fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
830 ep = fc_seq_exch(sp); 833 fh = fc_frame_header_get(fp);
831 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, remote_fid, local_fid, 834 hton24(fh->fh_s_id, local_fid);
832 FC_TYPE_ELS, f_ctl, 0); 835 hton24(fh->fh_d_id, remote_fid);
833 lport->tt.seq_send(lport, sp, fp); 836 lport->tt.frame_send(lport, fp);
834 837
835 } else { 838 } else {
836 fc_lport_error(lport, fp); 839 fc_lport_error(lport, fp);
837 } 840 }
838 fc_lport_ptp_setup(lport, remote_fid, remote_wwpn, 841 fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
839 get_unaligned_be64(&flp->fl_wwnn)); 842 get_unaligned_be64(&flp->fl_wwnn));
840
841out: 843out:
842 sp = fr_seq(rx_fp);
843 fc_frame_free(rx_fp); 844 fc_frame_free(rx_fp);
844} 845}
845 846
846/** 847/**
847 * fc_lport_recv_req() - The generic lport request handler 848 * fc_lport_recv_req() - The generic lport request handler
848 * @lport: The local port that received the request 849 * @lport: The local port that received the request
849 * @sp: The sequence the request is on
850 * @fp: The request frame 850 * @fp: The request frame
851 * 851 *
852 * This function will see if the lport handles the request or 852 * This function will see if the lport handles the request or
@@ -855,11 +855,10 @@ out:
855 * Locking Note: This function should not be called with the lport 855 * Locking Note: This function should not be called with the lport
856 * lock held becuase it will grab the lock. 856 * lock held becuase it will grab the lock.
857 */ 857 */
858static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp, 858static void fc_lport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
859 struct fc_frame *fp)
860{ 859{
861 struct fc_frame_header *fh = fc_frame_header_get(fp); 860 struct fc_frame_header *fh = fc_frame_header_get(fp);
862 void (*recv) (struct fc_seq *, struct fc_frame *, struct fc_lport *); 861 void (*recv)(struct fc_lport *, struct fc_frame *);
863 862
864 mutex_lock(&lport->lp_mutex); 863 mutex_lock(&lport->lp_mutex);
865 864
@@ -878,11 +877,11 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
878 recv = lport->tt.rport_recv_req; 877 recv = lport->tt.rport_recv_req;
879 switch (fc_frame_payload_op(fp)) { 878 switch (fc_frame_payload_op(fp)) {
880 case ELS_FLOGI: 879 case ELS_FLOGI:
881 recv = fc_lport_recv_flogi_req; 880 if (!lport->point_to_multipoint)
881 recv = fc_lport_recv_flogi_req;
882 break; 882 break;
883 case ELS_LOGO: 883 case ELS_LOGO:
884 fh = fc_frame_header_get(fp); 884 if (fc_frame_sid(fp) == FC_FID_FLOGI)
885 if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI)
886 recv = fc_lport_recv_logo_req; 885 recv = fc_lport_recv_logo_req;
887 break; 886 break;
888 case ELS_RSCN: 887 case ELS_RSCN:
@@ -899,19 +898,13 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
899 break; 898 break;
900 } 899 }
901 900
902 recv(sp, fp, lport); 901 recv(lport, fp);
903 } else { 902 } else {
904 FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n", 903 FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n",
905 fr_eof(fp)); 904 fr_eof(fp));
906 fc_frame_free(fp); 905 fc_frame_free(fp);
907 } 906 }
908 mutex_unlock(&lport->lp_mutex); 907 mutex_unlock(&lport->lp_mutex);
909
910 /*
911 * The common exch_done for all request may not be good
912 * if any request requires longer hold on exhange. XXX
913 */
914 lport->tt.exch_done(sp);
915} 908}
916 909
917/** 910/**
@@ -954,7 +947,7 @@ static void fc_lport_reset_locked(struct fc_lport *lport)
954 lport->tt.exch_mgr_reset(lport, 0, 0); 947 lport->tt.exch_mgr_reset(lport, 0, 0);
955 fc_host_fabric_name(lport->host) = 0; 948 fc_host_fabric_name(lport->host) = 0;
956 949
957 if (lport->port_id) 950 if (lport->port_id && (!lport->point_to_multipoint || !lport->link_up))
958 fc_lport_set_port_id(lport, 0, NULL); 951 fc_lport_set_port_id(lport, 0, NULL);
959} 952}
960 953
@@ -1019,38 +1012,24 @@ static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
1019 PTR_ERR(fp), fc_lport_state(lport), 1012 PTR_ERR(fp), fc_lport_state(lport),
1020 lport->retry_count); 1013 lport->retry_count);
1021 1014
1022 if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) { 1015 if (PTR_ERR(fp) == -FC_EX_CLOSED)
1023 /* 1016 return;
1024 * Memory allocation failure, or the exchange timed out. 1017
1025 * Retry after delay 1018 /*
1026 */ 1019 * Memory allocation failure, or the exchange timed out
1027 if (lport->retry_count < lport->max_retry_count) { 1020 * or we received LS_RJT.
1028 lport->retry_count++; 1021 * Retry after delay
1029 if (!fp) 1022 */
1030 delay = msecs_to_jiffies(500); 1023 if (lport->retry_count < lport->max_retry_count) {
1031 else 1024 lport->retry_count++;
1032 delay = msecs_to_jiffies(lport->e_d_tov); 1025 if (!fp)
1033 1026 delay = msecs_to_jiffies(500);
1034 schedule_delayed_work(&lport->retry_work, delay); 1027 else
1035 } else { 1028 delay = msecs_to_jiffies(lport->e_d_tov);
1036 switch (lport->state) { 1029
1037 case LPORT_ST_DISABLED: 1030 schedule_delayed_work(&lport->retry_work, delay);
1038 case LPORT_ST_READY: 1031 } else
1039 case LPORT_ST_RESET: 1032 fc_lport_enter_reset(lport);
1040 case LPORT_ST_RNN_ID:
1041 case LPORT_ST_RSNN_NN:
1042 case LPORT_ST_RSPN_ID:
1043 case LPORT_ST_RFT_ID:
1044 case LPORT_ST_RFF_ID:
1045 case LPORT_ST_SCR:
1046 case LPORT_ST_DNS:
1047 case LPORT_ST_FLOGI:
1048 case LPORT_ST_LOGO:
1049 fc_lport_enter_reset(lport);
1050 break;
1051 }
1052 }
1053 }
1054} 1033}
1055 1034
1056/** 1035/**
@@ -1440,7 +1419,6 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1440 void *lp_arg) 1419 void *lp_arg)
1441{ 1420{
1442 struct fc_lport *lport = lp_arg; 1421 struct fc_lport *lport = lp_arg;
1443 struct fc_frame_header *fh;
1444 struct fc_els_flogi *flp; 1422 struct fc_els_flogi *flp;
1445 u32 did; 1423 u32 did;
1446 u16 csp_flags; 1424 u16 csp_flags;
@@ -1468,9 +1446,14 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1468 goto err; 1446 goto err;
1469 } 1447 }
1470 1448
1471 fh = fc_frame_header_get(fp); 1449 did = fc_frame_did(fp);
1472 did = ntoh24(fh->fh_d_id); 1450
1473 if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) { 1451 if (!did) {
1452 FC_LPORT_DBG(lport, "Bad FLOGI response\n");
1453 goto out;
1454 }
1455
1456 if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
1474 flp = fc_frame_payload_get(fp, sizeof(*flp)); 1457 flp = fc_frame_payload_get(fp, sizeof(*flp));
1475 if (flp) { 1458 if (flp) {
1476 mfs = ntohs(flp->fl_csp.sp_bb_data) & 1459 mfs = ntohs(flp->fl_csp.sp_bb_data) &
@@ -1495,7 +1478,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1495 "Port (%6.6x) entered " 1478 "Port (%6.6x) entered "
1496 "point-to-point mode\n", 1479 "point-to-point mode\n",
1497 lport->host->host_no, did); 1480 lport->host->host_no, did);
1498 fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id), 1481 fc_lport_ptp_setup(lport, fc_frame_sid(fp),
1499 get_unaligned_be64( 1482 get_unaligned_be64(
1500 &flp->fl_wwpn), 1483 &flp->fl_wwpn),
1501 get_unaligned_be64( 1484 get_unaligned_be64(
@@ -1509,9 +1492,8 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1509 fc_lport_enter_dns(lport); 1492 fc_lport_enter_dns(lport);
1510 } 1493 }
1511 } 1494 }
1512 } else { 1495 } else
1513 FC_LPORT_DBG(lport, "Bad FLOGI response\n"); 1496 fc_lport_error(lport, fp);
1514 }
1515 1497
1516out: 1498out:
1517 fc_frame_free(fp); 1499 fc_frame_free(fp);
@@ -1536,6 +1518,12 @@ void fc_lport_enter_flogi(struct fc_lport *lport)
1536 1518
1537 fc_lport_state_enter(lport, LPORT_ST_FLOGI); 1519 fc_lport_state_enter(lport, LPORT_ST_FLOGI);
1538 1520
1521 if (lport->point_to_multipoint) {
1522 if (lport->port_id)
1523 fc_lport_enter_ready(lport);
1524 return;
1525 }
1526
1539 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi)); 1527 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
1540 if (!fp) 1528 if (!fp)
1541 return fc_lport_error(lport, fp); 1529 return fc_lport_error(lport, fp);
@@ -1701,8 +1689,7 @@ static int fc_lport_els_request(struct fc_bsg_job *job,
1701 hton24(fh->fh_d_id, did); 1689 hton24(fh->fh_d_id, did);
1702 hton24(fh->fh_s_id, lport->port_id); 1690 hton24(fh->fh_s_id, lport->port_id);
1703 fh->fh_type = FC_TYPE_ELS; 1691 fh->fh_type = FC_TYPE_ELS;
1704 hton24(fh->fh_f_ctl, FC_FC_FIRST_SEQ | 1692 hton24(fh->fh_f_ctl, FC_FCTL_REQ);
1705 FC_FC_END_SEQ | FC_FC_SEQ_INIT);
1706 fh->fh_cs_ctl = 0; 1693 fh->fh_cs_ctl = 0;
1707 fh->fh_df_ctl = 0; 1694 fh->fh_df_ctl = 0;
1708 fh->fh_parm_offset = 0; 1695 fh->fh_parm_offset = 0;
@@ -1761,8 +1748,7 @@ static int fc_lport_ct_request(struct fc_bsg_job *job,
1761 hton24(fh->fh_d_id, did); 1748 hton24(fh->fh_d_id, did);
1762 hton24(fh->fh_s_id, lport->port_id); 1749 hton24(fh->fh_s_id, lport->port_id);
1763 fh->fh_type = FC_TYPE_CT; 1750 fh->fh_type = FC_TYPE_CT;
1764 hton24(fh->fh_f_ctl, FC_FC_FIRST_SEQ | 1751 hton24(fh->fh_f_ctl, FC_FCTL_REQ);
1765 FC_FC_END_SEQ | FC_FC_SEQ_INIT);
1766 fh->fh_cs_ctl = 0; 1752 fh->fh_cs_ctl = 0;
1767 fh->fh_df_ctl = 0; 1753 fh->fh_df_ctl = 0;
1768 fh->fh_parm_offset = 0; 1754 fh->fh_parm_offset = 0;
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 39e440f0f54a..25479cc7f170 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -60,6 +60,7 @@
60 60
61struct workqueue_struct *rport_event_queue; 61struct workqueue_struct *rport_event_queue;
62 62
63static void fc_rport_enter_flogi(struct fc_rport_priv *);
63static void fc_rport_enter_plogi(struct fc_rport_priv *); 64static void fc_rport_enter_plogi(struct fc_rport_priv *);
64static void fc_rport_enter_prli(struct fc_rport_priv *); 65static void fc_rport_enter_prli(struct fc_rport_priv *);
65static void fc_rport_enter_rtv(struct fc_rport_priv *); 66static void fc_rport_enter_rtv(struct fc_rport_priv *);
@@ -67,14 +68,10 @@ static void fc_rport_enter_ready(struct fc_rport_priv *);
67static void fc_rport_enter_logo(struct fc_rport_priv *); 68static void fc_rport_enter_logo(struct fc_rport_priv *);
68static void fc_rport_enter_adisc(struct fc_rport_priv *); 69static void fc_rport_enter_adisc(struct fc_rport_priv *);
69 70
70static void fc_rport_recv_plogi_req(struct fc_lport *, 71static void fc_rport_recv_plogi_req(struct fc_lport *, struct fc_frame *);
71 struct fc_seq *, struct fc_frame *); 72static void fc_rport_recv_prli_req(struct fc_rport_priv *, struct fc_frame *);
72static void fc_rport_recv_prli_req(struct fc_rport_priv *, 73static void fc_rport_recv_prlo_req(struct fc_rport_priv *, struct fc_frame *);
73 struct fc_seq *, struct fc_frame *); 74static void fc_rport_recv_logo_req(struct fc_lport *, struct fc_frame *);
74static void fc_rport_recv_prlo_req(struct fc_rport_priv *,
75 struct fc_seq *, struct fc_frame *);
76static void fc_rport_recv_logo_req(struct fc_lport *,
77 struct fc_seq *, struct fc_frame *);
78static void fc_rport_timeout(struct work_struct *); 75static void fc_rport_timeout(struct work_struct *);
79static void fc_rport_error(struct fc_rport_priv *, struct fc_frame *); 76static void fc_rport_error(struct fc_rport_priv *, struct fc_frame *);
80static void fc_rport_error_retry(struct fc_rport_priv *, struct fc_frame *); 77static void fc_rport_error_retry(struct fc_rport_priv *, struct fc_frame *);
@@ -82,27 +79,29 @@ static void fc_rport_work(struct work_struct *);
82 79
83static const char *fc_rport_state_names[] = { 80static const char *fc_rport_state_names[] = {
84 [RPORT_ST_INIT] = "Init", 81 [RPORT_ST_INIT] = "Init",
82 [RPORT_ST_FLOGI] = "FLOGI",
83 [RPORT_ST_PLOGI_WAIT] = "PLOGI_WAIT",
85 [RPORT_ST_PLOGI] = "PLOGI", 84 [RPORT_ST_PLOGI] = "PLOGI",
86 [RPORT_ST_PRLI] = "PRLI", 85 [RPORT_ST_PRLI] = "PRLI",
87 [RPORT_ST_RTV] = "RTV", 86 [RPORT_ST_RTV] = "RTV",
88 [RPORT_ST_READY] = "Ready", 87 [RPORT_ST_READY] = "Ready",
89 [RPORT_ST_LOGO] = "LOGO",
90 [RPORT_ST_ADISC] = "ADISC", 88 [RPORT_ST_ADISC] = "ADISC",
91 [RPORT_ST_DELETE] = "Delete", 89 [RPORT_ST_DELETE] = "Delete",
92 [RPORT_ST_RESTART] = "Restart",
93}; 90};
94 91
95/** 92/**
96 * fc_rport_lookup() - Lookup a remote port by port_id 93 * fc_rport_lookup() - Lookup a remote port by port_id
97 * @lport: The local port to lookup the remote port on 94 * @lport: The local port to lookup the remote port on
98 * @port_id: The remote port ID to look up 95 * @port_id: The remote port ID to look up
96 *
97 * The caller must hold either disc_mutex or rcu_read_lock().
99 */ 98 */
100static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport, 99static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
101 u32 port_id) 100 u32 port_id)
102{ 101{
103 struct fc_rport_priv *rdata; 102 struct fc_rport_priv *rdata;
104 103
105 list_for_each_entry(rdata, &lport->disc.rports, peers) 104 list_for_each_entry_rcu(rdata, &lport->disc.rports, peers)
106 if (rdata->ids.port_id == port_id) 105 if (rdata->ids.port_id == port_id)
107 return rdata; 106 return rdata;
108 return NULL; 107 return NULL;
@@ -126,7 +125,7 @@ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
126 if (rdata) 125 if (rdata)
127 return rdata; 126 return rdata;
128 127
129 rdata = kzalloc(sizeof(*rdata), GFP_KERNEL); 128 rdata = kzalloc(sizeof(*rdata) + lport->rport_priv_size, GFP_KERNEL);
130 if (!rdata) 129 if (!rdata)
131 return NULL; 130 return NULL;
132 131
@@ -147,11 +146,23 @@ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
147 INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout); 146 INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
148 INIT_WORK(&rdata->event_work, fc_rport_work); 147 INIT_WORK(&rdata->event_work, fc_rport_work);
149 if (port_id != FC_FID_DIR_SERV) 148 if (port_id != FC_FID_DIR_SERV)
150 list_add(&rdata->peers, &lport->disc.rports); 149 list_add_rcu(&rdata->peers, &lport->disc.rports);
151 return rdata; 150 return rdata;
152} 151}
153 152
154/** 153/**
154 * fc_rport_free_rcu() - Free a remote port
155 * @rcu: The rcu_head structure inside the remote port
156 */
157static void fc_rport_free_rcu(struct rcu_head *rcu)
158{
159 struct fc_rport_priv *rdata;
160
161 rdata = container_of(rcu, struct fc_rport_priv, rcu);
162 kfree(rdata);
163}
164
165/**
155 * fc_rport_destroy() - Free a remote port after last reference is released 166 * fc_rport_destroy() - Free a remote port after last reference is released
156 * @kref: The remote port's kref 167 * @kref: The remote port's kref
157 */ 168 */
@@ -160,7 +171,7 @@ static void fc_rport_destroy(struct kref *kref)
160 struct fc_rport_priv *rdata; 171 struct fc_rport_priv *rdata;
161 172
162 rdata = container_of(kref, struct fc_rport_priv, kref); 173 rdata = container_of(kref, struct fc_rport_priv, kref);
163 kfree(rdata); 174 call_rcu(&rdata->rcu, fc_rport_free_rcu);
164} 175}
165 176
166/** 177/**
@@ -194,7 +205,7 @@ EXPORT_SYMBOL(fc_set_rport_loss_tmo);
194/** 205/**
195 * fc_plogi_get_maxframe() - Get the maximum payload from the common service 206 * fc_plogi_get_maxframe() - Get the maximum payload from the common service
196 * parameters in a FLOGI frame 207 * parameters in a FLOGI frame
197 * @flp: The FLOGI payload 208 * @flp: The FLOGI or PLOGI payload
198 * @maxval: The maximum frame size upper limit; this may be less than what 209 * @maxval: The maximum frame size upper limit; this may be less than what
199 * is in the service parameters 210 * is in the service parameters
200 */ 211 */
@@ -246,7 +257,6 @@ static void fc_rport_work(struct work_struct *work)
246 struct fc_rport_operations *rport_ops; 257 struct fc_rport_operations *rport_ops;
247 struct fc_rport_identifiers ids; 258 struct fc_rport_identifiers ids;
248 struct fc_rport *rport; 259 struct fc_rport *rport;
249 int restart = 0;
250 260
251 mutex_lock(&rdata->rp_mutex); 261 mutex_lock(&rdata->rp_mutex);
252 event = rdata->event; 262 event = rdata->event;
@@ -259,6 +269,7 @@ static void fc_rport_work(struct work_struct *work)
259 case RPORT_EV_READY: 269 case RPORT_EV_READY:
260 ids = rdata->ids; 270 ids = rdata->ids;
261 rdata->event = RPORT_EV_NONE; 271 rdata->event = RPORT_EV_NONE;
272 rdata->major_retries = 0;
262 kref_get(&rdata->kref); 273 kref_get(&rdata->kref);
263 mutex_unlock(&rdata->rp_mutex); 274 mutex_unlock(&rdata->rp_mutex);
264 275
@@ -298,24 +309,6 @@ static void fc_rport_work(struct work_struct *work)
298 port_id = rdata->ids.port_id; 309 port_id = rdata->ids.port_id;
299 mutex_unlock(&rdata->rp_mutex); 310 mutex_unlock(&rdata->rp_mutex);
300 311
301 if (port_id != FC_FID_DIR_SERV) {
302 /*
303 * We must drop rp_mutex before taking disc_mutex.
304 * Re-evaluate state to allow for restart.
305 * A transition to RESTART state must only happen
306 * while disc_mutex is held and rdata is on the list.
307 */
308 mutex_lock(&lport->disc.disc_mutex);
309 mutex_lock(&rdata->rp_mutex);
310 if (rdata->rp_state == RPORT_ST_RESTART)
311 restart = 1;
312 else
313 list_del(&rdata->peers);
314 rdata->event = RPORT_EV_NONE;
315 mutex_unlock(&rdata->rp_mutex);
316 mutex_unlock(&lport->disc.disc_mutex);
317 }
318
319 if (rport_ops && rport_ops->event_callback) { 312 if (rport_ops && rport_ops->event_callback) {
320 FC_RPORT_DBG(rdata, "callback ev %d\n", event); 313 FC_RPORT_DBG(rdata, "callback ev %d\n", event);
321 rport_ops->event_callback(lport, rdata, event); 314 rport_ops->event_callback(lport, rdata, event);
@@ -336,13 +329,37 @@ static void fc_rport_work(struct work_struct *work)
336 mutex_unlock(&rdata->rp_mutex); 329 mutex_unlock(&rdata->rp_mutex);
337 fc_remote_port_delete(rport); 330 fc_remote_port_delete(rport);
338 } 331 }
339 if (restart) { 332
340 mutex_lock(&rdata->rp_mutex); 333 mutex_lock(&lport->disc.disc_mutex);
341 FC_RPORT_DBG(rdata, "work restart\n"); 334 mutex_lock(&rdata->rp_mutex);
342 fc_rport_enter_plogi(rdata); 335 if (rdata->rp_state == RPORT_ST_DELETE) {
336 if (port_id == FC_FID_DIR_SERV) {
337 rdata->event = RPORT_EV_NONE;
338 mutex_unlock(&rdata->rp_mutex);
339 } else if ((rdata->flags & FC_RP_STARTED) &&
340 rdata->major_retries <
341 lport->max_rport_retry_count) {
342 rdata->major_retries++;
343 rdata->event = RPORT_EV_NONE;
344 FC_RPORT_DBG(rdata, "work restart\n");
345 fc_rport_enter_flogi(rdata);
346 mutex_unlock(&rdata->rp_mutex);
347 } else {
348 FC_RPORT_DBG(rdata, "work delete\n");
349 list_del_rcu(&rdata->peers);
350 mutex_unlock(&rdata->rp_mutex);
351 kref_put(&rdata->kref, lport->tt.rport_destroy);
352 }
353 } else {
354 /*
355 * Re-open for events. Reissue READY event if ready.
356 */
357 rdata->event = RPORT_EV_NONE;
358 if (rdata->rp_state == RPORT_ST_READY)
359 fc_rport_enter_ready(rdata);
343 mutex_unlock(&rdata->rp_mutex); 360 mutex_unlock(&rdata->rp_mutex);
344 } else 361 }
345 kref_put(&rdata->kref, lport->tt.rport_destroy); 362 mutex_unlock(&lport->disc.disc_mutex);
346 break; 363 break;
347 364
348 default: 365 default:
@@ -367,20 +384,18 @@ int fc_rport_login(struct fc_rport_priv *rdata)
367{ 384{
368 mutex_lock(&rdata->rp_mutex); 385 mutex_lock(&rdata->rp_mutex);
369 386
387 rdata->flags |= FC_RP_STARTED;
370 switch (rdata->rp_state) { 388 switch (rdata->rp_state) {
371 case RPORT_ST_READY: 389 case RPORT_ST_READY:
372 FC_RPORT_DBG(rdata, "ADISC port\n"); 390 FC_RPORT_DBG(rdata, "ADISC port\n");
373 fc_rport_enter_adisc(rdata); 391 fc_rport_enter_adisc(rdata);
374 break; 392 break;
375 case RPORT_ST_RESTART:
376 break;
377 case RPORT_ST_DELETE: 393 case RPORT_ST_DELETE:
378 FC_RPORT_DBG(rdata, "Restart deleted port\n"); 394 FC_RPORT_DBG(rdata, "Restart deleted port\n");
379 fc_rport_state_enter(rdata, RPORT_ST_RESTART);
380 break; 395 break;
381 default: 396 default:
382 FC_RPORT_DBG(rdata, "Login to port\n"); 397 FC_RPORT_DBG(rdata, "Login to port\n");
383 fc_rport_enter_plogi(rdata); 398 fc_rport_enter_flogi(rdata);
384 break; 399 break;
385 } 400 }
386 mutex_unlock(&rdata->rp_mutex); 401 mutex_unlock(&rdata->rp_mutex);
@@ -431,15 +446,12 @@ int fc_rport_logoff(struct fc_rport_priv *rdata)
431 446
432 FC_RPORT_DBG(rdata, "Remove port\n"); 447 FC_RPORT_DBG(rdata, "Remove port\n");
433 448
449 rdata->flags &= ~FC_RP_STARTED;
434 if (rdata->rp_state == RPORT_ST_DELETE) { 450 if (rdata->rp_state == RPORT_ST_DELETE) {
435 FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n"); 451 FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n");
436 goto out; 452 goto out;
437 } 453 }
438 454 fc_rport_enter_logo(rdata);
439 if (rdata->rp_state == RPORT_ST_RESTART)
440 FC_RPORT_DBG(rdata, "Port in Restart state, deleting\n");
441 else
442 fc_rport_enter_logo(rdata);
443 455
444 /* 456 /*
445 * Change the state to Delete so that we discard 457 * Change the state to Delete so that we discard
@@ -485,6 +497,9 @@ static void fc_rport_timeout(struct work_struct *work)
485 mutex_lock(&rdata->rp_mutex); 497 mutex_lock(&rdata->rp_mutex);
486 498
487 switch (rdata->rp_state) { 499 switch (rdata->rp_state) {
500 case RPORT_ST_FLOGI:
501 fc_rport_enter_flogi(rdata);
502 break;
488 case RPORT_ST_PLOGI: 503 case RPORT_ST_PLOGI:
489 fc_rport_enter_plogi(rdata); 504 fc_rport_enter_plogi(rdata);
490 break; 505 break;
@@ -494,16 +509,13 @@ static void fc_rport_timeout(struct work_struct *work)
494 case RPORT_ST_RTV: 509 case RPORT_ST_RTV:
495 fc_rport_enter_rtv(rdata); 510 fc_rport_enter_rtv(rdata);
496 break; 511 break;
497 case RPORT_ST_LOGO:
498 fc_rport_enter_logo(rdata);
499 break;
500 case RPORT_ST_ADISC: 512 case RPORT_ST_ADISC:
501 fc_rport_enter_adisc(rdata); 513 fc_rport_enter_adisc(rdata);
502 break; 514 break;
515 case RPORT_ST_PLOGI_WAIT:
503 case RPORT_ST_READY: 516 case RPORT_ST_READY:
504 case RPORT_ST_INIT: 517 case RPORT_ST_INIT:
505 case RPORT_ST_DELETE: 518 case RPORT_ST_DELETE:
506 case RPORT_ST_RESTART:
507 break; 519 break;
508 } 520 }
509 521
@@ -525,8 +537,9 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
525 fc_rport_state(rdata), rdata->retries); 537 fc_rport_state(rdata), rdata->retries);
526 538
527 switch (rdata->rp_state) { 539 switch (rdata->rp_state) {
540 case RPORT_ST_FLOGI:
528 case RPORT_ST_PLOGI: 541 case RPORT_ST_PLOGI:
529 case RPORT_ST_LOGO: 542 rdata->flags &= ~FC_RP_STARTED;
530 fc_rport_enter_delete(rdata, RPORT_EV_FAILED); 543 fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
531 break; 544 break;
532 case RPORT_ST_RTV: 545 case RPORT_ST_RTV:
@@ -536,8 +549,8 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
536 case RPORT_ST_ADISC: 549 case RPORT_ST_ADISC:
537 fc_rport_enter_logo(rdata); 550 fc_rport_enter_logo(rdata);
538 break; 551 break;
552 case RPORT_ST_PLOGI_WAIT:
539 case RPORT_ST_DELETE: 553 case RPORT_ST_DELETE:
540 case RPORT_ST_RESTART:
541 case RPORT_ST_READY: 554 case RPORT_ST_READY:
542 case RPORT_ST_INIT: 555 case RPORT_ST_INIT:
543 break; 556 break;
@@ -579,7 +592,250 @@ static void fc_rport_error_retry(struct fc_rport_priv *rdata,
579} 592}
580 593
581/** 594/**
582 * fc_rport_plogi_recv_resp() - Handler for ELS PLOGI responses 595 * fc_rport_login_complete() - Handle parameters and completion of p-mp login.
596 * @rdata: The remote port which we logged into or which logged into us.
597 * @fp: The FLOGI or PLOGI request or response frame
598 *
599 * Returns non-zero error if a problem is detected with the frame.
600 * Does not free the frame.
601 *
602 * This is only used in point-to-multipoint mode for FIP currently.
603 */
604static int fc_rport_login_complete(struct fc_rport_priv *rdata,
605 struct fc_frame *fp)
606{
607 struct fc_lport *lport = rdata->local_port;
608 struct fc_els_flogi *flogi;
609 unsigned int e_d_tov;
610 u16 csp_flags;
611
612 flogi = fc_frame_payload_get(fp, sizeof(*flogi));
613 if (!flogi)
614 return -EINVAL;
615
616 csp_flags = ntohs(flogi->fl_csp.sp_features);
617
618 if (fc_frame_payload_op(fp) == ELS_FLOGI) {
619 if (csp_flags & FC_SP_FT_FPORT) {
620 FC_RPORT_DBG(rdata, "Fabric bit set in FLOGI\n");
621 return -EINVAL;
622 }
623 } else {
624
625 /*
626 * E_D_TOV is not valid on an incoming FLOGI request.
627 */
628 e_d_tov = ntohl(flogi->fl_csp.sp_e_d_tov);
629 if (csp_flags & FC_SP_FT_EDTR)
630 e_d_tov /= 1000000;
631 if (e_d_tov > rdata->e_d_tov)
632 rdata->e_d_tov = e_d_tov;
633 }
634 rdata->maxframe_size = fc_plogi_get_maxframe(flogi, lport->mfs);
635 return 0;
636}
637
638/**
639 * fc_rport_flogi_resp() - Handle response to FLOGI request for p-mp mode
640 * @sp: The sequence that the FLOGI was on
641 * @fp: The FLOGI response frame
642 * @rp_arg: The remote port that received the FLOGI response
643 */
644void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
645 void *rp_arg)
646{
647 struct fc_rport_priv *rdata = rp_arg;
648 struct fc_lport *lport = rdata->local_port;
649 struct fc_els_flogi *flogi;
650 unsigned int r_a_tov;
651
652 FC_RPORT_DBG(rdata, "Received a FLOGI %s\n", fc_els_resp_type(fp));
653
654 if (fp == ERR_PTR(-FC_EX_CLOSED))
655 return;
656
657 mutex_lock(&rdata->rp_mutex);
658
659 if (rdata->rp_state != RPORT_ST_FLOGI) {
660 FC_RPORT_DBG(rdata, "Received a FLOGI response, but in state "
661 "%s\n", fc_rport_state(rdata));
662 if (IS_ERR(fp))
663 goto err;
664 goto out;
665 }
666
667 if (IS_ERR(fp)) {
668 fc_rport_error(rdata, fp);
669 goto err;
670 }
671
672 if (fc_frame_payload_op(fp) != ELS_LS_ACC)
673 goto bad;
674 if (fc_rport_login_complete(rdata, fp))
675 goto bad;
676
677 flogi = fc_frame_payload_get(fp, sizeof(*flogi));
678 if (!flogi)
679 goto bad;
680 r_a_tov = ntohl(flogi->fl_csp.sp_r_a_tov);
681 if (r_a_tov > rdata->r_a_tov)
682 rdata->r_a_tov = r_a_tov;
683
684 if (rdata->ids.port_name < lport->wwpn)
685 fc_rport_enter_plogi(rdata);
686 else
687 fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT);
688out:
689 fc_frame_free(fp);
690err:
691 mutex_unlock(&rdata->rp_mutex);
692 kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
693 return;
694bad:
695 FC_RPORT_DBG(rdata, "Bad FLOGI response\n");
696 fc_rport_error_retry(rdata, fp);
697 goto out;
698}
699
700/**
701 * fc_rport_enter_flogi() - Send a FLOGI request to the remote port for p-mp
702 * @rdata: The remote port to send a FLOGI to
703 *
704 * Locking Note: The rport lock is expected to be held before calling
705 * this routine.
706 */
707static void fc_rport_enter_flogi(struct fc_rport_priv *rdata)
708{
709 struct fc_lport *lport = rdata->local_port;
710 struct fc_frame *fp;
711
712 if (!lport->point_to_multipoint)
713 return fc_rport_enter_plogi(rdata);
714
715 FC_RPORT_DBG(rdata, "Entered FLOGI state from %s state\n",
716 fc_rport_state(rdata));
717
718 fc_rport_state_enter(rdata, RPORT_ST_FLOGI);
719
720 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
721 if (!fp)
722 return fc_rport_error_retry(rdata, fp);
723
724 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_FLOGI,
725 fc_rport_flogi_resp, rdata,
726 2 * lport->r_a_tov))
727 fc_rport_error_retry(rdata, NULL);
728 else
729 kref_get(&rdata->kref);
730}
731
732/**
733 * fc_rport_recv_flogi_req() - Handle Fabric Login (FLOGI) request in p-mp mode
734 * @lport: The local port that received the PLOGI request
735 * @rx_fp: The PLOGI request frame
736 */
737static void fc_rport_recv_flogi_req(struct fc_lport *lport,
738 struct fc_frame *rx_fp)
739{
740 struct fc_disc *disc;
741 struct fc_els_flogi *flp;
742 struct fc_rport_priv *rdata;
743 struct fc_frame *fp = rx_fp;
744 struct fc_seq_els_data rjt_data;
745 u32 sid;
746
747 sid = fc_frame_sid(fp);
748
749 FC_RPORT_ID_DBG(lport, sid, "Received FLOGI request\n");
750
751 disc = &lport->disc;
752 mutex_lock(&disc->disc_mutex);
753
754 if (!lport->point_to_multipoint) {
755 rjt_data.reason = ELS_RJT_UNSUP;
756 rjt_data.explan = ELS_EXPL_NONE;
757 goto reject;
758 }
759
760 flp = fc_frame_payload_get(fp, sizeof(*flp));
761 if (!flp) {
762 rjt_data.reason = ELS_RJT_LOGIC;
763 rjt_data.explan = ELS_EXPL_INV_LEN;
764 goto reject;
765 }
766
767 rdata = lport->tt.rport_lookup(lport, sid);
768 if (!rdata) {
769 rjt_data.reason = ELS_RJT_FIP;
770 rjt_data.explan = ELS_EXPL_NOT_NEIGHBOR;
771 goto reject;
772 }
773 mutex_lock(&rdata->rp_mutex);
774
775 FC_RPORT_DBG(rdata, "Received FLOGI in %s state\n",
776 fc_rport_state(rdata));
777
778 switch (rdata->rp_state) {
779 case RPORT_ST_INIT:
780 case RPORT_ST_DELETE:
781 mutex_unlock(&rdata->rp_mutex);
782 rjt_data.reason = ELS_RJT_FIP;
783 rjt_data.explan = ELS_EXPL_NOT_NEIGHBOR;
784 goto reject;
785 case RPORT_ST_FLOGI:
786 case RPORT_ST_PLOGI_WAIT:
787 case RPORT_ST_PLOGI:
788 break;
789 case RPORT_ST_PRLI:
790 case RPORT_ST_RTV:
791 case RPORT_ST_READY:
792 case RPORT_ST_ADISC:
793 /*
794 * Set the remote port to be deleted and to then restart.
795 * This queues work to be sure exchanges are reset.
796 */
797 fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
798 mutex_unlock(&rdata->rp_mutex);
799 rjt_data.reason = ELS_RJT_BUSY;
800 rjt_data.explan = ELS_EXPL_NONE;
801 goto reject;
802 }
803 if (fc_rport_login_complete(rdata, fp)) {
804 mutex_unlock(&rdata->rp_mutex);
805 rjt_data.reason = ELS_RJT_LOGIC;
806 rjt_data.explan = ELS_EXPL_NONE;
807 goto reject;
808 }
809
810 fp = fc_frame_alloc(lport, sizeof(*flp));
811 if (!fp)
812 goto out;
813
814 fc_flogi_fill(lport, fp);
815 flp = fc_frame_payload_get(fp, sizeof(*flp));
816 flp->fl_cmd = ELS_LS_ACC;
817
818 fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
819 lport->tt.frame_send(lport, fp);
820
821 if (rdata->ids.port_name < lport->wwpn)
822 fc_rport_enter_plogi(rdata);
823 else
824 fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT);
825out:
826 mutex_unlock(&rdata->rp_mutex);
827 mutex_unlock(&disc->disc_mutex);
828 fc_frame_free(rx_fp);
829 return;
830
831reject:
832 mutex_unlock(&disc->disc_mutex);
833 lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
834 fc_frame_free(rx_fp);
835}
836
837/**
838 * fc_rport_plogi_resp() - Handler for ELS PLOGI responses
583 * @sp: The sequence the PLOGI is on 839 * @sp: The sequence the PLOGI is on
584 * @fp: The PLOGI response frame 840 * @fp: The PLOGI response frame
585 * @rdata_arg: The remote port that sent the PLOGI response 841 * @rdata_arg: The remote port that sent the PLOGI response
@@ -594,7 +850,6 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
594 struct fc_rport_priv *rdata = rdata_arg; 850 struct fc_rport_priv *rdata = rdata_arg;
595 struct fc_lport *lport = rdata->local_port; 851 struct fc_lport *lport = rdata->local_port;
596 struct fc_els_flogi *plp = NULL; 852 struct fc_els_flogi *plp = NULL;
597 unsigned int tov;
598 u16 csp_seq; 853 u16 csp_seq;
599 u16 cssp_seq; 854 u16 cssp_seq;
600 u8 op; 855 u8 op;
@@ -622,11 +877,8 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
622 rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn); 877 rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn);
623 rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn); 878 rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn);
624 879
625 tov = ntohl(plp->fl_csp.sp_e_d_tov); 880 if (lport->point_to_multipoint)
626 if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR) 881 fc_rport_login_complete(rdata, fp);
627 tov /= 1000000;
628 if (tov > rdata->e_d_tov)
629 rdata->e_d_tov = tov;
630 csp_seq = ntohs(plp->fl_csp.sp_tot_seq); 882 csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
631 cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq); 883 cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq);
632 if (cssp_seq < csp_seq) 884 if (cssp_seq < csp_seq)
@@ -664,6 +916,7 @@ static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
664 rdata->maxframe_size = FC_MIN_MAX_PAYLOAD; 916 rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
665 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi)); 917 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
666 if (!fp) { 918 if (!fp) {
919 FC_RPORT_DBG(rdata, "%s frame alloc failed\n", __func__);
667 fc_rport_error_retry(rdata, fp); 920 fc_rport_error_retry(rdata, fp);
668 return; 921 return;
669 } 922 }
@@ -698,6 +951,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
698 u32 roles = FC_RPORT_ROLE_UNKNOWN; 951 u32 roles = FC_RPORT_ROLE_UNKNOWN;
699 u32 fcp_parm = 0; 952 u32 fcp_parm = 0;
700 u8 op; 953 u8 op;
954 u8 resp_code = 0;
701 955
702 mutex_lock(&rdata->rp_mutex); 956 mutex_lock(&rdata->rp_mutex);
703 957
@@ -722,11 +976,25 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
722 op = fc_frame_payload_op(fp); 976 op = fc_frame_payload_op(fp);
723 if (op == ELS_LS_ACC) { 977 if (op == ELS_LS_ACC) {
724 pp = fc_frame_payload_get(fp, sizeof(*pp)); 978 pp = fc_frame_payload_get(fp, sizeof(*pp));
725 if (pp && pp->prli.prli_spp_len >= sizeof(pp->spp)) { 979 if (!pp)
726 fcp_parm = ntohl(pp->spp.spp_params); 980 goto out;
727 if (fcp_parm & FCP_SPPF_RETRY) 981
728 rdata->flags |= FC_RP_FLAGS_RETRY; 982 resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK);
983 FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x\n",
984 pp->spp.spp_flags);
985 if (resp_code != FC_SPP_RESP_ACK) {
986 if (resp_code == FC_SPP_RESP_CONF)
987 fc_rport_error(rdata, fp);
988 else
989 fc_rport_error_retry(rdata, fp);
990 goto out;
729 } 991 }
992 if (pp->prli.prli_spp_len < sizeof(pp->spp))
993 goto out;
994
995 fcp_parm = ntohl(pp->spp.spp_params);
996 if (fcp_parm & FCP_SPPF_RETRY)
997 rdata->flags |= FC_RP_FLAGS_RETRY;
730 998
731 rdata->supported_classes = FC_COS_CLASS3; 999 rdata->supported_classes = FC_COS_CLASS3;
732 if (fcp_parm & FCP_SPPF_INIT_FCN) 1000 if (fcp_parm & FCP_SPPF_INIT_FCN)
@@ -739,55 +1007,9 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
739 1007
740 } else { 1008 } else {
741 FC_RPORT_DBG(rdata, "Bad ELS response for PRLI command\n"); 1009 FC_RPORT_DBG(rdata, "Bad ELS response for PRLI command\n");
742 fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
743 }
744
745out:
746 fc_frame_free(fp);
747err:
748 mutex_unlock(&rdata->rp_mutex);
749 kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
750}
751
752/**
753 * fc_rport_logo_resp() - Handler for logout (LOGO) responses
754 * @sp: The sequence the LOGO was on
755 * @fp: The LOGO response frame
756 * @rdata_arg: The remote port that sent the LOGO response
757 *
758 * Locking Note: This function will be called without the rport lock
759 * held, but it will lock, call an _enter_* function or fc_rport_error
760 * and then unlock the rport.
761 */
762static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
763 void *rdata_arg)
764{
765 struct fc_rport_priv *rdata = rdata_arg;
766 u8 op;
767
768 mutex_lock(&rdata->rp_mutex);
769
770 FC_RPORT_DBG(rdata, "Received a LOGO %s\n", fc_els_resp_type(fp));
771
772 if (rdata->rp_state != RPORT_ST_LOGO) {
773 FC_RPORT_DBG(rdata, "Received a LOGO response, but in state "
774 "%s\n", fc_rport_state(rdata));
775 if (IS_ERR(fp))
776 goto err;
777 goto out;
778 }
779
780 if (IS_ERR(fp)) {
781 fc_rport_error_retry(rdata, fp); 1010 fc_rport_error_retry(rdata, fp);
782 goto err;
783 } 1011 }
784 1012
785 op = fc_frame_payload_op(fp);
786 if (op != ELS_LS_ACC)
787 FC_RPORT_DBG(rdata, "Bad ELS response op %x for LOGO command\n",
788 op);
789 fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
790
791out: 1013out:
792 fc_frame_free(fp); 1014 fc_frame_free(fp);
793err: 1015err:
@@ -937,6 +1159,24 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
937} 1159}
938 1160
939/** 1161/**
1162 * fc_rport_logo_resp() - Handler for logout (LOGO) responses
1163 * @sp: The sequence the LOGO was on
1164 * @fp: The LOGO response frame
1165 * @lport_arg: The local port
1166 */
1167static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
1168 void *lport_arg)
1169{
1170 struct fc_lport *lport = lport_arg;
1171
1172 FC_RPORT_ID_DBG(lport, fc_seq_exch(sp)->did,
1173 "Received a LOGO %s\n", fc_els_resp_type(fp));
1174 if (IS_ERR(fp))
1175 return;
1176 fc_frame_free(fp);
1177}
1178
1179/**
940 * fc_rport_enter_logo() - Send a logout (LOGO) request 1180 * fc_rport_enter_logo() - Send a logout (LOGO) request
941 * @rdata: The remote port to send the LOGO request to 1181 * @rdata: The remote port to send the LOGO request to
942 * 1182 *
@@ -948,23 +1188,14 @@ static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
948 struct fc_lport *lport = rdata->local_port; 1188 struct fc_lport *lport = rdata->local_port;
949 struct fc_frame *fp; 1189 struct fc_frame *fp;
950 1190
951 FC_RPORT_DBG(rdata, "Port entered LOGO state from %s state\n", 1191 FC_RPORT_DBG(rdata, "Port sending LOGO from %s state\n",
952 fc_rport_state(rdata)); 1192 fc_rport_state(rdata));
953 1193
954 fc_rport_state_enter(rdata, RPORT_ST_LOGO);
955
956 fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo)); 1194 fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
957 if (!fp) { 1195 if (!fp)
958 fc_rport_error_retry(rdata, fp);
959 return; 1196 return;
960 } 1197 (void)lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
961 1198 fc_rport_logo_resp, lport, 0);
962 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
963 fc_rport_logo_resp, rdata,
964 2 * lport->r_a_tov))
965 fc_rport_error_retry(rdata, NULL);
966 else
967 kref_get(&rdata->kref);
968} 1199}
969 1200
970/** 1201/**
@@ -1013,7 +1244,7 @@ static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp,
1013 get_unaligned_be64(&adisc->adisc_wwpn) != rdata->ids.port_name || 1244 get_unaligned_be64(&adisc->adisc_wwpn) != rdata->ids.port_name ||
1014 get_unaligned_be64(&adisc->adisc_wwnn) != rdata->ids.node_name) { 1245 get_unaligned_be64(&adisc->adisc_wwnn) != rdata->ids.node_name) {
1015 FC_RPORT_DBG(rdata, "ADISC error or mismatch\n"); 1246 FC_RPORT_DBG(rdata, "ADISC error or mismatch\n");
1016 fc_rport_enter_plogi(rdata); 1247 fc_rport_enter_flogi(rdata);
1017 } else { 1248 } else {
1018 FC_RPORT_DBG(rdata, "ADISC OK\n"); 1249 FC_RPORT_DBG(rdata, "ADISC OK\n");
1019 fc_rport_enter_ready(rdata); 1250 fc_rport_enter_ready(rdata);
@@ -1058,29 +1289,25 @@ static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
1058/** 1289/**
1059 * fc_rport_recv_adisc_req() - Handler for Address Discovery (ADISC) requests 1290 * fc_rport_recv_adisc_req() - Handler for Address Discovery (ADISC) requests
1060 * @rdata: The remote port that sent the ADISC request 1291 * @rdata: The remote port that sent the ADISC request
1061 * @sp: The sequence the ADISC request was on
1062 * @in_fp: The ADISC request frame 1292 * @in_fp: The ADISC request frame
1063 * 1293 *
1064 * Locking Note: Called with the lport and rport locks held. 1294 * Locking Note: Called with the lport and rport locks held.
1065 */ 1295 */
1066static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata, 1296static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata,
1067 struct fc_seq *sp, struct fc_frame *in_fp) 1297 struct fc_frame *in_fp)
1068{ 1298{
1069 struct fc_lport *lport = rdata->local_port; 1299 struct fc_lport *lport = rdata->local_port;
1070 struct fc_frame *fp; 1300 struct fc_frame *fp;
1071 struct fc_exch *ep = fc_seq_exch(sp);
1072 struct fc_els_adisc *adisc; 1301 struct fc_els_adisc *adisc;
1073 struct fc_seq_els_data rjt_data; 1302 struct fc_seq_els_data rjt_data;
1074 u32 f_ctl;
1075 1303
1076 FC_RPORT_DBG(rdata, "Received ADISC request\n"); 1304 FC_RPORT_DBG(rdata, "Received ADISC request\n");
1077 1305
1078 adisc = fc_frame_payload_get(in_fp, sizeof(*adisc)); 1306 adisc = fc_frame_payload_get(in_fp, sizeof(*adisc));
1079 if (!adisc) { 1307 if (!adisc) {
1080 rjt_data.fp = NULL;
1081 rjt_data.reason = ELS_RJT_PROT; 1308 rjt_data.reason = ELS_RJT_PROT;
1082 rjt_data.explan = ELS_EXPL_INV_LEN; 1309 rjt_data.explan = ELS_EXPL_INV_LEN;
1083 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); 1310 lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
1084 goto drop; 1311 goto drop;
1085 } 1312 }
1086 1313
@@ -1090,11 +1317,8 @@ static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata,
1090 fc_adisc_fill(lport, fp); 1317 fc_adisc_fill(lport, fp);
1091 adisc = fc_frame_payload_get(fp, sizeof(*adisc)); 1318 adisc = fc_frame_payload_get(fp, sizeof(*adisc));
1092 adisc->adisc_cmd = ELS_LS_ACC; 1319 adisc->adisc_cmd = ELS_LS_ACC;
1093 sp = lport->tt.seq_start_next(sp); 1320 fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
1094 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT; 1321 lport->tt.frame_send(lport, fp);
1095 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1096 FC_TYPE_ELS, f_ctl, 0);
1097 lport->tt.seq_send(lport, sp, fp);
1098drop: 1322drop:
1099 fc_frame_free(in_fp); 1323 fc_frame_free(in_fp);
1100} 1324}
@@ -1102,25 +1326,22 @@ drop:
1102/** 1326/**
1103 * fc_rport_recv_rls_req() - Handle received Read Link Status request 1327 * fc_rport_recv_rls_req() - Handle received Read Link Status request
1104 * @rdata: The remote port that sent the RLS request 1328 * @rdata: The remote port that sent the RLS request
1105 * @sp: The sequence that the RLS was on
1106 * @rx_fp: The PRLI request frame 1329 * @rx_fp: The PRLI request frame
1107 * 1330 *
1108 * Locking Note: The rport lock is expected to be held before calling 1331 * Locking Note: The rport lock is expected to be held before calling
1109 * this function. 1332 * this function.
1110 */ 1333 */
1111static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata, 1334static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata,
1112 struct fc_seq *sp, struct fc_frame *rx_fp) 1335 struct fc_frame *rx_fp)
1113 1336
1114{ 1337{
1115 struct fc_lport *lport = rdata->local_port; 1338 struct fc_lport *lport = rdata->local_port;
1116 struct fc_frame *fp; 1339 struct fc_frame *fp;
1117 struct fc_exch *ep = fc_seq_exch(sp);
1118 struct fc_els_rls *rls; 1340 struct fc_els_rls *rls;
1119 struct fc_els_rls_resp *rsp; 1341 struct fc_els_rls_resp *rsp;
1120 struct fc_els_lesb *lesb; 1342 struct fc_els_lesb *lesb;
1121 struct fc_seq_els_data rjt_data; 1343 struct fc_seq_els_data rjt_data;
1122 struct fc_host_statistics *hst; 1344 struct fc_host_statistics *hst;
1123 u32 f_ctl;
1124 1345
1125 FC_RPORT_DBG(rdata, "Received RLS request while in state %s\n", 1346 FC_RPORT_DBG(rdata, "Received RLS request while in state %s\n",
1126 fc_rport_state(rdata)); 1347 fc_rport_state(rdata));
@@ -1157,16 +1378,12 @@ static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata,
1157 lesb->lesb_inv_crc = htonl(hst->invalid_crc_count); 1378 lesb->lesb_inv_crc = htonl(hst->invalid_crc_count);
1158 } 1379 }
1159 1380
1160 sp = lport->tt.seq_start_next(sp); 1381 fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
1161 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ; 1382 lport->tt.frame_send(lport, fp);
1162 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1163 FC_TYPE_ELS, f_ctl, 0);
1164 lport->tt.seq_send(lport, sp, fp);
1165 goto out; 1383 goto out;
1166 1384
1167out_rjt: 1385out_rjt:
1168 rjt_data.fp = NULL; 1386 lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
1169 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1170out: 1387out:
1171 fc_frame_free(rx_fp); 1388 fc_frame_free(rx_fp);
1172} 1389}
@@ -1174,7 +1391,6 @@ out:
1174/** 1391/**
1175 * fc_rport_recv_els_req() - Handler for validated ELS requests 1392 * fc_rport_recv_els_req() - Handler for validated ELS requests
1176 * @lport: The local port that received the ELS request 1393 * @lport: The local port that received the ELS request
1177 * @sp: The sequence that the ELS request was on
1178 * @fp: The ELS request frame 1394 * @fp: The ELS request frame
1179 * 1395 *
1180 * Handle incoming ELS requests that require port login. 1396 * Handle incoming ELS requests that require port login.
@@ -1182,21 +1398,13 @@ out:
1182 * 1398 *
1183 * Locking Note: Called with the lport lock held. 1399 * Locking Note: Called with the lport lock held.
1184 */ 1400 */
1185static void fc_rport_recv_els_req(struct fc_lport *lport, 1401static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
1186 struct fc_seq *sp, struct fc_frame *fp)
1187{ 1402{
1188 struct fc_rport_priv *rdata; 1403 struct fc_rport_priv *rdata;
1189 struct fc_frame_header *fh;
1190 struct fc_seq_els_data els_data; 1404 struct fc_seq_els_data els_data;
1191 1405
1192 els_data.fp = NULL;
1193 els_data.reason = ELS_RJT_UNAB;
1194 els_data.explan = ELS_EXPL_PLOGI_REQD;
1195
1196 fh = fc_frame_header_get(fp);
1197
1198 mutex_lock(&lport->disc.disc_mutex); 1406 mutex_lock(&lport->disc.disc_mutex);
1199 rdata = lport->tt.rport_lookup(lport, ntoh24(fh->fh_s_id)); 1407 rdata = lport->tt.rport_lookup(lport, fc_frame_sid(fp));
1200 if (!rdata) { 1408 if (!rdata) {
1201 mutex_unlock(&lport->disc.disc_mutex); 1409 mutex_unlock(&lport->disc.disc_mutex);
1202 goto reject; 1410 goto reject;
@@ -1217,24 +1425,24 @@ static void fc_rport_recv_els_req(struct fc_lport *lport,
1217 1425
1218 switch (fc_frame_payload_op(fp)) { 1426 switch (fc_frame_payload_op(fp)) {
1219 case ELS_PRLI: 1427 case ELS_PRLI:
1220 fc_rport_recv_prli_req(rdata, sp, fp); 1428 fc_rport_recv_prli_req(rdata, fp);
1221 break; 1429 break;
1222 case ELS_PRLO: 1430 case ELS_PRLO:
1223 fc_rport_recv_prlo_req(rdata, sp, fp); 1431 fc_rport_recv_prlo_req(rdata, fp);
1224 break; 1432 break;
1225 case ELS_ADISC: 1433 case ELS_ADISC:
1226 fc_rport_recv_adisc_req(rdata, sp, fp); 1434 fc_rport_recv_adisc_req(rdata, fp);
1227 break; 1435 break;
1228 case ELS_RRQ: 1436 case ELS_RRQ:
1229 els_data.fp = fp; 1437 lport->tt.seq_els_rsp_send(fp, ELS_RRQ, NULL);
1230 lport->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data); 1438 fc_frame_free(fp);
1231 break; 1439 break;
1232 case ELS_REC: 1440 case ELS_REC:
1233 els_data.fp = fp; 1441 lport->tt.seq_els_rsp_send(fp, ELS_REC, NULL);
1234 lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data); 1442 fc_frame_free(fp);
1235 break; 1443 break;
1236 case ELS_RLS: 1444 case ELS_RLS:
1237 fc_rport_recv_rls_req(rdata, sp, fp); 1445 fc_rport_recv_rls_req(rdata, fp);
1238 break; 1446 break;
1239 default: 1447 default:
1240 fc_frame_free(fp); /* can't happen */ 1448 fc_frame_free(fp); /* can't happen */
@@ -1245,35 +1453,38 @@ static void fc_rport_recv_els_req(struct fc_lport *lport,
1245 return; 1453 return;
1246 1454
1247reject: 1455reject:
1248 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data); 1456 els_data.reason = ELS_RJT_UNAB;
1457 els_data.explan = ELS_EXPL_PLOGI_REQD;
1458 lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
1249 fc_frame_free(fp); 1459 fc_frame_free(fp);
1250} 1460}
1251 1461
1252/** 1462/**
1253 * fc_rport_recv_req() - Handler for requests 1463 * fc_rport_recv_req() - Handler for requests
1254 * @sp: The sequence the request was on
1255 * @fp: The request frame
1256 * @lport: The local port that received the request 1464 * @lport: The local port that received the request
1465 * @fp: The request frame
1257 * 1466 *
1258 * Locking Note: Called with the lport lock held. 1467 * Locking Note: Called with the lport lock held.
1259 */ 1468 */
1260void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, 1469void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
1261 struct fc_lport *lport)
1262{ 1470{
1263 struct fc_seq_els_data els_data; 1471 struct fc_seq_els_data els_data;
1264 1472
1265 /* 1473 /*
1266 * Handle PLOGI and LOGO requests separately, since they 1474 * Handle FLOGI, PLOGI and LOGO requests separately, since they
1267 * don't require prior login. 1475 * don't require prior login.
1268 * Check for unsupported opcodes first and reject them. 1476 * Check for unsupported opcodes first and reject them.
1269 * For some ops, it would be incorrect to reject with "PLOGI required". 1477 * For some ops, it would be incorrect to reject with "PLOGI required".
1270 */ 1478 */
1271 switch (fc_frame_payload_op(fp)) { 1479 switch (fc_frame_payload_op(fp)) {
1480 case ELS_FLOGI:
1481 fc_rport_recv_flogi_req(lport, fp);
1482 break;
1272 case ELS_PLOGI: 1483 case ELS_PLOGI:
1273 fc_rport_recv_plogi_req(lport, sp, fp); 1484 fc_rport_recv_plogi_req(lport, fp);
1274 break; 1485 break;
1275 case ELS_LOGO: 1486 case ELS_LOGO:
1276 fc_rport_recv_logo_req(lport, sp, fp); 1487 fc_rport_recv_logo_req(lport, fp);
1277 break; 1488 break;
1278 case ELS_PRLI: 1489 case ELS_PRLI:
1279 case ELS_PRLO: 1490 case ELS_PRLO:
@@ -1281,14 +1492,13 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
1281 case ELS_RRQ: 1492 case ELS_RRQ:
1282 case ELS_REC: 1493 case ELS_REC:
1283 case ELS_RLS: 1494 case ELS_RLS:
1284 fc_rport_recv_els_req(lport, sp, fp); 1495 fc_rport_recv_els_req(lport, fp);
1285 break; 1496 break;
1286 default: 1497 default:
1287 fc_frame_free(fp);
1288 els_data.fp = NULL;
1289 els_data.reason = ELS_RJT_UNSUP; 1498 els_data.reason = ELS_RJT_UNSUP;
1290 els_data.explan = ELS_EXPL_NONE; 1499 els_data.explan = ELS_EXPL_NONE;
1291 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data); 1500 lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
1501 fc_frame_free(fp);
1292 break; 1502 break;
1293 } 1503 }
1294} 1504}
@@ -1296,26 +1506,21 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
1296/** 1506/**
1297 * fc_rport_recv_plogi_req() - Handler for Port Login (PLOGI) requests 1507 * fc_rport_recv_plogi_req() - Handler for Port Login (PLOGI) requests
1298 * @lport: The local port that received the PLOGI request 1508 * @lport: The local port that received the PLOGI request
1299 * @sp: The sequence that the PLOGI request was on
1300 * @rx_fp: The PLOGI request frame 1509 * @rx_fp: The PLOGI request frame
1301 * 1510 *
1302 * Locking Note: The rport lock is held before calling this function. 1511 * Locking Note: The rport lock is held before calling this function.
1303 */ 1512 */
1304static void fc_rport_recv_plogi_req(struct fc_lport *lport, 1513static void fc_rport_recv_plogi_req(struct fc_lport *lport,
1305 struct fc_seq *sp, struct fc_frame *rx_fp) 1514 struct fc_frame *rx_fp)
1306{ 1515{
1307 struct fc_disc *disc; 1516 struct fc_disc *disc;
1308 struct fc_rport_priv *rdata; 1517 struct fc_rport_priv *rdata;
1309 struct fc_frame *fp = rx_fp; 1518 struct fc_frame *fp = rx_fp;
1310 struct fc_exch *ep;
1311 struct fc_frame_header *fh;
1312 struct fc_els_flogi *pl; 1519 struct fc_els_flogi *pl;
1313 struct fc_seq_els_data rjt_data; 1520 struct fc_seq_els_data rjt_data;
1314 u32 sid, f_ctl; 1521 u32 sid;
1315 1522
1316 rjt_data.fp = NULL; 1523 sid = fc_frame_sid(fp);
1317 fh = fc_frame_header_get(fp);
1318 sid = ntoh24(fh->fh_s_id);
1319 1524
1320 FC_RPORT_ID_DBG(lport, sid, "Received PLOGI request\n"); 1525 FC_RPORT_ID_DBG(lport, sid, "Received PLOGI request\n");
1321 1526
@@ -1358,6 +1563,9 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport,
1358 case RPORT_ST_INIT: 1563 case RPORT_ST_INIT:
1359 FC_RPORT_DBG(rdata, "Received PLOGI in INIT state\n"); 1564 FC_RPORT_DBG(rdata, "Received PLOGI in INIT state\n");
1360 break; 1565 break;
1566 case RPORT_ST_PLOGI_WAIT:
1567 FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI_WAIT state\n");
1568 break;
1361 case RPORT_ST_PLOGI: 1569 case RPORT_ST_PLOGI:
1362 FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI state\n"); 1570 FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI state\n");
1363 if (rdata->ids.port_name < lport->wwpn) { 1571 if (rdata->ids.port_name < lport->wwpn) {
@@ -1375,9 +1583,8 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport,
1375 "- ignored for now\n", rdata->rp_state); 1583 "- ignored for now\n", rdata->rp_state);
1376 /* XXX TBD - should reset */ 1584 /* XXX TBD - should reset */
1377 break; 1585 break;
1586 case RPORT_ST_FLOGI:
1378 case RPORT_ST_DELETE: 1587 case RPORT_ST_DELETE:
1379 case RPORT_ST_LOGO:
1380 case RPORT_ST_RESTART:
1381 FC_RPORT_DBG(rdata, "Received PLOGI in state %s - send busy\n", 1588 FC_RPORT_DBG(rdata, "Received PLOGI in state %s - send busy\n",
1382 fc_rport_state(rdata)); 1589 fc_rport_state(rdata));
1383 mutex_unlock(&rdata->rp_mutex); 1590 mutex_unlock(&rdata->rp_mutex);
@@ -1390,50 +1597,41 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport,
1390 * Get session payload size from incoming PLOGI. 1597 * Get session payload size from incoming PLOGI.
1391 */ 1598 */
1392 rdata->maxframe_size = fc_plogi_get_maxframe(pl, lport->mfs); 1599 rdata->maxframe_size = fc_plogi_get_maxframe(pl, lport->mfs);
1393 fc_frame_free(rx_fp);
1394 1600
1395 /* 1601 /*
1396 * Send LS_ACC. If this fails, the originator should retry. 1602 * Send LS_ACC. If this fails, the originator should retry.
1397 */ 1603 */
1398 sp = lport->tt.seq_start_next(sp);
1399 if (!sp)
1400 goto out;
1401 fp = fc_frame_alloc(lport, sizeof(*pl)); 1604 fp = fc_frame_alloc(lport, sizeof(*pl));
1402 if (!fp) 1605 if (!fp)
1403 goto out; 1606 goto out;
1404 1607
1405 fc_plogi_fill(lport, fp, ELS_LS_ACC); 1608 fc_plogi_fill(lport, fp, ELS_LS_ACC);
1406 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT; 1609 fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
1407 ep = fc_seq_exch(sp); 1610 lport->tt.frame_send(lport, fp);
1408 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1409 FC_TYPE_ELS, f_ctl, 0);
1410 lport->tt.seq_send(lport, sp, fp);
1411 fc_rport_enter_prli(rdata); 1611 fc_rport_enter_prli(rdata);
1412out: 1612out:
1413 mutex_unlock(&rdata->rp_mutex); 1613 mutex_unlock(&rdata->rp_mutex);
1614 fc_frame_free(rx_fp);
1414 return; 1615 return;
1415 1616
1416reject: 1617reject:
1417 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); 1618 lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
1418 fc_frame_free(fp); 1619 fc_frame_free(fp);
1419} 1620}
1420 1621
1421/** 1622/**
1422 * fc_rport_recv_prli_req() - Handler for process login (PRLI) requests 1623 * fc_rport_recv_prli_req() - Handler for process login (PRLI) requests
1423 * @rdata: The remote port that sent the PRLI request 1624 * @rdata: The remote port that sent the PRLI request
1424 * @sp: The sequence that the PRLI was on
1425 * @rx_fp: The PRLI request frame 1625 * @rx_fp: The PRLI request frame
1426 * 1626 *
1427 * Locking Note: The rport lock is exected to be held before calling 1627 * Locking Note: The rport lock is exected to be held before calling
1428 * this function. 1628 * this function.
1429 */ 1629 */
1430static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, 1630static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
1431 struct fc_seq *sp, struct fc_frame *rx_fp) 1631 struct fc_frame *rx_fp)
1432{ 1632{
1433 struct fc_lport *lport = rdata->local_port; 1633 struct fc_lport *lport = rdata->local_port;
1434 struct fc_exch *ep;
1435 struct fc_frame *fp; 1634 struct fc_frame *fp;
1436 struct fc_frame_header *fh;
1437 struct { 1635 struct {
1438 struct fc_els_prli prli; 1636 struct fc_els_prli prli;
1439 struct fc_els_spp spp; 1637 struct fc_els_spp spp;
@@ -1444,17 +1642,13 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
1444 unsigned int plen; 1642 unsigned int plen;
1445 enum fc_els_spp_resp resp; 1643 enum fc_els_spp_resp resp;
1446 struct fc_seq_els_data rjt_data; 1644 struct fc_seq_els_data rjt_data;
1447 u32 f_ctl;
1448 u32 fcp_parm; 1645 u32 fcp_parm;
1449 u32 roles = FC_RPORT_ROLE_UNKNOWN; 1646 u32 roles = FC_RPORT_ROLE_UNKNOWN;
1450 1647
1451 rjt_data.fp = NULL;
1452 fh = fc_frame_header_get(rx_fp);
1453
1454 FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n", 1648 FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
1455 fc_rport_state(rdata)); 1649 fc_rport_state(rdata));
1456 1650
1457 len = fr_len(rx_fp) - sizeof(*fh); 1651 len = fr_len(rx_fp) - sizeof(struct fc_frame_header);
1458 pp = fc_frame_payload_get(rx_fp, sizeof(*pp)); 1652 pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
1459 if (!pp) 1653 if (!pp)
1460 goto reject_len; 1654 goto reject_len;
@@ -1475,8 +1669,6 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
1475 rjt_data.explan = ELS_EXPL_INSUF_RES; 1669 rjt_data.explan = ELS_EXPL_INSUF_RES;
1476 goto reject; 1670 goto reject;
1477 } 1671 }
1478 sp = lport->tt.seq_start_next(sp);
1479 WARN_ON(!sp);
1480 pp = fc_frame_payload_get(fp, len); 1672 pp = fc_frame_payload_get(fp, len);
1481 WARN_ON(!pp); 1673 WARN_ON(!pp);
1482 memset(pp, 0, len); 1674 memset(pp, 0, len);
@@ -1529,12 +1721,8 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
1529 /* 1721 /*
1530 * Send LS_ACC. If this fails, the originator should retry. 1722 * Send LS_ACC. If this fails, the originator should retry.
1531 */ 1723 */
1532 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ; 1724 fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
1533 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT; 1725 lport->tt.frame_send(lport, fp);
1534 ep = fc_seq_exch(sp);
1535 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1536 FC_TYPE_ELS, f_ctl, 0);
1537 lport->tt.seq_send(lport, sp, fp);
1538 1726
1539 switch (rdata->rp_state) { 1727 switch (rdata->rp_state) {
1540 case RPORT_ST_PRLI: 1728 case RPORT_ST_PRLI:
@@ -1549,7 +1737,7 @@ reject_len:
1549 rjt_data.reason = ELS_RJT_PROT; 1737 rjt_data.reason = ELS_RJT_PROT;
1550 rjt_data.explan = ELS_EXPL_INV_LEN; 1738 rjt_data.explan = ELS_EXPL_INV_LEN;
1551reject: 1739reject:
1552 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); 1740 lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
1553drop: 1741drop:
1554 fc_frame_free(rx_fp); 1742 fc_frame_free(rx_fp);
1555} 1743}
@@ -1557,54 +1745,90 @@ drop:
1557/** 1745/**
1558 * fc_rport_recv_prlo_req() - Handler for process logout (PRLO) requests 1746 * fc_rport_recv_prlo_req() - Handler for process logout (PRLO) requests
1559 * @rdata: The remote port that sent the PRLO request 1747 * @rdata: The remote port that sent the PRLO request
1560 * @sp: The sequence that the PRLO was on 1748 * @rx_fp: The PRLO request frame
1561 * @fp: The PRLO request frame
1562 * 1749 *
1563 * Locking Note: The rport lock is exected to be held before calling 1750 * Locking Note: The rport lock is exected to be held before calling
1564 * this function. 1751 * this function.
1565 */ 1752 */
1566static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata, 1753static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
1567 struct fc_seq *sp, 1754 struct fc_frame *rx_fp)
1568 struct fc_frame *fp)
1569{ 1755{
1570 struct fc_lport *lport = rdata->local_port; 1756 struct fc_lport *lport = rdata->local_port;
1571 1757 struct fc_frame *fp;
1572 struct fc_frame_header *fh; 1758 struct {
1759 struct fc_els_prlo prlo;
1760 struct fc_els_spp spp;
1761 } *pp;
1762 struct fc_els_spp *rspp; /* request service param page */
1763 struct fc_els_spp *spp; /* response spp */
1764 unsigned int len;
1765 unsigned int plen;
1573 struct fc_seq_els_data rjt_data; 1766 struct fc_seq_els_data rjt_data;
1574 1767
1575 fh = fc_frame_header_get(fp);
1576
1577 FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n", 1768 FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n",
1578 fc_rport_state(rdata)); 1769 fc_rport_state(rdata));
1579 1770
1580 rjt_data.fp = NULL; 1771 len = fr_len(rx_fp) - sizeof(struct fc_frame_header);
1581 rjt_data.reason = ELS_RJT_UNAB; 1772 pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
1582 rjt_data.explan = ELS_EXPL_NONE; 1773 if (!pp)
1583 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); 1774 goto reject_len;
1584 fc_frame_free(fp); 1775 plen = ntohs(pp->prlo.prlo_len);
1776 if (plen != 20)
1777 goto reject_len;
1778 if (plen < len)
1779 len = plen;
1780
1781 rspp = &pp->spp;
1782
1783 fp = fc_frame_alloc(lport, len);
1784 if (!fp) {
1785 rjt_data.reason = ELS_RJT_UNAB;
1786 rjt_data.explan = ELS_EXPL_INSUF_RES;
1787 goto reject;
1788 }
1789
1790 pp = fc_frame_payload_get(fp, len);
1791 WARN_ON(!pp);
1792 memset(pp, 0, len);
1793 pp->prlo.prlo_cmd = ELS_LS_ACC;
1794 pp->prlo.prlo_obs = 0x10;
1795 pp->prlo.prlo_len = htons(len);
1796 spp = &pp->spp;
1797 spp->spp_type = rspp->spp_type;
1798 spp->spp_type_ext = rspp->spp_type_ext;
1799 spp->spp_flags = FC_SPP_RESP_ACK;
1800
1801 fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
1802
1803 fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
1804 lport->tt.frame_send(lport, fp);
1805 goto drop;
1806
1807reject_len:
1808 rjt_data.reason = ELS_RJT_PROT;
1809 rjt_data.explan = ELS_EXPL_INV_LEN;
1810reject:
1811 lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
1812drop:
1813 fc_frame_free(rx_fp);
1585} 1814}
1586 1815
1587/** 1816/**
1588 * fc_rport_recv_logo_req() - Handler for logout (LOGO) requests 1817 * fc_rport_recv_logo_req() - Handler for logout (LOGO) requests
1589 * @lport: The local port that received the LOGO request 1818 * @lport: The local port that received the LOGO request
1590 * @sp: The sequence that the LOGO request was on
1591 * @fp: The LOGO request frame 1819 * @fp: The LOGO request frame
1592 * 1820 *
1593 * Locking Note: The rport lock is exected to be held before calling 1821 * Locking Note: The rport lock is exected to be held before calling
1594 * this function. 1822 * this function.
1595 */ 1823 */
1596static void fc_rport_recv_logo_req(struct fc_lport *lport, 1824static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
1597 struct fc_seq *sp,
1598 struct fc_frame *fp)
1599{ 1825{
1600 struct fc_frame_header *fh;
1601 struct fc_rport_priv *rdata; 1826 struct fc_rport_priv *rdata;
1602 u32 sid; 1827 u32 sid;
1603 1828
1604 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); 1829 lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
1605 1830
1606 fh = fc_frame_header_get(fp); 1831 sid = fc_frame_sid(fp);
1607 sid = ntoh24(fh->fh_s_id);
1608 1832
1609 mutex_lock(&lport->disc.disc_mutex); 1833 mutex_lock(&lport->disc.disc_mutex);
1610 rdata = lport->tt.rport_lookup(lport, sid); 1834 rdata = lport->tt.rport_lookup(lport, sid);
@@ -1614,13 +1838,6 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport,
1614 fc_rport_state(rdata)); 1838 fc_rport_state(rdata));
1615 1839
1616 fc_rport_enter_delete(rdata, RPORT_EV_LOGO); 1840 fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
1617
1618 /*
1619 * If the remote port was created due to discovery, set state
1620 * to log back in. It may have seen a stale RSCN about us.
1621 */
1622 if (rdata->disc_id)
1623 fc_rport_state_enter(rdata, RPORT_ST_RESTART);
1624 mutex_unlock(&rdata->rp_mutex); 1841 mutex_unlock(&rdata->rp_mutex);
1625 } else 1842 } else
1626 FC_RPORT_ID_DBG(lport, sid, 1843 FC_RPORT_ID_DBG(lport, sid,
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 8c496b56556c..042153cbbde1 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -71,7 +71,7 @@ static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
71 case SAS_SG_ERR: 71 case SAS_SG_ERR:
72 return AC_ERR_INVALID; 72 return AC_ERR_INVALID;
73 73
74 case SAM_CHECK_COND: 74 case SAM_STAT_CHECK_CONDITION:
75 case SAS_OPEN_TO: 75 case SAS_OPEN_TO:
76 case SAS_OPEN_REJECT: 76 case SAS_OPEN_REJECT:
77 SAS_DPRINTK("%s: Saw error %d. What to do?\n", 77 SAS_DPRINTK("%s: Saw error %d. What to do?\n",
@@ -107,7 +107,7 @@ static void sas_ata_task_done(struct sas_task *task)
107 sas_ha = dev->port->ha; 107 sas_ha = dev->port->ha;
108 108
109 spin_lock_irqsave(dev->sata_dev.ap->lock, flags); 109 spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
110 if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_GOOD) { 110 if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD) {
111 ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf); 111 ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf);
112 qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command); 112 qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
113 dev->sata_dev.sstatus = resp->sstatus; 113 dev->sata_dev.sstatus = resp->sstatus;
@@ -511,12 +511,12 @@ static int sas_execute_task(struct sas_task *task, void *buffer, int size,
511 goto ex_err; 511 goto ex_err;
512 } 512 }
513 } 513 }
514 if (task->task_status.stat == SAM_BUSY || 514 if (task->task_status.stat == SAM_STAT_BUSY ||
515 task->task_status.stat == SAM_TASK_SET_FULL || 515 task->task_status.stat == SAM_STAT_TASK_SET_FULL ||
516 task->task_status.stat == SAS_QUEUE_FULL) { 516 task->task_status.stat == SAS_QUEUE_FULL) {
517 SAS_DPRINTK("task: q busy, sleeping...\n"); 517 SAS_DPRINTK("task: q busy, sleeping...\n");
518 schedule_timeout_interruptible(HZ); 518 schedule_timeout_interruptible(HZ);
519 } else if (task->task_status.stat == SAM_CHECK_COND) { 519 } else if (task->task_status.stat == SAM_STAT_CHECK_CONDITION) {
520 struct scsi_sense_hdr shdr; 520 struct scsi_sense_hdr shdr;
521 521
522 if (!scsi_normalize_sense(ts->buf, ts->buf_valid_size, 522 if (!scsi_normalize_sense(ts->buf, ts->buf_valid_size,
@@ -549,7 +549,7 @@ static int sas_execute_task(struct sas_task *task, void *buffer, int size,
549 shdr.asc, shdr.ascq); 549 shdr.asc, shdr.ascq);
550 } 550 }
551 } else if (task->task_status.resp != SAS_TASK_COMPLETE || 551 } else if (task->task_status.resp != SAS_TASK_COMPLETE ||
552 task->task_status.stat != SAM_GOOD) { 552 task->task_status.stat != SAM_STAT_GOOD) {
553 SAS_DPRINTK("task finished with resp:0x%x, " 553 SAS_DPRINTK("task finished with resp:0x%x, "
554 "stat:0x%x\n", 554 "stat:0x%x\n",
555 task->task_status.resp, 555 task->task_status.resp,
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index c65af02dcfe8..83dd5070a15c 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -107,7 +107,7 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
107 } 107 }
108 } 108 }
109 if (task->task_status.resp == SAS_TASK_COMPLETE && 109 if (task->task_status.resp == SAS_TASK_COMPLETE &&
110 task->task_status.stat == SAM_GOOD) { 110 task->task_status.stat == SAM_STAT_GOOD) {
111 res = 0; 111 res = 0;
112 break; 112 break;
113 } if (task->task_status.resp == SAS_TASK_COMPLETE && 113 } if (task->task_status.resp == SAS_TASK_COMPLETE &&
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index a7890c6d878e..f0cfba9a1fc8 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -113,10 +113,10 @@ static void sas_scsi_task_done(struct sas_task *task)
113 case SAS_ABORTED_TASK: 113 case SAS_ABORTED_TASK:
114 hs = DID_ABORT; 114 hs = DID_ABORT;
115 break; 115 break;
116 case SAM_CHECK_COND: 116 case SAM_STAT_CHECK_CONDITION:
117 memcpy(sc->sense_buffer, ts->buf, 117 memcpy(sc->sense_buffer, ts->buf,
118 min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size)); 118 min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size));
119 stat = SAM_CHECK_COND; 119 stat = SAM_STAT_CHECK_CONDITION;
120 break; 120 break;
121 default: 121 default:
122 stat = ts->stat; 122 stat = ts->stat;
diff --git a/drivers/scsi/libsas/sas_task.c b/drivers/scsi/libsas/sas_task.c
index 594524d5bfa1..b13a3346894c 100644
--- a/drivers/scsi/libsas/sas_task.c
+++ b/drivers/scsi/libsas/sas_task.c
@@ -15,13 +15,13 @@ void sas_ssp_task_response(struct device *dev, struct sas_task *task,
15 else if (iu->datapres == 1) 15 else if (iu->datapres == 1)
16 tstat->stat = iu->resp_data[3]; 16 tstat->stat = iu->resp_data[3];
17 else if (iu->datapres == 2) { 17 else if (iu->datapres == 2) {
18 tstat->stat = SAM_CHECK_COND; 18 tstat->stat = SAM_STAT_CHECK_CONDITION;
19 tstat->buf_valid_size = 19 tstat->buf_valid_size =
20 min_t(int, SAS_STATUS_BUF_SIZE, 20 min_t(int, SAS_STATUS_BUF_SIZE,
21 be32_to_cpu(iu->sense_data_len)); 21 be32_to_cpu(iu->sense_data_len));
22 memcpy(tstat->buf, iu->sense_data, tstat->buf_valid_size); 22 memcpy(tstat->buf, iu->sense_data, tstat->buf_valid_size);
23 23
24 if (iu->status != SAM_CHECK_COND) 24 if (iu->status != SAM_STAT_CHECK_CONDITION)
25 dev_printk(KERN_WARNING, dev, 25 dev_printk(KERN_WARNING, dev,
26 "dev %llx sent sense data, but " 26 "dev %llx sent sense data, but "
27 "stat(%x) is not CHECK CONDITION\n", 27 "stat(%x) is not CHECK CONDITION\n",
@@ -30,7 +30,7 @@ void sas_ssp_task_response(struct device *dev, struct sas_task *task,
30 } 30 }
31 else 31 else
32 /* when datapres contains corrupt/unknown value... */ 32 /* when datapres contains corrupt/unknown value... */
33 tstat->stat = SAM_CHECK_COND; 33 tstat->stat = SAM_STAT_CHECK_CONDITION;
34} 34}
35EXPORT_SYMBOL_GPL(sas_ssp_task_response); 35EXPORT_SYMBOL_GPL(sas_ssp_task_response);
36 36
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index e35a4c71eb9a..3482d5a5aed2 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -20,7 +20,6 @@
20 *******************************************************************/ 20 *******************************************************************/
21 21
22#include <scsi/scsi_host.h> 22#include <scsi/scsi_host.h>
23
24struct lpfc_sli2_slim; 23struct lpfc_sli2_slim;
25 24
26#define LPFC_PCI_DEV_LP 0x1 25#define LPFC_PCI_DEV_LP 0x1
@@ -49,7 +48,7 @@ struct lpfc_sli2_slim;
49#define LPFC_TGTQ_INTERVAL 40000 /* Min amount of time between tgt 48#define LPFC_TGTQ_INTERVAL 40000 /* Min amount of time between tgt
50 queue depth change in millisecs */ 49 queue depth change in millisecs */
51#define LPFC_TGTQ_RAMPUP_PCENT 5 /* Target queue rampup in percentage */ 50#define LPFC_TGTQ_RAMPUP_PCENT 5 /* Target queue rampup in percentage */
52#define LPFC_MIN_TGT_QDEPTH 100 51#define LPFC_MIN_TGT_QDEPTH 10
53#define LPFC_MAX_TGT_QDEPTH 0xFFFF 52#define LPFC_MAX_TGT_QDEPTH 0xFFFF
54 53
55#define LPFC_MAX_BUCKET_COUNT 20 /* Maximum no. of buckets for stat data 54#define LPFC_MAX_BUCKET_COUNT 20 /* Maximum no. of buckets for stat data
@@ -376,6 +375,7 @@ struct lpfc_vport {
376#define WORKER_FABRIC_BLOCK_TMO 0x400 /* hba: fabric block timeout */ 375#define WORKER_FABRIC_BLOCK_TMO 0x400 /* hba: fabric block timeout */
377#define WORKER_RAMP_DOWN_QUEUE 0x800 /* hba: Decrease Q depth */ 376#define WORKER_RAMP_DOWN_QUEUE 0x800 /* hba: Decrease Q depth */
378#define WORKER_RAMP_UP_QUEUE 0x1000 /* hba: Increase Q depth */ 377#define WORKER_RAMP_UP_QUEUE 0x1000 /* hba: Increase Q depth */
378#define WORKER_SERVICE_TXQ 0x2000 /* hba: IOCBs on the txq */
379 379
380 struct timer_list fc_fdmitmo; 380 struct timer_list fc_fdmitmo;
381 struct timer_list els_tmofunc; 381 struct timer_list els_tmofunc;
@@ -400,6 +400,7 @@ struct lpfc_vport {
400 uint32_t cfg_max_luns; 400 uint32_t cfg_max_luns;
401 uint32_t cfg_enable_da_id; 401 uint32_t cfg_enable_da_id;
402 uint32_t cfg_max_scsicmpl_time; 402 uint32_t cfg_max_scsicmpl_time;
403 uint32_t cfg_tgt_queue_depth;
403 404
404 uint32_t dev_loss_tmo_changed; 405 uint32_t dev_loss_tmo_changed;
405 406
@@ -510,9 +511,9 @@ struct lpfc_hba {
510 void (*lpfc_stop_port) 511 void (*lpfc_stop_port)
511 (struct lpfc_hba *); 512 (struct lpfc_hba *);
512 int (*lpfc_hba_init_link) 513 int (*lpfc_hba_init_link)
513 (struct lpfc_hba *); 514 (struct lpfc_hba *, uint32_t);
514 int (*lpfc_hba_down_link) 515 int (*lpfc_hba_down_link)
515 (struct lpfc_hba *); 516 (struct lpfc_hba *, uint32_t);
516 517
517 /* SLI4 specific HBA data structure */ 518 /* SLI4 specific HBA data structure */
518 struct lpfc_sli4_hba sli4_hba; 519 struct lpfc_sli4_hba sli4_hba;
@@ -525,7 +526,6 @@ struct lpfc_hba {
525#define LPFC_SLI3_NPIV_ENABLED 0x02 526#define LPFC_SLI3_NPIV_ENABLED 0x02
526#define LPFC_SLI3_VPORT_TEARDOWN 0x04 527#define LPFC_SLI3_VPORT_TEARDOWN 0x04
527#define LPFC_SLI3_CRP_ENABLED 0x08 528#define LPFC_SLI3_CRP_ENABLED 0x08
528#define LPFC_SLI3_INB_ENABLED 0x10
529#define LPFC_SLI3_BG_ENABLED 0x20 529#define LPFC_SLI3_BG_ENABLED 0x20
530#define LPFC_SLI3_DSS_ENABLED 0x40 530#define LPFC_SLI3_DSS_ENABLED 0x40
531 uint32_t iocb_cmd_size; 531 uint32_t iocb_cmd_size;
@@ -557,9 +557,6 @@ struct lpfc_hba {
557 557
558 MAILBOX_t *mbox; 558 MAILBOX_t *mbox;
559 uint32_t *mbox_ext; 559 uint32_t *mbox_ext;
560 uint32_t *inb_ha_copy;
561 uint32_t *inb_counter;
562 uint32_t inb_last_counter;
563 uint32_t ha_copy; 560 uint32_t ha_copy;
564 struct _PCB *pcb; 561 struct _PCB *pcb;
565 struct _IOCB *IOCBs; 562 struct _IOCB *IOCBs;
@@ -628,6 +625,7 @@ struct lpfc_hba {
628 uint32_t cfg_hostmem_hgp; 625 uint32_t cfg_hostmem_hgp;
629 uint32_t cfg_log_verbose; 626 uint32_t cfg_log_verbose;
630 uint32_t cfg_aer_support; 627 uint32_t cfg_aer_support;
628 uint32_t cfg_iocb_cnt;
631 uint32_t cfg_suppress_link_up; 629 uint32_t cfg_suppress_link_up;
632#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */ 630#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */
633#define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */ 631#define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */
@@ -816,6 +814,9 @@ struct lpfc_hba {
816 814
817 uint8_t menlo_flag; /* menlo generic flags */ 815 uint8_t menlo_flag; /* menlo generic flags */
818#define HBA_MENLO_SUPPORT 0x1 /* HBA supports menlo commands */ 816#define HBA_MENLO_SUPPORT 0x1 /* HBA supports menlo commands */
817 uint32_t iocb_cnt;
818 uint32_t iocb_max;
819 atomic_t sdev_cnt;
819}; 820};
820 821
821static inline struct Scsi_Host * 822static inline struct Scsi_Host *
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index bf33b315f93e..868874c28f99 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -506,10 +506,10 @@ lpfc_link_state_store(struct device *dev, struct device_attribute *attr,
506 506
507 if ((strncmp(buf, "up", sizeof("up") - 1) == 0) && 507 if ((strncmp(buf, "up", sizeof("up") - 1) == 0) &&
508 (phba->link_state == LPFC_LINK_DOWN)) 508 (phba->link_state == LPFC_LINK_DOWN))
509 status = phba->lpfc_hba_init_link(phba); 509 status = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
510 else if ((strncmp(buf, "down", sizeof("down") - 1) == 0) && 510 else if ((strncmp(buf, "down", sizeof("down") - 1) == 0) &&
511 (phba->link_state >= LPFC_LINK_UP)) 511 (phba->link_state >= LPFC_LINK_UP))
512 status = phba->lpfc_hba_down_link(phba); 512 status = phba->lpfc_hba_down_link(phba, MBX_NOWAIT);
513 513
514 if (status == 0) 514 if (status == 0)
515 return strlen(buf); 515 return strlen(buf);
@@ -864,7 +864,6 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
864 uint32_t *mrpi, uint32_t *arpi, 864 uint32_t *mrpi, uint32_t *arpi,
865 uint32_t *mvpi, uint32_t *avpi) 865 uint32_t *mvpi, uint32_t *avpi)
866{ 866{
867 struct lpfc_sli *psli = &phba->sli;
868 struct lpfc_mbx_read_config *rd_config; 867 struct lpfc_mbx_read_config *rd_config;
869 LPFC_MBOXQ_t *pmboxq; 868 LPFC_MBOXQ_t *pmboxq;
870 MAILBOX_t *pmb; 869 MAILBOX_t *pmb;
@@ -893,8 +892,7 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
893 pmb->mbxOwner = OWN_HOST; 892 pmb->mbxOwner = OWN_HOST;
894 pmboxq->context1 = NULL; 893 pmboxq->context1 = NULL;
895 894
896 if ((phba->pport->fc_flag & FC_OFFLINE_MODE) || 895 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
897 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
898 rc = MBX_NOT_FINISHED; 896 rc = MBX_NOT_FINISHED;
899 else 897 else
900 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 898 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -1949,6 +1947,59 @@ static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL);
1949LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK, 1947LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK,
1950 LPFC_DELAY_INIT_LINK_INDEFINITELY, 1948 LPFC_DELAY_INIT_LINK_INDEFINITELY,
1951 "Suppress Link Up at initialization"); 1949 "Suppress Link Up at initialization");
1950/*
1951# lpfc_cnt: Number of IOCBs allocated for ELS, CT, and ABTS
1952# 1 - (1024)
1953# 2 - (2048)
1954# 3 - (3072)
1955# 4 - (4096)
1956# 5 - (5120)
1957*/
1958static ssize_t
1959lpfc_iocb_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
1960{
1961 struct Scsi_Host *shost = class_to_shost(dev);
1962 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
1963
1964 return snprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max);
1965}
1966
1967static DEVICE_ATTR(iocb_hw, S_IRUGO,
1968 lpfc_iocb_hw_show, NULL);
1969static ssize_t
1970lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
1971{
1972 struct Scsi_Host *shost = class_to_shost(dev);
1973 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
1974
1975 return snprintf(buf, PAGE_SIZE, "%d\n",
1976 phba->sli.ring[LPFC_ELS_RING].txq_max);
1977}
1978
1979static DEVICE_ATTR(txq_hw, S_IRUGO,
1980 lpfc_txq_hw_show, NULL);
1981static ssize_t
1982lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
1983 char *buf)
1984{
1985 struct Scsi_Host *shost = class_to_shost(dev);
1986 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
1987
1988 return snprintf(buf, PAGE_SIZE, "%d\n",
1989 phba->sli.ring[LPFC_ELS_RING].txcmplq_max);
1990}
1991
1992static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
1993 lpfc_txcmplq_hw_show, NULL);
1994
1995int lpfc_iocb_cnt = 2;
1996module_param(lpfc_iocb_cnt, int, 1);
1997MODULE_PARM_DESC(lpfc_iocb_cnt,
1998 "Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs");
1999lpfc_param_show(iocb_cnt);
2000lpfc_param_init(iocb_cnt, 2, 1, 5);
2001static DEVICE_ATTR(lpfc_iocb_cnt, S_IRUGO,
2002 lpfc_iocb_cnt_show, NULL);
1952 2003
1953/* 2004/*
1954# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear 2005# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
@@ -2157,6 +2208,13 @@ LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 128,
2157 "Max number of FCP commands we can queue to a specific LUN"); 2208 "Max number of FCP commands we can queue to a specific LUN");
2158 2209
2159/* 2210/*
2211# tgt_queue_depth: This parameter is used to limit the number of outstanding
2212# commands per target port. Value range is [10,65535]. Default value is 65535.
2213*/
2214LPFC_VPORT_ATTR_R(tgt_queue_depth, 65535, 10, 65535,
2215 "Max number of FCP commands we can queue to a specific target port");
2216
2217/*
2160# hba_queue_depth: This parameter is used to limit the number of outstanding 2218# hba_queue_depth: This parameter is used to limit the number of outstanding
2161# commands per lpfc HBA. Value range is [32,8192]. If this parameter 2219# commands per lpfc HBA. Value range is [32,8192]. If this parameter
2162# value is greater than the maximum number of exchanges supported by the HBA, 2220# value is greater than the maximum number of exchanges supported by the HBA,
@@ -2890,9 +2948,6 @@ lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
2890 struct lpfc_hba *phba = vport->phba; 2948 struct lpfc_hba *phba = vport->phba;
2891 int val = 0, rc = -EINVAL; 2949 int val = 0, rc = -EINVAL;
2892 2950
2893 /* AER not supported on OC devices yet */
2894 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
2895 return -EPERM;
2896 if (!isdigit(buf[0])) 2951 if (!isdigit(buf[0]))
2897 return -EINVAL; 2952 return -EINVAL;
2898 if (sscanf(buf, "%i", &val) != 1) 2953 if (sscanf(buf, "%i", &val) != 1)
@@ -2965,12 +3020,6 @@ lpfc_param_show(aer_support)
2965static int 3020static int
2966lpfc_aer_support_init(struct lpfc_hba *phba, int val) 3021lpfc_aer_support_init(struct lpfc_hba *phba, int val)
2967{ 3022{
2968 /* AER not supported on OC devices yet */
2969 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
2970 phba->cfg_aer_support = 0;
2971 return -EPERM;
2972 }
2973
2974 if (val == 0 || val == 1) { 3023 if (val == 0 || val == 1) {
2975 phba->cfg_aer_support = val; 3024 phba->cfg_aer_support = val;
2976 return 0; 3025 return 0;
@@ -3015,9 +3064,6 @@ lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
3015 struct lpfc_hba *phba = vport->phba; 3064 struct lpfc_hba *phba = vport->phba;
3016 int val, rc = -1; 3065 int val, rc = -1;
3017 3066
3018 /* AER not supported on OC devices yet */
3019 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
3020 return -EPERM;
3021 if (!isdigit(buf[0])) 3067 if (!isdigit(buf[0]))
3022 return -EINVAL; 3068 return -EINVAL;
3023 if (sscanf(buf, "%i", &val) != 1) 3069 if (sscanf(buf, "%i", &val) != 1)
@@ -3083,7 +3129,7 @@ lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
3083 continue; 3129 continue;
3084 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 3130 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
3085 continue; 3131 continue;
3086 ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH; 3132 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
3087 } 3133 }
3088 spin_unlock_irq(shost->host_lock); 3134 spin_unlock_irq(shost->host_lock);
3089 return 0; 3135 return 0;
@@ -3287,6 +3333,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
3287 &dev_attr_lpfc_temp_sensor, 3333 &dev_attr_lpfc_temp_sensor,
3288 &dev_attr_lpfc_log_verbose, 3334 &dev_attr_lpfc_log_verbose,
3289 &dev_attr_lpfc_lun_queue_depth, 3335 &dev_attr_lpfc_lun_queue_depth,
3336 &dev_attr_lpfc_tgt_queue_depth,
3290 &dev_attr_lpfc_hba_queue_depth, 3337 &dev_attr_lpfc_hba_queue_depth,
3291 &dev_attr_lpfc_peer_port_login, 3338 &dev_attr_lpfc_peer_port_login,
3292 &dev_attr_lpfc_nodev_tmo, 3339 &dev_attr_lpfc_nodev_tmo,
@@ -3334,6 +3381,10 @@ struct device_attribute *lpfc_hba_attrs[] = {
3334 &dev_attr_lpfc_aer_support, 3381 &dev_attr_lpfc_aer_support,
3335 &dev_attr_lpfc_aer_state_cleanup, 3382 &dev_attr_lpfc_aer_state_cleanup,
3336 &dev_attr_lpfc_suppress_link_up, 3383 &dev_attr_lpfc_suppress_link_up,
3384 &dev_attr_lpfc_iocb_cnt,
3385 &dev_attr_iocb_hw,
3386 &dev_attr_txq_hw,
3387 &dev_attr_txcmplq_hw,
3337 NULL, 3388 NULL,
3338}; 3389};
3339 3390
@@ -3344,6 +3395,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
3344 &dev_attr_lpfc_drvr_version, 3395 &dev_attr_lpfc_drvr_version,
3345 &dev_attr_lpfc_log_verbose, 3396 &dev_attr_lpfc_log_verbose,
3346 &dev_attr_lpfc_lun_queue_depth, 3397 &dev_attr_lpfc_lun_queue_depth,
3398 &dev_attr_lpfc_tgt_queue_depth,
3347 &dev_attr_lpfc_nodev_tmo, 3399 &dev_attr_lpfc_nodev_tmo,
3348 &dev_attr_lpfc_devloss_tmo, 3400 &dev_attr_lpfc_devloss_tmo,
3349 &dev_attr_lpfc_hba_queue_depth, 3401 &dev_attr_lpfc_hba_queue_depth,
@@ -4042,8 +4094,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
4042 pmboxq->context1 = NULL; 4094 pmboxq->context1 = NULL;
4043 pmboxq->vport = vport; 4095 pmboxq->vport = vport;
4044 4096
4045 if ((vport->fc_flag & FC_OFFLINE_MODE) || 4097 if (vport->fc_flag & FC_OFFLINE_MODE)
4046 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
4047 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 4098 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
4048 else 4099 else
4049 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 4100 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -4067,8 +4118,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
4067 pmboxq->context1 = NULL; 4118 pmboxq->context1 = NULL;
4068 pmboxq->vport = vport; 4119 pmboxq->vport = vport;
4069 4120
4070 if ((vport->fc_flag & FC_OFFLINE_MODE) || 4121 if (vport->fc_flag & FC_OFFLINE_MODE)
4071 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
4072 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 4122 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
4073 else 4123 else
4074 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 4124 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -4521,6 +4571,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
4521 lpfc_hba_log_verbose_init(phba, lpfc_log_verbose); 4571 lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
4522 lpfc_aer_support_init(phba, lpfc_aer_support); 4572 lpfc_aer_support_init(phba, lpfc_aer_support);
4523 lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up); 4573 lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
4574 lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
4524 return; 4575 return;
4525} 4576}
4526 4577
@@ -4533,6 +4584,7 @@ lpfc_get_vport_cfgparam(struct lpfc_vport *vport)
4533{ 4584{
4534 lpfc_log_verbose_init(vport, lpfc_log_verbose); 4585 lpfc_log_verbose_init(vport, lpfc_log_verbose);
4535 lpfc_lun_queue_depth_init(vport, lpfc_lun_queue_depth); 4586 lpfc_lun_queue_depth_init(vport, lpfc_lun_queue_depth);
4587 lpfc_tgt_queue_depth_init(vport, lpfc_tgt_queue_depth);
4536 lpfc_devloss_tmo_init(vport, lpfc_devloss_tmo); 4588 lpfc_devloss_tmo_init(vport, lpfc_devloss_tmo);
4537 lpfc_nodev_tmo_init(vport, lpfc_nodev_tmo); 4589 lpfc_nodev_tmo_init(vport, lpfc_nodev_tmo);
4538 lpfc_peer_port_login_init(vport, lpfc_peer_port_login); 4590 lpfc_peer_port_login_init(vport, lpfc_peer_port_login);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index dcf088262b20..d521569e6620 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -377,6 +377,11 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
377 377
378 if (rc == IOCB_SUCCESS) 378 if (rc == IOCB_SUCCESS)
379 return 0; /* done for now */ 379 return 0; /* done for now */
380 else if (rc == IOCB_BUSY)
381 rc = EAGAIN;
382 else
383 rc = EIO;
384
380 385
381 /* iocb failed so cleanup */ 386 /* iocb failed so cleanup */
382 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, 387 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
@@ -625,6 +630,10 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
625 lpfc_nlp_put(ndlp); 630 lpfc_nlp_put(ndlp);
626 if (rc == IOCB_SUCCESS) 631 if (rc == IOCB_SUCCESS)
627 return 0; /* done for now */ 632 return 0; /* done for now */
633 else if (rc == IOCB_BUSY)
634 rc = EAGAIN;
635 else
636 rc = EIO;
628 637
629 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, 638 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
630 job->request_payload.sg_cnt, DMA_TO_DEVICE); 639 job->request_payload.sg_cnt, DMA_TO_DEVICE);
@@ -953,10 +962,22 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
953 if (phba->sli_rev == LPFC_SLI_REV4) { 962 if (phba->sli_rev == LPFC_SLI_REV4) {
954 evt_dat->immed_dat = phba->ctx_idx; 963 evt_dat->immed_dat = phba->ctx_idx;
955 phba->ctx_idx = (phba->ctx_idx + 1) % 64; 964 phba->ctx_idx = (phba->ctx_idx + 1) % 64;
965 /* Provide warning for over-run of the ct_ctx array */
966 if (phba->ct_ctx[evt_dat->immed_dat].flags &
967 UNSOL_VALID)
968 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
969 "2717 CT context array entry "
970 "[%d] over-run: oxid:x%x, "
971 "sid:x%x\n", phba->ctx_idx,
972 phba->ct_ctx[
973 evt_dat->immed_dat].oxid,
974 phba->ct_ctx[
975 evt_dat->immed_dat].SID);
956 phba->ct_ctx[evt_dat->immed_dat].oxid = 976 phba->ct_ctx[evt_dat->immed_dat].oxid =
957 piocbq->iocb.ulpContext; 977 piocbq->iocb.ulpContext;
958 phba->ct_ctx[evt_dat->immed_dat].SID = 978 phba->ct_ctx[evt_dat->immed_dat].SID =
959 piocbq->iocb.un.rcvels.remoteID; 979 piocbq->iocb.un.rcvels.remoteID;
980 phba->ct_ctx[evt_dat->immed_dat].flags = UNSOL_VALID;
960 } else 981 } else
961 evt_dat->immed_dat = piocbq->iocb.ulpContext; 982 evt_dat->immed_dat = piocbq->iocb.ulpContext;
962 983
@@ -1314,6 +1335,21 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1314 rc = IOCB_ERROR; 1335 rc = IOCB_ERROR;
1315 goto issue_ct_rsp_exit; 1336 goto issue_ct_rsp_exit;
1316 } 1337 }
1338
1339 /* Check if the ndlp is active */
1340 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1341 rc = -IOCB_ERROR;
1342 goto issue_ct_rsp_exit;
1343 }
1344
1345 /* get a refernece count so the ndlp doesn't go away while
1346 * we respond
1347 */
1348 if (!lpfc_nlp_get(ndlp)) {
1349 rc = -IOCB_ERROR;
1350 goto issue_ct_rsp_exit;
1351 }
1352
1317 icmd->un.ulpWord[3] = ndlp->nlp_rpi; 1353 icmd->un.ulpWord[3] = ndlp->nlp_rpi;
1318 /* The exchange is done, mark the entry as invalid */ 1354 /* The exchange is done, mark the entry as invalid */
1319 phba->ct_ctx[tag].flags &= ~UNSOL_VALID; 1355 phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index fbc9baeb6048..03f4ddc18572 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -41,6 +41,7 @@ void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
41void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *); 41void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
42int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *, 42int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
43 LPFC_MBOXQ_t *, uint32_t); 43 LPFC_MBOXQ_t *, uint32_t);
44void lpfc_set_var(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
44void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); 45void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
45void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); 46void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
46void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *); 47void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *);
@@ -190,6 +191,7 @@ irqreturn_t lpfc_sli4_sp_intr_handler(int, void *);
190irqreturn_t lpfc_sli4_fp_intr_handler(int, void *); 191irqreturn_t lpfc_sli4_fp_intr_handler(int, void *);
191 192
192void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *); 193void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
194void lpfc_sli4_swap_str(struct lpfc_hba *, LPFC_MBOXQ_t *);
193void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *); 195void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
194void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *); 196void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
195void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *); 197void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -402,3 +404,12 @@ int lpfc_bsg_request(struct fc_bsg_job *);
402int lpfc_bsg_timeout(struct fc_bsg_job *); 404int lpfc_bsg_timeout(struct fc_bsg_job *);
403int lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, 405int lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
404 struct lpfc_iocbq *); 406 struct lpfc_iocbq *);
407void __lpfc_sli_ringtx_put(struct lpfc_hba *, struct lpfc_sli_ring *,
408 struct lpfc_iocbq *);
409struct lpfc_iocbq *lpfc_sli_ringtx_get(struct lpfc_hba *,
410 struct lpfc_sli_ring *);
411int __lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
412 struct lpfc_iocbq *, uint32_t);
413uint32_t lpfc_drain_txq(struct lpfc_hba *);
414
415
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 36257a685509..7cae69de36f7 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -114,6 +114,8 @@ struct lpfc_nodelist {
114}; 114};
115 115
116/* Defines for nlp_flag (uint32) */ 116/* Defines for nlp_flag (uint32) */
117#define NLP_IGNR_REG_CMPL 0x00000001 /* Rcvd rscn before we cmpl reg login */
118#define NLP_REG_LOGIN_SEND 0x00000002 /* sent reglogin to adapter */
117#define NLP_PLOGI_SND 0x00000020 /* sent PLOGI request for this entry */ 119#define NLP_PLOGI_SND 0x00000020 /* sent PLOGI request for this entry */
118#define NLP_PRLI_SND 0x00000040 /* sent PRLI request for this entry */ 120#define NLP_PRLI_SND 0x00000040 /* sent PRLI request for this entry */
119#define NLP_ADISC_SND 0x00000080 /* sent ADISC request for this entry */ 121#define NLP_ADISC_SND 0x00000080 /* sent ADISC request for this entry */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index c4c7f0ad7468..afbed6bc31f0 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -796,7 +796,9 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
796 * due to new FCF discovery 796 * due to new FCF discovery
797 */ 797 */
798 if ((phba->hba_flag & HBA_FIP_SUPPORT) && 798 if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
799 (phba->fcf.fcf_flag & FCF_DISCOVERY)) { 799 (phba->fcf.fcf_flag & FCF_DISCOVERY) &&
800 (irsp->ulpStatus != IOSTAT_LOCAL_REJECT) &&
801 (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED)) {
800 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, 802 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
801 "2611 FLOGI failed on registered " 803 "2611 FLOGI failed on registered "
802 "FCF record fcf_index:%d, trying " 804 "FCF record fcf_index:%d, trying "
@@ -811,18 +813,21 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
811 */ 813 */
812 lpfc_printf_log(phba, KERN_WARNING, 814 lpfc_printf_log(phba, KERN_WARNING,
813 LOG_FIP | LOG_ELS, 815 LOG_FIP | LOG_ELS,
814 "2760 FLOGI exhausted FCF " 816 "2760 Completed one round "
815 "round robin failover list, " 817 "of FLOGI FCF round robin "
816 "retry FLOGI on the current " 818 "failover list, retry FLOGI "
817 "registered FCF index:%d\n", 819 "on currently registered "
820 "FCF index:%d\n",
818 phba->fcf.current_rec.fcf_indx); 821 phba->fcf.current_rec.fcf_indx);
819 spin_lock_irq(&phba->hbalock);
820 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
821 spin_unlock_irq(&phba->hbalock);
822 } else { 822 } else {
823 lpfc_printf_log(phba, KERN_INFO,
824 LOG_FIP | LOG_ELS,
825 "2794 FLOGI FCF round robin "
826 "failover to FCF index x%x\n",
827 fcf_index);
823 rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, 828 rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba,
824 fcf_index); 829 fcf_index);
825 if (rc) { 830 if (rc)
826 lpfc_printf_log(phba, KERN_WARNING, 831 lpfc_printf_log(phba, KERN_WARNING,
827 LOG_FIP | LOG_ELS, 832 LOG_FIP | LOG_ELS,
828 "2761 FLOGI round " 833 "2761 FLOGI round "
@@ -831,10 +836,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
831 "rc:x%x, fcf_index:" 836 "rc:x%x, fcf_index:"
832 "%d\n", rc, 837 "%d\n", rc,
833 phba->fcf.current_rec.fcf_indx); 838 phba->fcf.current_rec.fcf_indx);
834 spin_lock_irq(&phba->hbalock); 839 else
835 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
836 spin_unlock_irq(&phba->hbalock);
837 } else
838 goto out; 840 goto out;
839 } 841 }
840 } 842 }
@@ -890,9 +892,39 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
890 */ 892 */
891 if (sp->cmn.fPort) 893 if (sp->cmn.fPort)
892 rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp); 894 rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
893 else 895 else if (!(phba->hba_flag & HBA_FCOE_SUPPORT))
894 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp); 896 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
895 897 else {
898 lpfc_printf_vlog(vport, KERN_ERR,
899 LOG_FIP | LOG_ELS,
900 "2831 FLOGI response with cleared Fabric "
901 "bit fcf_index 0x%x "
902 "Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
903 "Fabric Name "
904 "%02x%02x%02x%02x%02x%02x%02x%02x\n",
905 phba->fcf.current_rec.fcf_indx,
906 phba->fcf.current_rec.switch_name[0],
907 phba->fcf.current_rec.switch_name[1],
908 phba->fcf.current_rec.switch_name[2],
909 phba->fcf.current_rec.switch_name[3],
910 phba->fcf.current_rec.switch_name[4],
911 phba->fcf.current_rec.switch_name[5],
912 phba->fcf.current_rec.switch_name[6],
913 phba->fcf.current_rec.switch_name[7],
914 phba->fcf.current_rec.fabric_name[0],
915 phba->fcf.current_rec.fabric_name[1],
916 phba->fcf.current_rec.fabric_name[2],
917 phba->fcf.current_rec.fabric_name[3],
918 phba->fcf.current_rec.fabric_name[4],
919 phba->fcf.current_rec.fabric_name[5],
920 phba->fcf.current_rec.fabric_name[6],
921 phba->fcf.current_rec.fabric_name[7]);
922 lpfc_nlp_put(ndlp);
923 spin_lock_irq(&phba->hbalock);
924 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
925 spin_unlock_irq(&phba->hbalock);
926 goto out;
927 }
896 if (!rc) { 928 if (!rc) {
897 /* Mark the FCF discovery process done */ 929 /* Mark the FCF discovery process done */
898 if (phba->hba_flag & HBA_FIP_SUPPORT) 930 if (phba->hba_flag & HBA_FIP_SUPPORT)
@@ -1472,8 +1504,12 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1472 } 1504 }
1473 goto out; 1505 goto out;
1474 } 1506 }
1475 /* PLOGI failed */ 1507 /* PLOGI failed Don't print the vport to vport rjts */
1476 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1508 if (irsp->ulpStatus != IOSTAT_LS_RJT ||
1509 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
1510 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
1511 (phba)->pport->cfg_log_verbose & LOG_ELS)
1512 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1477 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n", 1513 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
1478 ndlp->nlp_DID, irsp->ulpStatus, 1514 ndlp->nlp_DID, irsp->ulpStatus,
1479 irsp->un.ulpWord[4]); 1515 irsp->un.ulpWord[4]);
@@ -2740,6 +2776,15 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2740 retry = 1; 2776 retry = 1;
2741 break; 2777 break;
2742 } 2778 }
2779 if (stat.un.b.lsRjtRsnCodeExp ==
2780 LSEXP_CANT_GIVE_DATA) {
2781 if (cmd == ELS_CMD_PLOGI) {
2782 delay = 1000;
2783 maxretry = 48;
2784 }
2785 retry = 1;
2786 break;
2787 }
2743 if (cmd == ELS_CMD_PLOGI) { 2788 if (cmd == ELS_CMD_PLOGI) {
2744 delay = 1000; 2789 delay = 1000;
2745 maxretry = lpfc_max_els_tries + 1; 2790 maxretry = lpfc_max_els_tries + 1;
@@ -5135,6 +5180,7 @@ lpfc_els_timeout(unsigned long ptr)
5135 return; 5180 return;
5136} 5181}
5137 5182
5183
5138/** 5184/**
5139 * lpfc_els_timeout_handler - Process an els timeout event 5185 * lpfc_els_timeout_handler - Process an els timeout event
5140 * @vport: pointer to a virtual N_Port data structure. 5186 * @vport: pointer to a virtual N_Port data structure.
@@ -5155,13 +5201,19 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
5155 uint32_t els_command = 0; 5201 uint32_t els_command = 0;
5156 uint32_t timeout; 5202 uint32_t timeout;
5157 uint32_t remote_ID = 0xffffffff; 5203 uint32_t remote_ID = 0xffffffff;
5204 LIST_HEAD(txcmplq_completions);
5205 LIST_HEAD(abort_list);
5206
5158 5207
5159 spin_lock_irq(&phba->hbalock);
5160 timeout = (uint32_t)(phba->fc_ratov << 1); 5208 timeout = (uint32_t)(phba->fc_ratov << 1);
5161 5209
5162 pring = &phba->sli.ring[LPFC_ELS_RING]; 5210 pring = &phba->sli.ring[LPFC_ELS_RING];
5163 5211
5164 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 5212 spin_lock_irq(&phba->hbalock);
5213 list_splice_init(&pring->txcmplq, &txcmplq_completions);
5214 spin_unlock_irq(&phba->hbalock);
5215
5216 list_for_each_entry_safe(piocb, tmp_iocb, &txcmplq_completions, list) {
5165 cmd = &piocb->iocb; 5217 cmd = &piocb->iocb;
5166 5218
5167 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 || 5219 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
@@ -5198,13 +5250,22 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
5198 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) 5250 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
5199 remote_ID = ndlp->nlp_DID; 5251 remote_ID = ndlp->nlp_DID;
5200 } 5252 }
5253 list_add_tail(&piocb->dlist, &abort_list);
5254 }
5255 spin_lock_irq(&phba->hbalock);
5256 list_splice(&txcmplq_completions, &pring->txcmplq);
5257 spin_unlock_irq(&phba->hbalock);
5258
5259 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
5201 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 5260 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5202 "0127 ELS timeout Data: x%x x%x x%x " 5261 "0127 ELS timeout Data: x%x x%x x%x "
5203 "x%x\n", els_command, 5262 "x%x\n", els_command,
5204 remote_ID, cmd->ulpCommand, cmd->ulpIoTag); 5263 remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
5264 spin_lock_irq(&phba->hbalock);
5265 list_del_init(&piocb->dlist);
5205 lpfc_sli_issue_abort_iotag(phba, pring, piocb); 5266 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
5267 spin_unlock_irq(&phba->hbalock);
5206 } 5268 }
5207 spin_unlock_irq(&phba->hbalock);
5208 5269
5209 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) 5270 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt)
5210 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); 5271 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
@@ -6901,6 +6962,7 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
6901 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 6962 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
6902 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 6963 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
6903 unsigned long iflag = 0; 6964 unsigned long iflag = 0;
6965 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6904 6966
6905 spin_lock_irqsave(&phba->hbalock, iflag); 6967 spin_lock_irqsave(&phba->hbalock, iflag);
6906 spin_lock(&phba->sli4_hba.abts_sgl_list_lock); 6968 spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
@@ -6913,6 +6975,10 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
6913 sglq_entry->state = SGL_FREED; 6975 sglq_entry->state = SGL_FREED;
6914 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 6976 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
6915 spin_unlock_irqrestore(&phba->hbalock, iflag); 6977 spin_unlock_irqrestore(&phba->hbalock, iflag);
6978
6979 /* Check if TXQ queue needs to be serviced */
6980 if (pring->txq_cnt)
6981 lpfc_worker_wake_up(phba);
6916 return; 6982 return;
6917 } 6983 }
6918 } 6984 }
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 1f87b4fb8b50..0639c994349c 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -275,7 +275,9 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
275 if (!(vport->load_flag & FC_UNLOADING) && 275 if (!(vport->load_flag & FC_UNLOADING) &&
276 !(ndlp->nlp_flag & NLP_DELAY_TMO) && 276 !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
277 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && 277 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
278 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) 278 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
279 (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
280 (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
279 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 281 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
280 282
281 lpfc_unregister_unused_fcf(phba); 283 lpfc_unregister_unused_fcf(phba);
@@ -586,6 +588,8 @@ lpfc_work_done(struct lpfc_hba *phba)
586 (status & 588 (status &
587 HA_RXMASK)); 589 HA_RXMASK));
588 } 590 }
591 if (pring->txq_cnt)
592 lpfc_drain_txq(phba);
589 /* 593 /*
590 * Turn on Ring interrupts 594 * Turn on Ring interrupts
591 */ 595 */
@@ -1297,7 +1301,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
1297 * used for this FCF when the function returns. 1301 * used for this FCF when the function returns.
1298 * If the FCF record need to be used with a particular vlan id, the vlan is 1302 * If the FCF record need to be used with a particular vlan id, the vlan is
1299 * set in the vlan_id on return of the function. If not VLAN tagging need to 1303 * set in the vlan_id on return of the function. If not VLAN tagging need to
1300 * be used with the FCF vlan_id will be set to 0xFFFF; 1304 * be used with the FCF vlan_id will be set to LPFC_FCOE_NULL_VID;
1301 **/ 1305 **/
1302static int 1306static int
1303lpfc_match_fcf_conn_list(struct lpfc_hba *phba, 1307lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
@@ -1333,7 +1337,7 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1333 if (phba->valid_vlan) 1337 if (phba->valid_vlan)
1334 *vlan_id = phba->vlan_id; 1338 *vlan_id = phba->vlan_id;
1335 else 1339 else
1336 *vlan_id = 0xFFFF; 1340 *vlan_id = LPFC_FCOE_NULL_VID;
1337 return 1; 1341 return 1;
1338 } 1342 }
1339 1343
@@ -1357,7 +1361,7 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1357 if (fcf_vlan_id) 1361 if (fcf_vlan_id)
1358 *vlan_id = fcf_vlan_id; 1362 *vlan_id = fcf_vlan_id;
1359 else 1363 else
1360 *vlan_id = 0xFFFF; 1364 *vlan_id = LPFC_FCOE_NULL_VID;
1361 return 1; 1365 return 1;
1362 } 1366 }
1363 1367
@@ -1466,7 +1470,7 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1466 else if (fcf_vlan_id) 1470 else if (fcf_vlan_id)
1467 *vlan_id = fcf_vlan_id; 1471 *vlan_id = fcf_vlan_id;
1468 else 1472 else
1469 *vlan_id = 0xFFFF; 1473 *vlan_id = LPFC_FCOE_NULL_VID;
1470 1474
1471 return 1; 1475 return 1;
1472 } 1476 }
@@ -1518,6 +1522,9 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
1518 * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS 1522 * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS
1519 * flag 1523 * flag
1520 */ 1524 */
1525 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
1526 "2833 Stop FCF discovery process due to link "
1527 "state change (x%x)\n", phba->link_state);
1521 spin_lock_irq(&phba->hbalock); 1528 spin_lock_irq(&phba->hbalock);
1522 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1529 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1523 phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY); 1530 phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
@@ -1565,7 +1572,7 @@ lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
1565} 1572}
1566 1573
1567/** 1574/**
1568 * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox. 1575 * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command.
1569 * @phba: pointer to lpfc hba data structure. 1576 * @phba: pointer to lpfc hba data structure.
1570 * @mboxq: pointer to mailbox object. 1577 * @mboxq: pointer to mailbox object.
1571 * @next_fcf_index: pointer to holder of next fcf index. 1578 * @next_fcf_index: pointer to holder of next fcf index.
@@ -1693,6 +1700,37 @@ lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
1693} 1700}
1694 1701
1695/** 1702/**
1703 lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF
1704 * @phba: pointer to lpfc hba data structure.
1705 * @fcf_rec: pointer to an existing FCF record.
1706 * @new_fcf_record: pointer to a new FCF record.
1707 * @new_vlan_id: vlan id from the new FCF record.
1708 *
1709 * This function performs matching test of a new FCF record against an existing
1710 * FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID, vlan id
1711 * will not be used as part of the FCF record matching criteria.
1712 *
1713 * Returns true if all the fields matching, otherwise returns false.
1714 */
1715static bool
1716lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
1717 struct lpfc_fcf_rec *fcf_rec,
1718 struct fcf_record *new_fcf_record,
1719 uint16_t new_vlan_id)
1720{
1721 if (new_vlan_id != LPFC_FCOE_IGNORE_VID)
1722 if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id))
1723 return false;
1724 if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record))
1725 return false;
1726 if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record))
1727 return false;
1728 if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
1729 return false;
1730 return true;
1731}
1732
1733/**
1696 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler. 1734 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
1697 * @phba: pointer to lpfc hba data structure. 1735 * @phba: pointer to lpfc hba data structure.
1698 * @mboxq: pointer to mailbox object. 1736 * @mboxq: pointer to mailbox object.
@@ -1755,7 +1793,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1755 */ 1793 */
1756 if (!rc) { 1794 if (!rc) {
1757 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 1795 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
1758 "2781 FCF record fcf_index:x%x failed FCF " 1796 "2781 FCF record (x%x) failed FCF "
1759 "connection list check, fcf_avail:x%x, " 1797 "connection list check, fcf_avail:x%x, "
1760 "fcf_valid:x%x\n", 1798 "fcf_valid:x%x\n",
1761 bf_get(lpfc_fcf_record_fcf_index, 1799 bf_get(lpfc_fcf_record_fcf_index,
@@ -1764,6 +1802,32 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1764 new_fcf_record), 1802 new_fcf_record),
1765 bf_get(lpfc_fcf_record_fcf_valid, 1803 bf_get(lpfc_fcf_record_fcf_valid,
1766 new_fcf_record)); 1804 new_fcf_record));
1805 if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
1806 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
1807 new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
1808 /*
1809 * In case the current in-use FCF record becomes
1810 * invalid/unavailable during FCF discovery that
1811 * was not triggered by fast FCF failover process,
1812 * treat it as fast FCF failover.
1813 */
1814 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) &&
1815 !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
1816 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
1817 "2835 Invalid in-use FCF "
1818 "record (x%x) reported, "
1819 "entering fast FCF failover "
1820 "mode scanning.\n",
1821 phba->fcf.current_rec.fcf_indx);
1822 spin_lock_irq(&phba->hbalock);
1823 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
1824 spin_unlock_irq(&phba->hbalock);
1825 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1826 lpfc_sli4_fcf_scan_read_fcf_rec(phba,
1827 LPFC_FCOE_FCF_GET_FIRST);
1828 return;
1829 }
1830 }
1767 goto read_next_fcf; 1831 goto read_next_fcf;
1768 } else { 1832 } else {
1769 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 1833 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
@@ -1780,14 +1844,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1780 */ 1844 */
1781 spin_lock_irq(&phba->hbalock); 1845 spin_lock_irq(&phba->hbalock);
1782 if (phba->fcf.fcf_flag & FCF_IN_USE) { 1846 if (phba->fcf.fcf_flag & FCF_IN_USE) {
1783 if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name, 1847 if (lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
1784 new_fcf_record) && 1848 new_fcf_record, vlan_id)) {
1785 lpfc_sw_name_match(phba->fcf.current_rec.switch_name,
1786 new_fcf_record) &&
1787 lpfc_mac_addr_match(phba->fcf.current_rec.mac_addr,
1788 new_fcf_record) &&
1789 lpfc_vlan_id_match(phba->fcf.current_rec.vlan_id,
1790 vlan_id)) {
1791 phba->fcf.fcf_flag |= FCF_AVAILABLE; 1849 phba->fcf.fcf_flag |= FCF_AVAILABLE;
1792 if (phba->fcf.fcf_flag & FCF_REDISC_PEND) 1850 if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
1793 /* Stop FCF redisc wait timer if pending */ 1851 /* Stop FCF redisc wait timer if pending */
@@ -1797,6 +1855,13 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1797 phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | 1855 phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV |
1798 FCF_DISCOVERY); 1856 FCF_DISCOVERY);
1799 spin_unlock_irq(&phba->hbalock); 1857 spin_unlock_irq(&phba->hbalock);
1858 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1859 "2836 The new FCF record (x%x) "
1860 "matches the in-use FCF record "
1861 "(x%x)\n",
1862 phba->fcf.current_rec.fcf_indx,
1863 bf_get(lpfc_fcf_record_fcf_index,
1864 new_fcf_record));
1800 goto out; 1865 goto out;
1801 } 1866 }
1802 /* 1867 /*
@@ -1828,6 +1893,12 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1828 */ 1893 */
1829 if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) { 1894 if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
1830 /* Choose this FCF record */ 1895 /* Choose this FCF record */
1896 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1897 "2837 Update current FCF record "
1898 "(x%x) with new FCF record (x%x)\n",
1899 fcf_rec->fcf_indx,
1900 bf_get(lpfc_fcf_record_fcf_index,
1901 new_fcf_record));
1831 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, 1902 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
1832 addr_mode, vlan_id, BOOT_ENABLE); 1903 addr_mode, vlan_id, BOOT_ENABLE);
1833 spin_unlock_irq(&phba->hbalock); 1904 spin_unlock_irq(&phba->hbalock);
@@ -1848,6 +1919,12 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1848 */ 1919 */
1849 if (new_fcf_record->fip_priority < fcf_rec->priority) { 1920 if (new_fcf_record->fip_priority < fcf_rec->priority) {
1850 /* Choose the new FCF record with lower priority */ 1921 /* Choose the new FCF record with lower priority */
1922 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1923 "2838 Update current FCF record "
1924 "(x%x) with new FCF record (x%x)\n",
1925 fcf_rec->fcf_indx,
1926 bf_get(lpfc_fcf_record_fcf_index,
1927 new_fcf_record));
1851 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, 1928 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
1852 addr_mode, vlan_id, 0); 1929 addr_mode, vlan_id, 0);
1853 /* Reset running random FCF selection count */ 1930 /* Reset running random FCF selection count */
@@ -1857,11 +1934,18 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1857 phba->fcf.eligible_fcf_cnt++; 1934 phba->fcf.eligible_fcf_cnt++;
1858 select_new_fcf = lpfc_sli4_new_fcf_random_select(phba, 1935 select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
1859 phba->fcf.eligible_fcf_cnt); 1936 phba->fcf.eligible_fcf_cnt);
1860 if (select_new_fcf) 1937 if (select_new_fcf) {
1938 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1939 "2839 Update current FCF record "
1940 "(x%x) with new FCF record (x%x)\n",
1941 fcf_rec->fcf_indx,
1942 bf_get(lpfc_fcf_record_fcf_index,
1943 new_fcf_record));
1861 /* Choose the new FCF by random selection */ 1944 /* Choose the new FCF by random selection */
1862 __lpfc_update_fcf_record(phba, fcf_rec, 1945 __lpfc_update_fcf_record(phba, fcf_rec,
1863 new_fcf_record, 1946 new_fcf_record,
1864 addr_mode, vlan_id, 0); 1947 addr_mode, vlan_id, 0);
1948 }
1865 } 1949 }
1866 spin_unlock_irq(&phba->hbalock); 1950 spin_unlock_irq(&phba->hbalock);
1867 goto read_next_fcf; 1951 goto read_next_fcf;
@@ -1871,6 +1955,11 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1871 * initial best-fit FCF. 1955 * initial best-fit FCF.
1872 */ 1956 */
1873 if (fcf_rec) { 1957 if (fcf_rec) {
1958 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1959 "2840 Update current FCF record "
1960 "with initial FCF record (x%x)\n",
1961 bf_get(lpfc_fcf_record_fcf_index,
1962 new_fcf_record));
1874 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, 1963 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
1875 addr_mode, vlan_id, (boot_flag ? 1964 addr_mode, vlan_id, (boot_flag ?
1876 BOOT_ENABLE : 0)); 1965 BOOT_ENABLE : 0));
@@ -1928,12 +2017,23 @@ read_next_fcf:
1928 lpfc_unregister_fcf(phba); 2017 lpfc_unregister_fcf(phba);
1929 2018
1930 /* Replace in-use record with the new record */ 2019 /* Replace in-use record with the new record */
2020 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2021 "2842 Replace the current in-use "
2022 "FCF record (x%x) with failover FCF "
2023 "record (x%x)\n",
2024 phba->fcf.current_rec.fcf_indx,
2025 phba->fcf.failover_rec.fcf_indx);
1931 memcpy(&phba->fcf.current_rec, 2026 memcpy(&phba->fcf.current_rec,
1932 &phba->fcf.failover_rec, 2027 &phba->fcf.failover_rec,
1933 sizeof(struct lpfc_fcf_rec)); 2028 sizeof(struct lpfc_fcf_rec));
1934 /* mark the FCF fast failover completed */ 2029 /*
2030 * Mark the fast FCF failover rediscovery completed
2031 * and the start of the first round of the roundrobin
2032 * FCF failover.
2033 */
1935 spin_lock_irq(&phba->hbalock); 2034 spin_lock_irq(&phba->hbalock);
1936 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; 2035 phba->fcf.fcf_flag &=
2036 ~(FCF_REDISC_FOV | FCF_REDISC_RRU);
1937 spin_unlock_irq(&phba->hbalock); 2037 spin_unlock_irq(&phba->hbalock);
1938 /* 2038 /*
1939 * Set up the initial registered FCF index for FLOGI 2039 * Set up the initial registered FCF index for FLOGI
@@ -1951,15 +2051,42 @@ read_next_fcf:
1951 if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) || 2051 if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
1952 (phba->fcf.fcf_flag & FCF_REDISC_PEND)) 2052 (phba->fcf.fcf_flag & FCF_REDISC_PEND))
1953 return; 2053 return;
2054
2055 if (phba->fcf.fcf_flag & FCF_IN_USE) {
2056 /*
2057 * In case the current in-use FCF record no
2058 * longer existed during FCF discovery that
2059 * was not triggered by fast FCF failover
2060 * process, treat it as fast FCF failover.
2061 */
2062 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2063 "2841 In-use FCF record (x%x) "
2064 "not reported, entering fast "
2065 "FCF failover mode scanning.\n",
2066 phba->fcf.current_rec.fcf_indx);
2067 spin_lock_irq(&phba->hbalock);
2068 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
2069 spin_unlock_irq(&phba->hbalock);
2070 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2071 lpfc_sli4_fcf_scan_read_fcf_rec(phba,
2072 LPFC_FCOE_FCF_GET_FIRST);
2073 return;
2074 }
2075
1954 /* 2076 /*
1955 * Otherwise, initial scan or post linkdown rescan, 2077 * Otherwise, initial scan or post linkdown rescan,
1956 * register with the best FCF record found so far 2078 * register with the best FCF record found so far
1957 * through the FCF scanning process. 2079 * through the FCF scanning process.
1958 */ 2080 */
1959 2081
1960 /* mark the initial FCF discovery completed */ 2082 /*
2083 * Mark the initial FCF discovery completed and
2084 * the start of the first round of the roundrobin
2085 * FCF failover.
2086 */
1961 spin_lock_irq(&phba->hbalock); 2087 spin_lock_irq(&phba->hbalock);
1962 phba->fcf.fcf_flag &= ~FCF_INIT_DISC; 2088 phba->fcf.fcf_flag &=
2089 ~(FCF_INIT_DISC | FCF_REDISC_RRU);
1963 spin_unlock_irq(&phba->hbalock); 2090 spin_unlock_irq(&phba->hbalock);
1964 /* 2091 /*
1965 * Set up the initial registered FCF index for FLOGI 2092 * Set up the initial registered FCF index for FLOGI
@@ -2033,6 +2160,11 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2033 next_fcf_index); 2160 next_fcf_index);
2034 2161
2035 /* Upload new FCF record to the failover FCF record */ 2162 /* Upload new FCF record to the failover FCF record */
2163 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2164 "2834 Update the current FCF record (x%x) "
2165 "with the next FCF record (x%x)\n",
2166 phba->fcf.failover_rec.fcf_indx,
2167 bf_get(lpfc_fcf_record_fcf_index, new_fcf_record));
2036 spin_lock_irq(&phba->hbalock); 2168 spin_lock_irq(&phba->hbalock);
2037 __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec, 2169 __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
2038 new_fcf_record, addr_mode, vlan_id, 2170 new_fcf_record, addr_mode, vlan_id,
@@ -2050,7 +2182,7 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2050 2182
2051 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2183 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2052 "2783 FLOGI round robin FCF failover from FCF " 2184 "2783 FLOGI round robin FCF failover from FCF "
2053 "(index:x%x) to FCF (index:x%x).\n", 2185 "(x%x) to FCF (x%x).\n",
2054 current_fcf_index, 2186 current_fcf_index,
2055 bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); 2187 bf_get(lpfc_fcf_record_fcf_index, new_fcf_record));
2056 2188
@@ -2084,7 +2216,7 @@ lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2084 goto out; 2216 goto out;
2085 2217
2086 /* If FCF discovery period is over, no need to proceed */ 2218 /* If FCF discovery period is over, no need to proceed */
2087 if (phba->fcf.fcf_flag & FCF_DISCOVERY) 2219 if (!(phba->fcf.fcf_flag & FCF_DISCOVERY))
2088 goto out; 2220 goto out;
2089 2221
2090 /* Parse the FCF record from the non-embedded mailbox command */ 2222 /* Parse the FCF record from the non-embedded mailbox command */
@@ -2715,11 +2847,35 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2715 struct lpfc_vport *vport = pmb->vport; 2847 struct lpfc_vport *vport = pmb->vport;
2716 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 2848 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
2717 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 2849 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
2850 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2718 2851
2719 pmb->context1 = NULL; 2852 pmb->context1 = NULL;
2720 2853
2721 /* Good status, call state machine */ 2854 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
2722 lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN); 2855 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
2856
2857 if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
2858 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
2859 /* We rcvd a rscn after issuing this
2860 * mbox reg login, we may have cycled
2861 * back through the state and be
2862 * back at reg login state so this
2863 * mbox needs to be ignored becase
2864 * there is another reg login in
2865 * proccess.
2866 */
2867 spin_lock_irq(shost->host_lock);
2868 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
2869 spin_unlock_irq(shost->host_lock);
2870 if (phba->sli_rev == LPFC_SLI_REV4)
2871 lpfc_sli4_free_rpi(phba,
2872 pmb->u.mb.un.varRegLogin.rpi);
2873
2874 } else
2875 /* Good status, call state machine */
2876 lpfc_disc_state_machine(vport, ndlp, pmb,
2877 NLP_EVT_CMPL_REG_LOGIN);
2878
2723 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2879 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2724 kfree(mp); 2880 kfree(mp);
2725 mempool_free(pmb, phba->mbox_mem_pool); 2881 mempool_free(pmb, phba->mbox_mem_pool);
@@ -3427,7 +3583,7 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
3427 kref_init(&ndlp->kref); 3583 kref_init(&ndlp->kref);
3428 NLP_INT_NODE_ACT(ndlp); 3584 NLP_INT_NODE_ACT(ndlp);
3429 atomic_set(&ndlp->cmd_pending, 0); 3585 atomic_set(&ndlp->cmd_pending, 0);
3430 ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH; 3586 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
3431} 3587}
3432 3588
3433struct lpfc_nodelist * 3589struct lpfc_nodelist *
@@ -3700,6 +3856,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3700 mempool_free(mbox, phba->mbox_mem_pool); 3856 mempool_free(mbox, phba->mbox_mem_pool);
3701 } 3857 }
3702 lpfc_no_rpi(phba, ndlp); 3858 lpfc_no_rpi(phba, ndlp);
3859
3703 ndlp->nlp_rpi = 0; 3860 ndlp->nlp_rpi = 0;
3704 ndlp->nlp_flag &= ~NLP_RPI_VALID; 3861 ndlp->nlp_flag &= ~NLP_RPI_VALID;
3705 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 3862 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
@@ -3842,6 +3999,9 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3842 kfree(mp); 3999 kfree(mp);
3843 } 4000 }
3844 list_del(&mb->list); 4001 list_del(&mb->list);
4002 if (phba->sli_rev == LPFC_SLI_REV4)
4003 lpfc_sli4_free_rpi(phba,
4004 mb->u.mb.un.varRegLogin.rpi);
3845 mempool_free(mb, phba->mbox_mem_pool); 4005 mempool_free(mb, phba->mbox_mem_pool);
3846 /* We shall not invoke the lpfc_nlp_put to decrement 4006 /* We shall not invoke the lpfc_nlp_put to decrement
3847 * the ndlp reference count as we are in the process 4007 * the ndlp reference count as we are in the process
@@ -3883,6 +4043,7 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3883 4043
3884 lpfc_cancel_retry_delay_tmo(vport, ndlp); 4044 lpfc_cancel_retry_delay_tmo(vport, ndlp);
3885 if ((ndlp->nlp_flag & NLP_DEFER_RM) && 4045 if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
4046 !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
3886 !(ndlp->nlp_flag & NLP_RPI_VALID)) { 4047 !(ndlp->nlp_flag & NLP_RPI_VALID)) {
3887 /* For this case we need to cleanup the default rpi 4048 /* For this case we need to cleanup the default rpi
3888 * allocated by the firmware. 4049 * allocated by the firmware.
@@ -5180,13 +5341,16 @@ void
5180lpfc_unregister_unused_fcf(struct lpfc_hba *phba) 5341lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
5181{ 5342{
5182 /* 5343 /*
5183 * If HBA is not running in FIP mode or if HBA does not support 5344 * If HBA is not running in FIP mode, if HBA does not support
5184 * FCoE or if FCF is not registered, do nothing. 5345 * FCoE, if FCF discovery is ongoing, or if FCF has not been
5346 * registered, do nothing.
5185 */ 5347 */
5186 spin_lock_irq(&phba->hbalock); 5348 spin_lock_irq(&phba->hbalock);
5187 if (!(phba->hba_flag & HBA_FCOE_SUPPORT) || 5349 if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
5188 !(phba->fcf.fcf_flag & FCF_REGISTERED) || 5350 !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
5189 !(phba->hba_flag & HBA_FIP_SUPPORT)) { 5351 !(phba->hba_flag & HBA_FIP_SUPPORT) ||
5352 (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
5353 (phba->pport->port_state == LPFC_FLOGI)) {
5190 spin_unlock_irq(&phba->hbalock); 5354 spin_unlock_irq(&phba->hbalock);
5191 return; 5355 return;
5192 } 5356 }
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index e654d01dad24..f5dbf2be3eab 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1170,6 +1170,7 @@ typedef struct {
1170#define PCI_DEVICE_ID_TIGERSHARK 0x0704 1170#define PCI_DEVICE_ID_TIGERSHARK 0x0704
1171#define PCI_DEVICE_ID_TOMCAT 0x0714 1171#define PCI_DEVICE_ID_TOMCAT 0x0714
1172#define PCI_DEVICE_ID_FALCON 0xf180 1172#define PCI_DEVICE_ID_FALCON 0xf180
1173#define PCI_DEVICE_ID_BALIUS 0xe131
1173 1174
1174#define JEDEC_ID_ADDRESS 0x0080001c 1175#define JEDEC_ID_ADDRESS 0x0080001c
1175#define FIREFLY_JEDEC_ID 0x1ACC 1176#define FIREFLY_JEDEC_ID 0x1ACC
@@ -3014,18 +3015,10 @@ struct sli3_pgp {
3014 uint32_t hbq_get[16]; 3015 uint32_t hbq_get[16];
3015}; 3016};
3016 3017
3017struct sli3_inb_pgp {
3018 uint32_t ha_copy;
3019 uint32_t counter;
3020 struct lpfc_pgp port[MAX_RINGS];
3021 uint32_t hbq_get[16];
3022};
3023
3024union sli_var { 3018union sli_var {
3025 struct sli2_desc s2; 3019 struct sli2_desc s2;
3026 struct sli3_desc s3; 3020 struct sli3_desc s3;
3027 struct sli3_pgp s3_pgp; 3021 struct sli3_pgp s3_pgp;
3028 struct sli3_inb_pgp s3_inb_pgp;
3029}; 3022};
3030 3023
3031typedef struct { 3024typedef struct {
@@ -3132,6 +3125,14 @@ typedef struct {
3132#define IOERR_BUFFER_SHORTAGE 0x28 3125#define IOERR_BUFFER_SHORTAGE 0x28
3133#define IOERR_DEFAULT 0x29 3126#define IOERR_DEFAULT 0x29
3134#define IOERR_CNT 0x2A 3127#define IOERR_CNT 0x2A
3128#define IOERR_SLER_FAILURE 0x46
3129#define IOERR_SLER_CMD_RCV_FAILURE 0x47
3130#define IOERR_SLER_REC_RJT_ERR 0x48
3131#define IOERR_SLER_REC_SRR_RETRY_ERR 0x49
3132#define IOERR_SLER_SRR_RJT_ERR 0x4A
3133#define IOERR_SLER_RRQ_RJT_ERR 0x4C
3134#define IOERR_SLER_RRQ_RETRY_ERR 0x4D
3135#define IOERR_SLER_ABTS_ERR 0x4E
3135 3136
3136#define IOERR_DRVR_MASK 0x100 3137#define IOERR_DRVR_MASK 0x100
3137#define IOERR_SLI_DOWN 0x101 /* ulpStatus - Driver defined */ 3138#define IOERR_SLI_DOWN 0x101 /* ulpStatus - Driver defined */
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index cd9697edf860..2786ee3b605d 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -621,6 +621,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
621/** 621/**
622 * lpfc_hba_init_link - Initialize the FC link 622 * lpfc_hba_init_link - Initialize the FC link
623 * @phba: pointer to lpfc hba data structure. 623 * @phba: pointer to lpfc hba data structure.
624 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
624 * 625 *
625 * This routine will issue the INIT_LINK mailbox command call. 626 * This routine will issue the INIT_LINK mailbox command call.
626 * It is available to other drivers through the lpfc_hba data 627 * It is available to other drivers through the lpfc_hba data
@@ -632,7 +633,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
632 * Any other value - error 633 * Any other value - error
633 **/ 634 **/
634int 635int
635lpfc_hba_init_link(struct lpfc_hba *phba) 636lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
636{ 637{
637 struct lpfc_vport *vport = phba->pport; 638 struct lpfc_vport *vport = phba->pport;
638 LPFC_MBOXQ_t *pmb; 639 LPFC_MBOXQ_t *pmb;
@@ -651,7 +652,7 @@ lpfc_hba_init_link(struct lpfc_hba *phba)
651 phba->cfg_link_speed); 652 phba->cfg_link_speed);
652 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 653 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
653 lpfc_set_loopback_flag(phba); 654 lpfc_set_loopback_flag(phba);
654 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 655 rc = lpfc_sli_issue_mbox(phba, pmb, flag);
655 if (rc != MBX_SUCCESS) { 656 if (rc != MBX_SUCCESS) {
656 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 657 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
657 "0498 Adapter failed to init, mbxCmd x%x " 658 "0498 Adapter failed to init, mbxCmd x%x "
@@ -664,17 +665,21 @@ lpfc_hba_init_link(struct lpfc_hba *phba)
664 writel(0xffffffff, phba->HAregaddr); 665 writel(0xffffffff, phba->HAregaddr);
665 readl(phba->HAregaddr); /* flush */ 666 readl(phba->HAregaddr); /* flush */
666 phba->link_state = LPFC_HBA_ERROR; 667 phba->link_state = LPFC_HBA_ERROR;
667 if (rc != MBX_BUSY) 668 if (rc != MBX_BUSY || flag == MBX_POLL)
668 mempool_free(pmb, phba->mbox_mem_pool); 669 mempool_free(pmb, phba->mbox_mem_pool);
669 return -EIO; 670 return -EIO;
670 } 671 }
671 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; 672 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
673 if (flag == MBX_POLL)
674 mempool_free(pmb, phba->mbox_mem_pool);
672 675
673 return 0; 676 return 0;
674} 677}
675 678
676/** 679/**
677 * lpfc_hba_down_link - this routine downs the FC link 680 * lpfc_hba_down_link - this routine downs the FC link
681 * @phba: pointer to lpfc hba data structure.
682 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
678 * 683 *
679 * This routine will issue the DOWN_LINK mailbox command call. 684 * This routine will issue the DOWN_LINK mailbox command call.
680 * It is available to other drivers through the lpfc_hba data 685 * It is available to other drivers through the lpfc_hba data
@@ -685,7 +690,7 @@ lpfc_hba_init_link(struct lpfc_hba *phba)
685 * Any other value - error 690 * Any other value - error
686 **/ 691 **/
687int 692int
688lpfc_hba_down_link(struct lpfc_hba *phba) 693lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
689{ 694{
690 LPFC_MBOXQ_t *pmb; 695 LPFC_MBOXQ_t *pmb;
691 int rc; 696 int rc;
@@ -701,7 +706,7 @@ lpfc_hba_down_link(struct lpfc_hba *phba)
701 "0491 Adapter Link is disabled.\n"); 706 "0491 Adapter Link is disabled.\n");
702 lpfc_down_link(phba, pmb); 707 lpfc_down_link(phba, pmb);
703 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 708 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
704 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 709 rc = lpfc_sli_issue_mbox(phba, pmb, flag);
705 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 710 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
706 lpfc_printf_log(phba, 711 lpfc_printf_log(phba,
707 KERN_ERR, LOG_INIT, 712 KERN_ERR, LOG_INIT,
@@ -711,6 +716,9 @@ lpfc_hba_down_link(struct lpfc_hba *phba)
711 mempool_free(pmb, phba->mbox_mem_pool); 716 mempool_free(pmb, phba->mbox_mem_pool);
712 return -EIO; 717 return -EIO;
713 } 718 }
719 if (flag == MBX_POLL)
720 mempool_free(pmb, phba->mbox_mem_pool);
721
714 return 0; 722 return 0;
715} 723}
716 724
@@ -1818,6 +1826,10 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1818 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 1826 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
1819 "EmulexSecure Fibre"}; 1827 "EmulexSecure Fibre"};
1820 break; 1828 break;
1829 case PCI_DEVICE_ID_BALIUS:
1830 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
1831 "Fibre Channel Adapter"};
1832 break;
1821 default: 1833 default:
1822 m = (typeof(m)){"Unknown", "", ""}; 1834 m = (typeof(m)){"Unknown", "", ""};
1823 break; 1835 break;
@@ -2279,10 +2291,32 @@ static void
2279lpfc_block_mgmt_io(struct lpfc_hba * phba) 2291lpfc_block_mgmt_io(struct lpfc_hba * phba)
2280{ 2292{
2281 unsigned long iflag; 2293 unsigned long iflag;
2294 uint8_t actcmd = MBX_HEARTBEAT;
2295 unsigned long timeout;
2296
2282 2297
2283 spin_lock_irqsave(&phba->hbalock, iflag); 2298 spin_lock_irqsave(&phba->hbalock, iflag);
2284 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 2299 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2300 if (phba->sli.mbox_active)
2301 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
2285 spin_unlock_irqrestore(&phba->hbalock, iflag); 2302 spin_unlock_irqrestore(&phba->hbalock, iflag);
2303 /* Determine how long we might wait for the active mailbox
2304 * command to be gracefully completed by firmware.
2305 */
2306 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) +
2307 jiffies;
2308 /* Wait for the outstnading mailbox command to complete */
2309 while (phba->sli.mbox_active) {
2310 /* Check active mailbox complete status every 2ms */
2311 msleep(2);
2312 if (time_after(jiffies, timeout)) {
2313 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2314 "2813 Mgmt IO is Blocked %x "
2315 "- mbox cmd %x still active\n",
2316 phba->sli.sli_flag, actcmd);
2317 break;
2318 }
2319 }
2286} 2320}
2287 2321
2288/** 2322/**
@@ -3323,22 +3357,14 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3323 "evt_tag:x%x, fcf_index:x%x\n", 3357 "evt_tag:x%x, fcf_index:x%x\n",
3324 acqe_fcoe->event_tag, 3358 acqe_fcoe->event_tag,
3325 acqe_fcoe->index); 3359 acqe_fcoe->index);
3360 /* If the FCF discovery is in progress, do nothing. */
3326 spin_lock_irq(&phba->hbalock); 3361 spin_lock_irq(&phba->hbalock);
3327 if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) || 3362 if (phba->hba_flag & FCF_DISC_INPROGRESS) {
3328 (phba->hba_flag & FCF_DISC_INPROGRESS)) {
3329 /*
3330 * If the current FCF is in discovered state or
3331 * FCF discovery is in progress, do nothing.
3332 */
3333 spin_unlock_irq(&phba->hbalock); 3363 spin_unlock_irq(&phba->hbalock);
3334 break; 3364 break;
3335 } 3365 }
3336 3366 /* If fast FCF failover rescan event is pending, do nothing */
3337 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) { 3367 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
3338 /*
3339 * If fast FCF failover rescan event is pending,
3340 * do nothing.
3341 */
3342 spin_unlock_irq(&phba->hbalock); 3368 spin_unlock_irq(&phba->hbalock);
3343 break; 3369 break;
3344 } 3370 }
@@ -3359,7 +3385,13 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3359 acqe_fcoe->index); 3385 acqe_fcoe->index);
3360 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index); 3386 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
3361 } 3387 }
3362 3388 /* If the FCF has been in discovered state, do nothing. */
3389 spin_lock_irq(&phba->hbalock);
3390 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
3391 spin_unlock_irq(&phba->hbalock);
3392 break;
3393 }
3394 spin_unlock_irq(&phba->hbalock);
3363 /* Otherwise, scan the entire FCF table and re-discover SAN */ 3395 /* Otherwise, scan the entire FCF table and re-discover SAN */
3364 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3396 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3365 "2770 Start FCF table scan due to new FCF " 3397 "2770 Start FCF table scan due to new FCF "
@@ -4885,6 +4917,7 @@ lpfc_create_shost(struct lpfc_hba *phba)
4885 phba->fc_altov = FF_DEF_ALTOV; 4917 phba->fc_altov = FF_DEF_ALTOV;
4886 phba->fc_arbtov = FF_DEF_ARBTOV; 4918 phba->fc_arbtov = FF_DEF_ARBTOV;
4887 4919
4920 atomic_set(&phba->sdev_cnt, 0);
4888 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 4921 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
4889 if (!vport) 4922 if (!vport)
4890 return -ENODEV; 4923 return -ENODEV;
@@ -5533,9 +5566,12 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
5533 mempool_free(pmb, phba->mbox_mem_pool); 5566 mempool_free(pmb, phba->mbox_mem_pool);
5534 5567
5535 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 5568 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
5536 if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri)) 5569 if (phba->cfg_hba_queue_depth >
5570 (phba->sli4_hba.max_cfg_param.max_xri -
5571 lpfc_sli4_get_els_iocb_cnt(phba)))
5537 phba->cfg_hba_queue_depth = 5572 phba->cfg_hba_queue_depth =
5538 phba->sli4_hba.max_cfg_param.max_xri; 5573 phba->sli4_hba.max_cfg_param.max_xri -
5574 lpfc_sli4_get_els_iocb_cnt(phba);
5539 return rc; 5575 return rc;
5540} 5576}
5541 5577
@@ -6993,22 +7029,28 @@ lpfc_sli_disable_intr(struct lpfc_hba *phba)
6993static int 7029static int
6994lpfc_sli4_enable_msix(struct lpfc_hba *phba) 7030lpfc_sli4_enable_msix(struct lpfc_hba *phba)
6995{ 7031{
6996 int rc, index; 7032 int vectors, rc, index;
6997 7033
6998 /* Set up MSI-X multi-message vectors */ 7034 /* Set up MSI-X multi-message vectors */
6999 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++) 7035 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
7000 phba->sli4_hba.msix_entries[index].entry = index; 7036 phba->sli4_hba.msix_entries[index].entry = index;
7001 7037
7002 /* Configure MSI-X capability structure */ 7038 /* Configure MSI-X capability structure */
7039 vectors = phba->sli4_hba.cfg_eqn;
7040enable_msix_vectors:
7003 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries, 7041 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
7004 phba->sli4_hba.cfg_eqn); 7042 vectors);
7005 if (rc) { 7043 if (rc > 1) {
7044 vectors = rc;
7045 goto enable_msix_vectors;
7046 } else if (rc) {
7006 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7047 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7007 "0484 PCI enable MSI-X failed (%d)\n", rc); 7048 "0484 PCI enable MSI-X failed (%d)\n", rc);
7008 goto msi_fail_out; 7049 goto msi_fail_out;
7009 } 7050 }
7051
7010 /* Log MSI-X vector assignment */ 7052 /* Log MSI-X vector assignment */
7011 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++) 7053 for (index = 0; index < vectors; index++)
7012 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7054 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7013 "0489 MSI-X entry[%d]: vector=x%x " 7055 "0489 MSI-X entry[%d]: vector=x%x "
7014 "message=%d\n", index, 7056 "message=%d\n", index,
@@ -7030,7 +7072,7 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
7030 } 7072 }
7031 7073
7032 /* The rest of the vector(s) are associated to fast-path handler(s) */ 7074 /* The rest of the vector(s) are associated to fast-path handler(s) */
7033 for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) { 7075 for (index = 1; index < vectors; index++) {
7034 phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1; 7076 phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
7035 phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba; 7077 phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
7036 rc = request_irq(phba->sli4_hba.msix_entries[index].vector, 7078 rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
@@ -7044,6 +7086,7 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
7044 goto cfg_fail_out; 7086 goto cfg_fail_out;
7045 } 7087 }
7046 } 7088 }
7089 phba->sli4_hba.msix_vec_nr = vectors;
7047 7090
7048 return rc; 7091 return rc;
7049 7092
@@ -7077,9 +7120,10 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba)
7077 /* Free up MSI-X multi-message vectors */ 7120 /* Free up MSI-X multi-message vectors */
7078 free_irq(phba->sli4_hba.msix_entries[0].vector, phba); 7121 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7079 7122
7080 for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) 7123 for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++)
7081 free_irq(phba->sli4_hba.msix_entries[index].vector, 7124 free_irq(phba->sli4_hba.msix_entries[index].vector,
7082 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 7125 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7126
7083 /* Disable MSI-X */ 7127 /* Disable MSI-X */
7084 pci_disable_msix(phba->pcidev); 7128 pci_disable_msix(phba->pcidev);
7085 7129
@@ -7121,6 +7165,7 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
7121 pci_disable_msi(phba->pcidev); 7165 pci_disable_msi(phba->pcidev);
7122 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7166 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7123 "0490 MSI request_irq failed (%d)\n", rc); 7167 "0490 MSI request_irq failed (%d)\n", rc);
7168 return rc;
7124 } 7169 }
7125 7170
7126 for (index = 0; index < phba->cfg_fcp_eq_count; index++) { 7171 for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
@@ -7128,7 +7173,7 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
7128 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 7173 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7129 } 7174 }
7130 7175
7131 return rc; 7176 return 0;
7132} 7177}
7133 7178
7134/** 7179/**
@@ -7839,6 +7884,9 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
7839 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7884 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7840 "2710 PCI channel disable preparing for reset\n"); 7885 "2710 PCI channel disable preparing for reset\n");
7841 7886
7887 /* Block any management I/Os to the device */
7888 lpfc_block_mgmt_io(phba);
7889
7842 /* Block all SCSI devices' I/Os on the host */ 7890 /* Block all SCSI devices' I/Os on the host */
7843 lpfc_scsi_dev_block(phba); 7891 lpfc_scsi_dev_block(phba);
7844 7892
@@ -7848,6 +7896,7 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
7848 /* Disable interrupt and pci device */ 7896 /* Disable interrupt and pci device */
7849 lpfc_sli_disable_intr(phba); 7897 lpfc_sli_disable_intr(phba);
7850 pci_disable_device(phba->pcidev); 7898 pci_disable_device(phba->pcidev);
7899
7851 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 7900 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
7852 lpfc_sli_flush_fcp_rings(phba); 7901 lpfc_sli_flush_fcp_rings(phba);
7853} 7902}
@@ -7861,7 +7910,7 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
7861 * pending I/Os. 7910 * pending I/Os.
7862 **/ 7911 **/
7863static void 7912static void
7864lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba) 7913lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
7865{ 7914{
7866 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7915 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7867 "2711 PCI channel permanent disable for failure\n"); 7916 "2711 PCI channel permanent disable for failure\n");
@@ -7910,7 +7959,7 @@ lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
7910 return PCI_ERS_RESULT_NEED_RESET; 7959 return PCI_ERS_RESULT_NEED_RESET;
7911 case pci_channel_io_perm_failure: 7960 case pci_channel_io_perm_failure:
7912 /* Permanent failure, prepare for device down */ 7961 /* Permanent failure, prepare for device down */
7913 lpfc_prep_dev_for_perm_failure(phba); 7962 lpfc_sli_prep_dev_for_perm_failure(phba);
7914 return PCI_ERS_RESULT_DISCONNECT; 7963 return PCI_ERS_RESULT_DISCONNECT;
7915 default: 7964 default:
7916 /* Unknown state, prepare and request slot reset */ 7965 /* Unknown state, prepare and request slot reset */
@@ -7979,7 +8028,8 @@ lpfc_io_slot_reset_s3(struct pci_dev *pdev)
7979 } else 8028 } else
7980 phba->intr_mode = intr_mode; 8029 phba->intr_mode = intr_mode;
7981 8030
7982 /* Take device offline; this will perform cleanup */ 8031 /* Take device offline, it will perform cleanup */
8032 lpfc_offline_prep(phba);
7983 lpfc_offline(phba); 8033 lpfc_offline(phba);
7984 lpfc_sli_brdrestart(phba); 8034 lpfc_sli_brdrestart(phba);
7985 8035
@@ -8110,8 +8160,12 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
8110 } 8160 }
8111 8161
8112 /* Initialize and populate the iocb list per host */ 8162 /* Initialize and populate the iocb list per host */
8113 error = lpfc_init_iocb_list(phba, 8163
8114 phba->sli4_hba.max_cfg_param.max_xri); 8164 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8165 "2821 initialize iocb list %d.\n",
8166 phba->cfg_iocb_cnt*1024);
8167 error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);
8168
8115 if (error) { 8169 if (error) {
8116 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8170 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8117 "1413 Failed to initialize iocb list.\n"); 8171 "1413 Failed to initialize iocb list.\n");
@@ -8160,6 +8214,8 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
8160 /* Default to single FCP EQ for non-MSI-X */ 8214 /* Default to single FCP EQ for non-MSI-X */
8161 if (phba->intr_type != MSIX) 8215 if (phba->intr_type != MSIX)
8162 phba->cfg_fcp_eq_count = 1; 8216 phba->cfg_fcp_eq_count = 1;
8217 else if (phba->sli4_hba.msix_vec_nr < phba->cfg_fcp_eq_count)
8218 phba->cfg_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
8163 /* Set up SLI-4 HBA */ 8219 /* Set up SLI-4 HBA */
8164 if (lpfc_sli4_hba_setup(phba)) { 8220 if (lpfc_sli4_hba_setup(phba)) {
8165 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8221 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8321,7 +8377,7 @@ lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
8321 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8377 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8322 8378
8323 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8379 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8324 "0298 PCI device Power Management suspend.\n"); 8380 "2843 PCI device Power Management suspend.\n");
8325 8381
8326 /* Bring down the device */ 8382 /* Bring down the device */
8327 lpfc_offline_prep(phba); 8383 lpfc_offline_prep(phba);
@@ -8412,6 +8468,84 @@ lpfc_pci_resume_one_s4(struct pci_dev *pdev)
8412} 8468}
8413 8469
8414/** 8470/**
8471 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
8472 * @phba: pointer to lpfc hba data structure.
8473 *
8474 * This routine is called to prepare the SLI4 device for PCI slot recover. It
8475 * aborts all the outstanding SCSI I/Os to the pci device.
8476 **/
8477static void
8478lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
8479{
8480 struct lpfc_sli *psli = &phba->sli;
8481 struct lpfc_sli_ring *pring;
8482
8483 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8484 "2828 PCI channel I/O abort preparing for recovery\n");
8485 /*
8486 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
8487 * and let the SCSI mid-layer to retry them to recover.
8488 */
8489 pring = &psli->ring[psli->fcp_ring];
8490 lpfc_sli_abort_iocb_ring(phba, pring);
8491}
8492
8493/**
8494 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
8495 * @phba: pointer to lpfc hba data structure.
8496 *
8497 * This routine is called to prepare the SLI4 device for PCI slot reset. It
8498 * disables the device interrupt and pci device, and aborts the internal FCP
8499 * pending I/Os.
8500 **/
8501static void
8502lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
8503{
8504 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8505 "2826 PCI channel disable preparing for reset\n");
8506
8507 /* Block any management I/Os to the device */
8508 lpfc_block_mgmt_io(phba);
8509
8510 /* Block all SCSI devices' I/Os on the host */
8511 lpfc_scsi_dev_block(phba);
8512
8513 /* stop all timers */
8514 lpfc_stop_hba_timers(phba);
8515
8516 /* Disable interrupt and pci device */
8517 lpfc_sli4_disable_intr(phba);
8518 pci_disable_device(phba->pcidev);
8519
8520 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
8521 lpfc_sli_flush_fcp_rings(phba);
8522}
8523
8524/**
8525 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
8526 * @phba: pointer to lpfc hba data structure.
8527 *
8528 * This routine is called to prepare the SLI4 device for PCI slot permanently
8529 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
8530 * pending I/Os.
8531 **/
8532static void
8533lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
8534{
8535 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8536 "2827 PCI channel permanent disable for failure\n");
8537
8538 /* Block all SCSI devices' I/Os on the host */
8539 lpfc_scsi_dev_block(phba);
8540
8541 /* stop all timers */
8542 lpfc_stop_hba_timers(phba);
8543
8544 /* Clean up all driver's outstanding SCSI I/Os */
8545 lpfc_sli_flush_fcp_rings(phba);
8546}
8547
8548/**
8415 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 8549 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
8416 * @pdev: pointer to PCI device. 8550 * @pdev: pointer to PCI device.
8417 * @state: the current PCI connection state. 8551 * @state: the current PCI connection state.
@@ -8430,7 +8564,29 @@ lpfc_pci_resume_one_s4(struct pci_dev *pdev)
8430static pci_ers_result_t 8564static pci_ers_result_t
8431lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) 8565lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
8432{ 8566{
8433 return PCI_ERS_RESULT_NEED_RESET; 8567 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8568 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8569
8570 switch (state) {
8571 case pci_channel_io_normal:
8572 /* Non-fatal error, prepare for recovery */
8573 lpfc_sli4_prep_dev_for_recover(phba);
8574 return PCI_ERS_RESULT_CAN_RECOVER;
8575 case pci_channel_io_frozen:
8576 /* Fatal error, prepare for slot reset */
8577 lpfc_sli4_prep_dev_for_reset(phba);
8578 return PCI_ERS_RESULT_NEED_RESET;
8579 case pci_channel_io_perm_failure:
8580 /* Permanent failure, prepare for device down */
8581 lpfc_sli4_prep_dev_for_perm_failure(phba);
8582 return PCI_ERS_RESULT_DISCONNECT;
8583 default:
8584 /* Unknown state, prepare and request slot reset */
8585 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8586 "2825 Unknown PCI error state: x%x\n", state);
8587 lpfc_sli4_prep_dev_for_reset(phba);
8588 return PCI_ERS_RESULT_NEED_RESET;
8589 }
8434} 8590}
8435 8591
8436/** 8592/**
@@ -8454,6 +8610,39 @@ lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
8454static pci_ers_result_t 8610static pci_ers_result_t
8455lpfc_io_slot_reset_s4(struct pci_dev *pdev) 8611lpfc_io_slot_reset_s4(struct pci_dev *pdev)
8456{ 8612{
8613 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8614 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8615 struct lpfc_sli *psli = &phba->sli;
8616 uint32_t intr_mode;
8617
8618 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
8619 if (pci_enable_device_mem(pdev)) {
8620 printk(KERN_ERR "lpfc: Cannot re-enable "
8621 "PCI device after reset.\n");
8622 return PCI_ERS_RESULT_DISCONNECT;
8623 }
8624
8625 pci_restore_state(pdev);
8626 if (pdev->is_busmaster)
8627 pci_set_master(pdev);
8628
8629 spin_lock_irq(&phba->hbalock);
8630 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
8631 spin_unlock_irq(&phba->hbalock);
8632
8633 /* Configure and enable interrupt */
8634 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
8635 if (intr_mode == LPFC_INTR_ERROR) {
8636 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8637 "2824 Cannot re-enable interrupt after "
8638 "slot reset.\n");
8639 return PCI_ERS_RESULT_DISCONNECT;
8640 } else
8641 phba->intr_mode = intr_mode;
8642
8643 /* Log the current active interrupt mode */
8644 lpfc_log_intr_mode(phba, phba->intr_mode);
8645
8457 return PCI_ERS_RESULT_RECOVERED; 8646 return PCI_ERS_RESULT_RECOVERED;
8458} 8647}
8459 8648
@@ -8470,7 +8659,27 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
8470static void 8659static void
8471lpfc_io_resume_s4(struct pci_dev *pdev) 8660lpfc_io_resume_s4(struct pci_dev *pdev)
8472{ 8661{
8473 return; 8662 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8663 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8664
8665 /*
8666 * In case of slot reset, as function reset is performed through
8667 * mailbox command which needs DMA to be enabled, this operation
8668 * has to be moved to the io resume phase. Taking device offline
8669 * will perform the necessary cleanup.
8670 */
8671 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
8672 /* Perform device reset */
8673 lpfc_offline_prep(phba);
8674 lpfc_offline(phba);
8675 lpfc_sli_brdrestart(phba);
8676 /* Bring the device back online */
8677 lpfc_online(phba);
8678 }
8679
8680 /* Clean up Advanced Error Reporting (AER) if needed */
8681 if (phba->hba_flag & HBA_AER_ENABLED)
8682 pci_cleanup_aer_uncorrect_error_status(pdev);
8474} 8683}
8475 8684
8476/** 8685/**
@@ -8802,6 +9011,8 @@ static struct pci_device_id lpfc_id_table[] = {
8802 PCI_ANY_ID, PCI_ANY_ID, }, 9011 PCI_ANY_ID, PCI_ANY_ID, },
8803 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON, 9012 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
8804 PCI_ANY_ID, PCI_ANY_ID, }, 9013 PCI_ANY_ID, PCI_ANY_ID, },
9014 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
9015 PCI_ANY_ID, PCI_ANY_ID, },
8805 { 0 } 9016 { 0 }
8806}; 9017};
8807 9018
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index e84dc33ca201..9c2c7c7140c7 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -955,6 +955,26 @@ lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
955 return; 955 return;
956} 956}
957 957
958void
959lpfc_sli4_swap_str(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
960{
961 MAILBOX_t *mb = &pmb->u.mb;
962 struct lpfc_mqe *mqe;
963
964 switch (mb->mbxCommand) {
965 case MBX_READ_REV:
966 mqe = &pmb->u.mqe;
967 lpfc_sli_pcimem_bcopy(mqe->un.read_rev.fw_name,
968 mqe->un.read_rev.fw_name, 16);
969 lpfc_sli_pcimem_bcopy(mqe->un.read_rev.ulp_fw_name,
970 mqe->un.read_rev.ulp_fw_name, 16);
971 break;
972 default:
973 break;
974 }
975 return;
976}
977
958/** 978/**
959 * lpfc_build_hbq_profile2 - Set up the HBQ Selection Profile 2 979 * lpfc_build_hbq_profile2 - Set up the HBQ Selection Profile 2
960 * @hbqmb: pointer to the HBQ configuration data structure in mailbox command. 980 * @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
@@ -1199,7 +1219,6 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1199 mb->un.varCfgPort.cdss = 1; /* Configure Security */ 1219 mb->un.varCfgPort.cdss = 1; /* Configure Security */
1200 mb->un.varCfgPort.cerbm = 1; /* Request HBQs */ 1220 mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
1201 mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */ 1221 mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
1202 mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */
1203 mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count(); 1222 mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
1204 if (phba->max_vpi && phba->cfg_enable_npiv && 1223 if (phba->max_vpi && phba->cfg_enable_npiv &&
1205 phba->vpd.sli3Feat.cmv) { 1224 phba->vpd.sli3Feat.cmv) {
@@ -2026,7 +2045,7 @@ lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
2026 phba->fcf.current_rec.fcf_indx); 2045 phba->fcf.current_rec.fcf_indx);
2027 /* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */ 2046 /* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */
2028 bf_set(lpfc_reg_fcfi_mam, reg_fcfi, (~phba->fcf.addr_mode) & 0x3); 2047 bf_set(lpfc_reg_fcfi_mam, reg_fcfi, (~phba->fcf.addr_mode) & 0x3);
2029 if (phba->fcf.current_rec.vlan_id != 0xFFFF) { 2048 if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) {
2030 bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1); 2049 bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
2031 bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi, 2050 bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi,
2032 phba->fcf.current_rec.vlan_id); 2051 phba->fcf.current_rec.vlan_id);
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index b90820a699fd..bccc9c66fa37 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -190,6 +190,7 @@ lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
190} 190}
191 191
192 192
193
193/* 194/*
194 * Free resources / clean up outstanding I/Os 195 * Free resources / clean up outstanding I/Os
195 * associated with a LPFC_NODELIST entry. This 196 * associated with a LPFC_NODELIST entry. This
@@ -199,13 +200,15 @@ int
199lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 200lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
200{ 201{
201 LIST_HEAD(completions); 202 LIST_HEAD(completions);
203 LIST_HEAD(txcmplq_completions);
204 LIST_HEAD(abort_list);
202 struct lpfc_sli *psli = &phba->sli; 205 struct lpfc_sli *psli = &phba->sli;
203 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; 206 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
204 struct lpfc_iocbq *iocb, *next_iocb; 207 struct lpfc_iocbq *iocb, *next_iocb;
205 208
206 /* Abort outstanding I/O on NPort <nlp_DID> */ 209 /* Abort outstanding I/O on NPort <nlp_DID> */
207 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY, 210 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
208 "0205 Abort outstanding I/O on NPort x%x " 211 "2819 Abort outstanding I/O on NPort x%x "
209 "Data: x%x x%x x%x\n", 212 "Data: x%x x%x x%x\n",
210 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 213 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
211 ndlp->nlp_rpi); 214 ndlp->nlp_rpi);
@@ -224,14 +227,25 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
224 } 227 }
225 228
226 /* Next check the txcmplq */ 229 /* Next check the txcmplq */
227 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { 230 list_splice_init(&pring->txcmplq, &txcmplq_completions);
231 spin_unlock_irq(&phba->hbalock);
232
233 list_for_each_entry_safe(iocb, next_iocb, &txcmplq_completions, list) {
228 /* Check to see if iocb matches the nport we are looking for */ 234 /* Check to see if iocb matches the nport we are looking for */
229 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) { 235 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
230 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 236 list_add_tail(&iocb->dlist, &abort_list);
231 }
232 } 237 }
238 spin_lock_irq(&phba->hbalock);
239 list_splice(&txcmplq_completions, &pring->txcmplq);
233 spin_unlock_irq(&phba->hbalock); 240 spin_unlock_irq(&phba->hbalock);
234 241
242 list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) {
243 spin_lock_irq(&phba->hbalock);
244 list_del_init(&iocb->dlist);
245 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
246 spin_unlock_irq(&phba->hbalock);
247 }
248
235 /* Cancel all the IOCBs from the completions list */ 249 /* Cancel all the IOCBs from the completions list */
236 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 250 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
237 IOERR_SLI_ABORTED); 251 IOERR_SLI_ABORTED);
@@ -626,7 +640,8 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
626 if (!(vport->fc_flag & FC_PT2PT)) { 640 if (!(vport->fc_flag & FC_PT2PT)) {
627 /* Check config parameter use-adisc or FCP-2 */ 641 /* Check config parameter use-adisc or FCP-2 */
628 if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) || 642 if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
629 ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) { 643 ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
644 (ndlp->nlp_type & NLP_FCP_TARGET))) {
630 spin_lock_irq(shost->host_lock); 645 spin_lock_irq(shost->host_lock);
631 ndlp->nlp_flag |= NLP_NPR_ADISC; 646 ndlp->nlp_flag |= NLP_NPR_ADISC;
632 spin_unlock_irq(shost->host_lock); 647 spin_unlock_irq(shost->host_lock);
@@ -962,6 +977,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
962 mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login; 977 mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
963 break; 978 break;
964 default: 979 default:
980 ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
965 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 981 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
966 } 982 }
967 mbox->context2 = lpfc_nlp_get(ndlp); 983 mbox->context2 = lpfc_nlp_get(ndlp);
@@ -972,6 +988,8 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
972 NLP_STE_REG_LOGIN_ISSUE); 988 NLP_STE_REG_LOGIN_ISSUE);
973 return ndlp->nlp_state; 989 return ndlp->nlp_state;
974 } 990 }
991 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
992 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
975 /* decrement node reference count to the failed mbox 993 /* decrement node reference count to the failed mbox
976 * command 994 * command
977 */ 995 */
@@ -1458,6 +1476,7 @@ lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
1458 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; 1476 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1459 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 1477 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1460 spin_lock_irq(shost->host_lock); 1478 spin_lock_irq(shost->host_lock);
1479 ndlp->nlp_flag |= NLP_IGNR_REG_CMPL;
1461 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1480 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1462 spin_unlock_irq(shost->host_lock); 1481 spin_unlock_irq(shost->host_lock);
1463 lpfc_disc_set_adisc(vport, ndlp); 1482 lpfc_disc_set_adisc(vport, ndlp);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index f4a3b2e79eea..c818a7255962 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -623,6 +623,7 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
623 unsigned long iflag = 0; 623 unsigned long iflag = 0;
624 struct lpfc_iocbq *iocbq; 624 struct lpfc_iocbq *iocbq;
625 int i; 625 int i;
626 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
626 627
627 spin_lock_irqsave(&phba->hbalock, iflag); 628 spin_lock_irqsave(&phba->hbalock, iflag);
628 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); 629 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
@@ -651,6 +652,8 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
651 psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); 652 psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
652 psb->exch_busy = 0; 653 psb->exch_busy = 0;
653 spin_unlock_irqrestore(&phba->hbalock, iflag); 654 spin_unlock_irqrestore(&phba->hbalock, iflag);
655 if (pring->txq_cnt)
656 lpfc_worker_wake_up(phba);
654 return; 657 return;
655 658
656 } 659 }
@@ -747,7 +750,6 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
747 int status = 0, index; 750 int status = 0, index;
748 int bcnt; 751 int bcnt;
749 int non_sequential_xri = 0; 752 int non_sequential_xri = 0;
750 int rc = 0;
751 LIST_HEAD(sblist); 753 LIST_HEAD(sblist);
752 754
753 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { 755 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
@@ -774,6 +776,8 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
774 /* Allocate iotag for psb->cur_iocbq. */ 776 /* Allocate iotag for psb->cur_iocbq. */
775 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); 777 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
776 if (iotag == 0) { 778 if (iotag == 0) {
779 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
780 psb->data, psb->dma_handle);
777 kfree(psb); 781 kfree(psb);
778 break; 782 break;
779 } 783 }
@@ -858,7 +862,6 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
858 if (status) { 862 if (status) {
859 /* Put this back on the abort scsi list */ 863 /* Put this back on the abort scsi list */
860 psb->exch_busy = 1; 864 psb->exch_busy = 1;
861 rc++;
862 } else { 865 } else {
863 psb->exch_busy = 0; 866 psb->exch_busy = 0;
864 psb->status = IOSTAT_SUCCESS; 867 psb->status = IOSTAT_SUCCESS;
@@ -877,7 +880,6 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
877 if (status) { 880 if (status) {
878 /* Put this back on the abort scsi list */ 881 /* Put this back on the abort scsi list */
879 psb->exch_busy = 1; 882 psb->exch_busy = 1;
880 rc++;
881 } else { 883 } else {
882 psb->exch_busy = 0; 884 psb->exch_busy = 0;
883 psb->status = IOSTAT_SUCCESS; 885 psb->status = IOSTAT_SUCCESS;
@@ -887,7 +889,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
887 } 889 }
888 } 890 }
889 891
890 return bcnt + non_sequential_xri - rc; 892 return bcnt + non_sequential_xri;
891} 893}
892 894
893/** 895/**
@@ -1323,6 +1325,10 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1323 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); 1325 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1324 pde5->reftag = reftag; 1326 pde5->reftag = reftag;
1325 1327
1328 /* Endian convertion if necessary for PDE5 */
1329 pde5->word0 = cpu_to_le32(pde5->word0);
1330 pde5->reftag = cpu_to_le32(pde5->reftag);
1331
1326 /* advance bpl and increment bde count */ 1332 /* advance bpl and increment bde count */
1327 num_bde++; 1333 num_bde++;
1328 bpl++; 1334 bpl++;
@@ -1341,6 +1347,11 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1341 bf_set(pde6_ai, pde6, 1); 1347 bf_set(pde6_ai, pde6, 1);
1342 bf_set(pde6_apptagval, pde6, apptagval); 1348 bf_set(pde6_apptagval, pde6, apptagval);
1343 1349
1350 /* Endian convertion if necessary for PDE6 */
1351 pde6->word0 = cpu_to_le32(pde6->word0);
1352 pde6->word1 = cpu_to_le32(pde6->word1);
1353 pde6->word2 = cpu_to_le32(pde6->word2);
1354
1344 /* advance bpl and increment bde count */ 1355 /* advance bpl and increment bde count */
1345 num_bde++; 1356 num_bde++;
1346 bpl++; 1357 bpl++;
@@ -1448,6 +1459,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1448 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); 1459 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1449 pde5->reftag = reftag; 1460 pde5->reftag = reftag;
1450 1461
1462 /* Endian convertion if necessary for PDE5 */
1463 pde5->word0 = cpu_to_le32(pde5->word0);
1464 pde5->reftag = cpu_to_le32(pde5->reftag);
1465
1451 /* advance bpl and increment bde count */ 1466 /* advance bpl and increment bde count */
1452 num_bde++; 1467 num_bde++;
1453 bpl++; 1468 bpl++;
@@ -1464,6 +1479,11 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1464 bf_set(pde6_ai, pde6, 1); 1479 bf_set(pde6_ai, pde6, 1);
1465 bf_set(pde6_apptagval, pde6, apptagval); 1480 bf_set(pde6_apptagval, pde6, apptagval);
1466 1481
1482 /* Endian convertion if necessary for PDE6 */
1483 pde6->word0 = cpu_to_le32(pde6->word0);
1484 pde6->word1 = cpu_to_le32(pde6->word1);
1485 pde6->word2 = cpu_to_le32(pde6->word2);
1486
1467 /* advance bpl and increment bde count */ 1487 /* advance bpl and increment bde count */
1468 num_bde++; 1488 num_bde++;
1469 bpl++; 1489 bpl++;
@@ -1475,7 +1495,6 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1475 prot_bde->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr)); 1495 prot_bde->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr));
1476 protgroup_len = sg_dma_len(sgpe); 1496 protgroup_len = sg_dma_len(sgpe);
1477 1497
1478
1479 /* must be integer multiple of the DIF block length */ 1498 /* must be integer multiple of the DIF block length */
1480 BUG_ON(protgroup_len % 8); 1499 BUG_ON(protgroup_len % 8);
1481 1500
@@ -2293,15 +2312,21 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
2293 struct lpfc_vport *vport = pIocbIn->vport; 2312 struct lpfc_vport *vport = pIocbIn->vport;
2294 struct lpfc_rport_data *rdata = lpfc_cmd->rdata; 2313 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
2295 struct lpfc_nodelist *pnode = rdata->pnode; 2314 struct lpfc_nodelist *pnode = rdata->pnode;
2296 struct scsi_cmnd *cmd = lpfc_cmd->pCmd; 2315 struct scsi_cmnd *cmd;
2297 int result; 2316 int result;
2298 struct scsi_device *tmp_sdev; 2317 struct scsi_device *tmp_sdev;
2299 int depth; 2318 int depth;
2300 unsigned long flags; 2319 unsigned long flags;
2301 struct lpfc_fast_path_event *fast_path_evt; 2320 struct lpfc_fast_path_event *fast_path_evt;
2302 struct Scsi_Host *shost = cmd->device->host; 2321 struct Scsi_Host *shost;
2303 uint32_t queue_depth, scsi_id; 2322 uint32_t queue_depth, scsi_id;
2304 2323
2324 /* Sanity check on return of outstanding command */
2325 if (!(lpfc_cmd->pCmd))
2326 return;
2327 cmd = lpfc_cmd->pCmd;
2328 shost = cmd->device->host;
2329
2305 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4]; 2330 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
2306 lpfc_cmd->status = pIocbOut->iocb.ulpStatus; 2331 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
2307 /* pick up SLI4 exhange busy status from HBA */ 2332 /* pick up SLI4 exhange busy status from HBA */
@@ -2363,7 +2388,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
2363 case IOSTAT_LOCAL_REJECT: 2388 case IOSTAT_LOCAL_REJECT:
2364 if (lpfc_cmd->result == IOERR_INVALID_RPI || 2389 if (lpfc_cmd->result == IOERR_INVALID_RPI ||
2365 lpfc_cmd->result == IOERR_NO_RESOURCES || 2390 lpfc_cmd->result == IOERR_NO_RESOURCES ||
2366 lpfc_cmd->result == IOERR_ABORT_REQUESTED) { 2391 lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
2392 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
2367 cmd->result = ScsiResult(DID_REQUEUE, 0); 2393 cmd->result = ScsiResult(DID_REQUEUE, 0);
2368 break; 2394 break;
2369 } 2395 }
@@ -2432,14 +2458,16 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
2432 } 2458 }
2433 spin_unlock_irqrestore(shost->host_lock, flags); 2459 spin_unlock_irqrestore(shost->host_lock, flags);
2434 } else if (pnode && NLP_CHK_NODE_ACT(pnode)) { 2460 } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
2435 if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) && 2461 if ((pnode->cmd_qdepth < vport->cfg_tgt_queue_depth) &&
2436 time_after(jiffies, pnode->last_change_time + 2462 time_after(jiffies, pnode->last_change_time +
2437 msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) { 2463 msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
2438 spin_lock_irqsave(shost->host_lock, flags); 2464 spin_lock_irqsave(shost->host_lock, flags);
2439 pnode->cmd_qdepth += pnode->cmd_qdepth * 2465 depth = pnode->cmd_qdepth * LPFC_TGTQ_RAMPUP_PCENT
2440 LPFC_TGTQ_RAMPUP_PCENT / 100; 2466 / 100;
2441 if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH) 2467 depth = depth ? depth : 1;
2442 pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH; 2468 pnode->cmd_qdepth += depth;
2469 if (pnode->cmd_qdepth > vport->cfg_tgt_queue_depth)
2470 pnode->cmd_qdepth = vport->cfg_tgt_queue_depth;
2443 pnode->last_change_time = jiffies; 2471 pnode->last_change_time = jiffies;
2444 spin_unlock_irqrestore(shost->host_lock, flags); 2472 spin_unlock_irqrestore(shost->host_lock, flags);
2445 } 2473 }
@@ -2894,8 +2922,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2894 cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0); 2922 cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
2895 goto out_fail_command; 2923 goto out_fail_command;
2896 } 2924 }
2897 if (vport->cfg_max_scsicmpl_time && 2925 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
2898 (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth))
2899 goto out_host_busy; 2926 goto out_host_busy;
2900 2927
2901 lpfc_cmd = lpfc_get_scsi_buf(phba); 2928 lpfc_cmd = lpfc_get_scsi_buf(phba);
@@ -3041,7 +3068,9 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
3041 int ret = SUCCESS; 3068 int ret = SUCCESS;
3042 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); 3069 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
3043 3070
3044 fc_block_scsi_eh(cmnd); 3071 ret = fc_block_scsi_eh(cmnd);
3072 if (ret)
3073 return ret;
3045 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; 3074 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
3046 BUG_ON(!lpfc_cmd); 3075 BUG_ON(!lpfc_cmd);
3047 3076
@@ -3225,7 +3254,9 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
3225 lpfc_taskmgmt_name(task_mgmt_cmd), 3254 lpfc_taskmgmt_name(task_mgmt_cmd),
3226 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus, 3255 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
3227 iocbqrsp->iocb.un.ulpWord[4]); 3256 iocbqrsp->iocb.un.ulpWord[4]);
3228 } else 3257 } else if (status == IOCB_BUSY)
3258 ret = FAILED;
3259 else
3229 ret = SUCCESS; 3260 ret = SUCCESS;
3230 3261
3231 lpfc_sli_release_iocbq(phba, iocbqrsp); 3262 lpfc_sli_release_iocbq(phba, iocbqrsp);
@@ -3357,7 +3388,9 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
3357 return FAILED; 3388 return FAILED;
3358 } 3389 }
3359 pnode = rdata->pnode; 3390 pnode = rdata->pnode;
3360 fc_block_scsi_eh(cmnd); 3391 status = fc_block_scsi_eh(cmnd);
3392 if (status)
3393 return status;
3361 3394
3362 status = lpfc_chk_tgt_mapped(vport, cmnd); 3395 status = lpfc_chk_tgt_mapped(vport, cmnd);
3363 if (status == FAILED) { 3396 if (status == FAILED) {
@@ -3422,7 +3455,9 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
3422 return FAILED; 3455 return FAILED;
3423 } 3456 }
3424 pnode = rdata->pnode; 3457 pnode = rdata->pnode;
3425 fc_block_scsi_eh(cmnd); 3458 status = fc_block_scsi_eh(cmnd);
3459 if (status)
3460 return status;
3426 3461
3427 status = lpfc_chk_tgt_mapped(vport, cmnd); 3462 status = lpfc_chk_tgt_mapped(vport, cmnd);
3428 if (status == FAILED) { 3463 if (status == FAILED) {
@@ -3488,7 +3523,9 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
3488 fc_host_post_vendor_event(shost, fc_get_event_number(), 3523 fc_host_post_vendor_event(shost, fc_get_event_number(),
3489 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); 3524 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
3490 3525
3491 fc_block_scsi_eh(cmnd); 3526 ret = fc_block_scsi_eh(cmnd);
3527 if (ret)
3528 return ret;
3492 3529
3493 /* 3530 /*
3494 * Since the driver manages a single bus device, reset all 3531 * Since the driver manages a single bus device, reset all
@@ -3561,11 +3598,13 @@ lpfc_slave_alloc(struct scsi_device *sdev)
3561 uint32_t total = 0; 3598 uint32_t total = 0;
3562 uint32_t num_to_alloc = 0; 3599 uint32_t num_to_alloc = 0;
3563 int num_allocated = 0; 3600 int num_allocated = 0;
3601 uint32_t sdev_cnt;
3564 3602
3565 if (!rport || fc_remote_port_chkready(rport)) 3603 if (!rport || fc_remote_port_chkready(rport))
3566 return -ENXIO; 3604 return -ENXIO;
3567 3605
3568 sdev->hostdata = rport->dd_data; 3606 sdev->hostdata = rport->dd_data;
3607 sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
3569 3608
3570 /* 3609 /*
3571 * Populate the cmds_per_lun count scsi_bufs into this host's globally 3610 * Populate the cmds_per_lun count scsi_bufs into this host's globally
@@ -3577,6 +3616,10 @@ lpfc_slave_alloc(struct scsi_device *sdev)
3577 total = phba->total_scsi_bufs; 3616 total = phba->total_scsi_bufs;
3578 num_to_alloc = vport->cfg_lun_queue_depth + 2; 3617 num_to_alloc = vport->cfg_lun_queue_depth + 2;
3579 3618
3619 /* If allocated buffers are enough do nothing */
3620 if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
3621 return 0;
3622
3580 /* Allow some exchanges to be available always to complete discovery */ 3623 /* Allow some exchanges to be available always to complete discovery */
3581 if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) { 3624 if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
3582 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 3625 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
@@ -3658,6 +3701,9 @@ lpfc_slave_configure(struct scsi_device *sdev)
3658static void 3701static void
3659lpfc_slave_destroy(struct scsi_device *sdev) 3702lpfc_slave_destroy(struct scsi_device *sdev)
3660{ 3703{
3704 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
3705 struct lpfc_hba *phba = vport->phba;
3706 atomic_dec(&phba->sdev_cnt);
3661 sdev->hostdata = NULL; 3707 sdev->hostdata = NULL;
3662 return; 3708 return;
3663} 3709}
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 7a61455140b6..e758eae0d0fd 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -455,6 +455,11 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
455 struct lpfc_iocbq * iocbq = NULL; 455 struct lpfc_iocbq * iocbq = NULL;
456 456
457 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list); 457 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
458
459 if (iocbq)
460 phba->iocb_cnt++;
461 if (phba->iocb_cnt > phba->iocb_max)
462 phba->iocb_max = phba->iocb_cnt;
458 return iocbq; 463 return iocbq;
459} 464}
460 465
@@ -575,7 +580,8 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
575{ 580{
576 struct lpfc_sglq *sglq; 581 struct lpfc_sglq *sglq;
577 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 582 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
578 unsigned long iflag; 583 unsigned long iflag = 0;
584 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
579 585
580 if (iocbq->sli4_xritag == NO_XRI) 586 if (iocbq->sli4_xritag == NO_XRI)
581 sglq = NULL; 587 sglq = NULL;
@@ -593,6 +599,10 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
593 } else { 599 } else {
594 sglq->state = SGL_FREED; 600 sglq->state = SGL_FREED;
595 list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list); 601 list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
602
603 /* Check if TXQ queue needs to be serviced */
604 if (pring->txq_cnt)
605 lpfc_worker_wake_up(phba);
596 } 606 }
597 } 607 }
598 608
@@ -605,6 +615,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
605 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 615 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
606} 616}
607 617
618
608/** 619/**
609 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool 620 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
610 * @phba: Pointer to HBA context object. 621 * @phba: Pointer to HBA context object.
@@ -642,6 +653,7 @@ static void
642__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 653__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
643{ 654{
644 phba->__lpfc_sli_release_iocbq(phba, iocbq); 655 phba->__lpfc_sli_release_iocbq(phba, iocbq);
656 phba->iocb_cnt--;
645} 657}
646 658
647/** 659/**
@@ -872,7 +884,11 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
872 struct lpfc_iocbq *piocb) 884 struct lpfc_iocbq *piocb)
873{ 885{
874 list_add_tail(&piocb->list, &pring->txcmplq); 886 list_add_tail(&piocb->list, &pring->txcmplq);
887 piocb->iocb_flag |= LPFC_IO_ON_Q;
875 pring->txcmplq_cnt++; 888 pring->txcmplq_cnt++;
889 if (pring->txcmplq_cnt > pring->txcmplq_max)
890 pring->txcmplq_max = pring->txcmplq_cnt;
891
876 if ((unlikely(pring->ringno == LPFC_ELS_RING)) && 892 if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
877 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 893 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
878 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 894 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
@@ -897,7 +913,7 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
897 * the txq, the function returns first iocb in the list after 913 * the txq, the function returns first iocb in the list after
898 * removing the iocb from the list, else it returns NULL. 914 * removing the iocb from the list, else it returns NULL.
899 **/ 915 **/
900static struct lpfc_iocbq * 916struct lpfc_iocbq *
901lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 917lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
902{ 918{
903 struct lpfc_iocbq *cmd_iocb; 919 struct lpfc_iocbq *cmd_iocb;
@@ -2150,7 +2166,10 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2150 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2166 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2151 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2167 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2152 list_del_init(&cmd_iocb->list); 2168 list_del_init(&cmd_iocb->list);
2153 pring->txcmplq_cnt--; 2169 if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
2170 pring->txcmplq_cnt--;
2171 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
2172 }
2154 return cmd_iocb; 2173 return cmd_iocb;
2155 } 2174 }
2156 2175
@@ -2183,7 +2202,10 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2183 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2202 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2184 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2203 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2185 list_del_init(&cmd_iocb->list); 2204 list_del_init(&cmd_iocb->list);
2186 pring->txcmplq_cnt--; 2205 if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
2206 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
2207 pring->txcmplq_cnt--;
2208 }
2187 return cmd_iocb; 2209 return cmd_iocb;
2188 } 2210 }
2189 2211
@@ -3564,13 +3586,16 @@ static int
3564lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) 3586lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
3565{ 3587{
3566 struct lpfc_sli *psli = &phba->sli; 3588 struct lpfc_sli *psli = &phba->sli;
3567 3589 uint32_t hba_aer_enabled;
3568 3590
3569 /* Restart HBA */ 3591 /* Restart HBA */
3570 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3592 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3571 "0296 Restart HBA Data: x%x x%x\n", 3593 "0296 Restart HBA Data: x%x x%x\n",
3572 phba->pport->port_state, psli->sli_flag); 3594 phba->pport->port_state, psli->sli_flag);
3573 3595
3596 /* Take PCIe device Advanced Error Reporting (AER) state */
3597 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
3598
3574 lpfc_sli4_brdreset(phba); 3599 lpfc_sli4_brdreset(phba);
3575 3600
3576 spin_lock_irq(&phba->hbalock); 3601 spin_lock_irq(&phba->hbalock);
@@ -3582,6 +3607,10 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
3582 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 3607 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
3583 psli->stats_start = get_seconds(); 3608 psli->stats_start = get_seconds();
3584 3609
3610 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
3611 if (hba_aer_enabled)
3612 pci_disable_pcie_error_reporting(phba->pcidev);
3613
3585 lpfc_hba_down_post(phba); 3614 lpfc_hba_down_post(phba);
3586 3615
3587 return 0; 3616 return 0;
@@ -3794,7 +3823,7 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
3794 3823
3795 phba->link_state = LPFC_HBA_ERROR; 3824 phba->link_state = LPFC_HBA_ERROR;
3796 mempool_free(pmb, phba->mbox_mem_pool); 3825 mempool_free(pmb, phba->mbox_mem_pool);
3797 return ENXIO; 3826 return -ENXIO;
3798 } 3827 }
3799 } 3828 }
3800 phba->hbq_count = hbq_count; 3829 phba->hbq_count = hbq_count;
@@ -3885,7 +3914,6 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
3885 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | 3914 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
3886 LPFC_SLI3_HBQ_ENABLED | 3915 LPFC_SLI3_HBQ_ENABLED |
3887 LPFC_SLI3_CRP_ENABLED | 3916 LPFC_SLI3_CRP_ENABLED |
3888 LPFC_SLI3_INB_ENABLED |
3889 LPFC_SLI3_BG_ENABLED); 3917 LPFC_SLI3_BG_ENABLED);
3890 if (rc != MBX_SUCCESS) { 3918 if (rc != MBX_SUCCESS) {
3891 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3919 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -3927,20 +3955,9 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
3927 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; 3955 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
3928 if (pmb->u.mb.un.varCfgPort.gcrp) 3956 if (pmb->u.mb.un.varCfgPort.gcrp)
3929 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; 3957 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
3930 if (pmb->u.mb.un.varCfgPort.ginb) { 3958
3931 phba->sli3_options |= LPFC_SLI3_INB_ENABLED; 3959 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
3932 phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get; 3960 phba->port_gp = phba->mbox->us.s3_pgp.port;
3933 phba->port_gp = phba->mbox->us.s3_inb_pgp.port;
3934 phba->inb_ha_copy = &phba->mbox->us.s3_inb_pgp.ha_copy;
3935 phba->inb_counter = &phba->mbox->us.s3_inb_pgp.counter;
3936 phba->inb_last_counter =
3937 phba->mbox->us.s3_inb_pgp.counter;
3938 } else {
3939 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
3940 phba->port_gp = phba->mbox->us.s3_pgp.port;
3941 phba->inb_ha_copy = NULL;
3942 phba->inb_counter = NULL;
3943 }
3944 3961
3945 if (phba->cfg_enable_bg) { 3962 if (phba->cfg_enable_bg) {
3946 if (pmb->u.mb.un.varCfgPort.gbg) 3963 if (pmb->u.mb.un.varCfgPort.gbg)
@@ -3953,8 +3970,6 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
3953 } else { 3970 } else {
3954 phba->hbq_get = NULL; 3971 phba->hbq_get = NULL;
3955 phba->port_gp = phba->mbox->us.s2.port; 3972 phba->port_gp = phba->mbox->us.s2.port;
3956 phba->inb_ha_copy = NULL;
3957 phba->inb_counter = NULL;
3958 phba->max_vpi = 0; 3973 phba->max_vpi = 0;
3959 } 3974 }
3960do_prep_failed: 3975do_prep_failed:
@@ -4214,7 +4229,8 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4214 if (mqe->un.read_rev.avail_vpd_len < *vpd_size) 4229 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
4215 *vpd_size = mqe->un.read_rev.avail_vpd_len; 4230 *vpd_size = mqe->un.read_rev.avail_vpd_len;
4216 4231
4217 lpfc_sli_pcimem_bcopy(dmabuf->virt, vpd, *vpd_size); 4232 memcpy(vpd, dmabuf->virt, *vpd_size);
4233
4218 dma_free_coherent(&phba->pcidev->dev, dma_size, 4234 dma_free_coherent(&phba->pcidev->dev, dma_size,
4219 dmabuf->virt, dmabuf->phys); 4235 dmabuf->virt, dmabuf->phys);
4220 kfree(dmabuf); 4236 kfree(dmabuf);
@@ -4539,6 +4555,24 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4539 /* Start error attention (ERATT) polling timer */ 4555 /* Start error attention (ERATT) polling timer */
4540 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); 4556 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
4541 4557
4558 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
4559 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
4560 rc = pci_enable_pcie_error_reporting(phba->pcidev);
4561 if (!rc) {
4562 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4563 "2829 This device supports "
4564 "Advanced Error Reporting (AER)\n");
4565 spin_lock_irq(&phba->hbalock);
4566 phba->hba_flag |= HBA_AER_ENABLED;
4567 spin_unlock_irq(&phba->hbalock);
4568 } else {
4569 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4570 "2830 This device does not support "
4571 "Advanced Error Reporting (AER)\n");
4572 phba->cfg_aer_support = 0;
4573 }
4574 }
4575
4542 /* 4576 /*
4543 * The port is ready, set the host's link state to LINK_DOWN 4577 * The port is ready, set the host's link state to LINK_DOWN
4544 * in preparation for link interrupts. 4578 * in preparation for link interrupts.
@@ -5265,7 +5299,8 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
5265 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 5299 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
5266 bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status); 5300 bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status);
5267 rc = MBXERR_ERROR; 5301 rc = MBXERR_ERROR;
5268 } 5302 } else
5303 lpfc_sli4_swap_str(phba, mboxq);
5269 5304
5270 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 5305 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5271 "(%d):0356 Mailbox cmd x%x (x%x) Status x%x " 5306 "(%d):0356 Mailbox cmd x%x (x%x) Status x%x "
@@ -5592,7 +5627,7 @@ lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5592 * iocb to the txq when SLI layer cannot submit the command iocb 5627 * iocb to the txq when SLI layer cannot submit the command iocb
5593 * to the ring. 5628 * to the ring.
5594 **/ 5629 **/
5595static void 5630void
5596__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 5631__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5597 struct lpfc_iocbq *piocb) 5632 struct lpfc_iocbq *piocb)
5598{ 5633{
@@ -6209,7 +6244,6 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6209 struct lpfc_iocbq *piocb, uint32_t flag) 6244 struct lpfc_iocbq *piocb, uint32_t flag)
6210{ 6245{
6211 struct lpfc_sglq *sglq; 6246 struct lpfc_sglq *sglq;
6212 uint16_t xritag;
6213 union lpfc_wqe wqe; 6247 union lpfc_wqe wqe;
6214 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; 6248 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
6215 6249
@@ -6218,10 +6252,26 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6218 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 6252 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
6219 sglq = NULL; 6253 sglq = NULL;
6220 else { 6254 else {
6255 if (pring->txq_cnt) {
6256 if (!(flag & SLI_IOCB_RET_IOCB)) {
6257 __lpfc_sli_ringtx_put(phba,
6258 pring, piocb);
6259 return IOCB_SUCCESS;
6260 } else {
6261 return IOCB_BUSY;
6262 }
6263 } else {
6221 sglq = __lpfc_sli_get_sglq(phba); 6264 sglq = __lpfc_sli_get_sglq(phba);
6222 if (!sglq) 6265 if (!sglq) {
6223 return IOCB_ERROR; 6266 if (!(flag & SLI_IOCB_RET_IOCB)) {
6224 piocb->sli4_xritag = sglq->sli4_xritag; 6267 __lpfc_sli_ringtx_put(phba,
6268 pring,
6269 piocb);
6270 return IOCB_SUCCESS;
6271 } else
6272 return IOCB_BUSY;
6273 }
6274 }
6225 } 6275 }
6226 } else if (piocb->iocb_flag & LPFC_IO_FCP) { 6276 } else if (piocb->iocb_flag & LPFC_IO_FCP) {
6227 sglq = NULL; /* These IO's already have an XRI and 6277 sglq = NULL; /* These IO's already have an XRI and
@@ -6237,8 +6287,9 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6237 } 6287 }
6238 6288
6239 if (sglq) { 6289 if (sglq) {
6240 xritag = lpfc_sli4_bpl2sgl(phba, piocb, sglq); 6290 piocb->sli4_xritag = sglq->sli4_xritag;
6241 if (xritag != sglq->sli4_xritag) 6291
6292 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
6242 return IOCB_ERROR; 6293 return IOCB_ERROR;
6243 } 6294 }
6244 6295
@@ -6278,7 +6329,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6278 * IOCB_SUCCESS - Success 6329 * IOCB_SUCCESS - Success
6279 * IOCB_BUSY - Busy 6330 * IOCB_BUSY - Busy
6280 **/ 6331 **/
6281static inline int 6332int
6282__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 6333__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
6283 struct lpfc_iocbq *piocb, uint32_t flag) 6334 struct lpfc_iocbq *piocb, uint32_t flag)
6284{ 6335{
@@ -7095,13 +7146,6 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7095 */ 7146 */
7096 abort_iocb = phba->sli.iocbq_lookup[abort_context]; 7147 abort_iocb = phba->sli.iocbq_lookup[abort_context];
7097 7148
7098 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
7099 "0327 Cannot abort els iocb %p "
7100 "with tag %x context %x, abort status %x, "
7101 "abort code %x\n",
7102 abort_iocb, abort_iotag, abort_context,
7103 irsp->ulpStatus, irsp->un.ulpWord[4]);
7104
7105 /* 7149 /*
7106 * If the iocb is not found in Firmware queue the iocb 7150 * If the iocb is not found in Firmware queue the iocb
7107 * might have completed already. Do not free it again. 7151 * might have completed already. Do not free it again.
@@ -7120,6 +7164,13 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7120 if (abort_iocb && phba->sli_rev == LPFC_SLI_REV4) 7164 if (abort_iocb && phba->sli_rev == LPFC_SLI_REV4)
7121 abort_context = abort_iocb->iocb.ulpContext; 7165 abort_context = abort_iocb->iocb.ulpContext;
7122 } 7166 }
7167
7168 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
7169 "0327 Cannot abort els iocb %p "
7170 "with tag %x context %x, abort status %x, "
7171 "abort code %x\n",
7172 abort_iocb, abort_iotag, abort_context,
7173 irsp->ulpStatus, irsp->un.ulpWord[4]);
7123 /* 7174 /*
7124 * make sure we have the right iocbq before taking it 7175 * make sure we have the right iocbq before taking it
7125 * off the txcmplq and try to call completion routine. 7176 * off the txcmplq and try to call completion routine.
@@ -7137,7 +7188,10 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7137 * following abort XRI from the HBA. 7188 * following abort XRI from the HBA.
7138 */ 7189 */
7139 list_del_init(&abort_iocb->list); 7190 list_del_init(&abort_iocb->list);
7140 pring->txcmplq_cnt--; 7191 if (abort_iocb->iocb_flag & LPFC_IO_ON_Q) {
7192 abort_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
7193 pring->txcmplq_cnt--;
7194 }
7141 7195
7142 /* Firmware could still be in progress of DMAing 7196 /* Firmware could still be in progress of DMAing
7143 * payload, so don't free data buffer till after 7197 * payload, so don't free data buffer till after
@@ -7269,8 +7323,9 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7269 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 7323 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
7270 "0339 Abort xri x%x, original iotag x%x, " 7324 "0339 Abort xri x%x, original iotag x%x, "
7271 "abort cmd iotag x%x\n", 7325 "abort cmd iotag x%x\n",
7326 iabt->un.acxri.abortIoTag,
7272 iabt->un.acxri.abortContextTag, 7327 iabt->un.acxri.abortContextTag,
7273 iabt->un.acxri.abortIoTag, abtsiocbp->iotag); 7328 abtsiocbp->iotag);
7274 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0); 7329 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0);
7275 7330
7276 if (retval) 7331 if (retval)
@@ -7600,7 +7655,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
7600 long timeleft, timeout_req = 0; 7655 long timeleft, timeout_req = 0;
7601 int retval = IOCB_SUCCESS; 7656 int retval = IOCB_SUCCESS;
7602 uint32_t creg_val; 7657 uint32_t creg_val;
7603 7658 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
7604 /* 7659 /*
7605 * If the caller has provided a response iocbq buffer, then context2 7660 * If the caller has provided a response iocbq buffer, then context2
7606 * is NULL or its an error. 7661 * is NULL or its an error.
@@ -7622,7 +7677,8 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
7622 readl(phba->HCregaddr); /* flush */ 7677 readl(phba->HCregaddr); /* flush */
7623 } 7678 }
7624 7679
7625 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 0); 7680 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
7681 SLI_IOCB_RET_IOCB);
7626 if (retval == IOCB_SUCCESS) { 7682 if (retval == IOCB_SUCCESS) {
7627 timeout_req = timeout * HZ; 7683 timeout_req = timeout * HZ;
7628 timeleft = wait_event_timeout(done_q, 7684 timeleft = wait_event_timeout(done_q,
@@ -7644,6 +7700,11 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
7644 timeout, (timeleft / jiffies)); 7700 timeout, (timeleft / jiffies));
7645 retval = IOCB_TIMEDOUT; 7701 retval = IOCB_TIMEDOUT;
7646 } 7702 }
7703 } else if (retval == IOCB_BUSY) {
7704 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7705 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
7706 phba->iocb_cnt, pring->txq_cnt, pring->txcmplq_cnt);
7707 return retval;
7647 } else { 7708 } else {
7648 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 7709 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7649 "0332 IOCB wait issue failed, Data x%x\n", 7710 "0332 IOCB wait issue failed, Data x%x\n",
@@ -7724,9 +7785,10 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
7724 * if LPFC_MBX_WAKE flag is set the mailbox is completed 7785 * if LPFC_MBX_WAKE flag is set the mailbox is completed
7725 * else do not free the resources. 7786 * else do not free the resources.
7726 */ 7787 */
7727 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) 7788 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
7728 retval = MBX_SUCCESS; 7789 retval = MBX_SUCCESS;
7729 else { 7790 lpfc_sli4_swap_str(phba, pmboxq);
7791 } else {
7730 retval = MBX_TIMEOUT; 7792 retval = MBX_TIMEOUT;
7731 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 7793 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
7732 } 7794 }
@@ -8789,12 +8851,17 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
8789{ 8851{
8790 struct lpfc_iocbq *irspiocbq; 8852 struct lpfc_iocbq *irspiocbq;
8791 unsigned long iflags; 8853 unsigned long iflags;
8854 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
8792 8855
8793 /* Get an irspiocbq for later ELS response processing use */ 8856 /* Get an irspiocbq for later ELS response processing use */
8794 irspiocbq = lpfc_sli_get_iocbq(phba); 8857 irspiocbq = lpfc_sli_get_iocbq(phba);
8795 if (!irspiocbq) { 8858 if (!irspiocbq) {
8796 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8859 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8797 "0387 Failed to allocate an iocbq\n"); 8860 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
8861 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
8862 pring->txq_cnt, phba->iocb_cnt,
8863 phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt,
8864 phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt);
8798 return false; 8865 return false;
8799 } 8866 }
8800 8867
@@ -9043,9 +9110,10 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
9043 } 9110 }
9044 } 9111 }
9045 if (unlikely(!cq)) { 9112 if (unlikely(!cq)) {
9046 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9113 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
9047 "0365 Slow-path CQ identifier (%d) does " 9114 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9048 "not exist\n", cqid); 9115 "0365 Slow-path CQ identifier "
9116 "(%d) does not exist\n", cqid);
9049 return; 9117 return;
9050 } 9118 }
9051 9119
@@ -9208,6 +9276,7 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
9208{ 9276{
9209 struct lpfc_wcqe_release wcqe; 9277 struct lpfc_wcqe_release wcqe;
9210 bool workposted = false; 9278 bool workposted = false;
9279 unsigned long iflag;
9211 9280
9212 /* Copy the work queue CQE and convert endian order if needed */ 9281 /* Copy the work queue CQE and convert endian order if needed */
9213 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe)); 9282 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
@@ -9216,6 +9285,9 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
9216 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { 9285 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
9217 case CQE_CODE_COMPL_WQE: 9286 case CQE_CODE_COMPL_WQE:
9218 /* Process the WQ complete event */ 9287 /* Process the WQ complete event */
9288 spin_lock_irqsave(&phba->hbalock, iflag);
9289 phba->last_completion_time = jiffies;
9290 spin_unlock_irqrestore(&phba->hbalock, iflag);
9219 lpfc_sli4_fp_handle_fcp_wcqe(phba, 9291 lpfc_sli4_fp_handle_fcp_wcqe(phba,
9220 (struct lpfc_wcqe_complete *)&wcqe); 9292 (struct lpfc_wcqe_complete *)&wcqe);
9221 break; 9293 break;
@@ -9271,9 +9343,10 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
9271 9343
9272 cq = phba->sli4_hba.fcp_cq[fcp_cqidx]; 9344 cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
9273 if (unlikely(!cq)) { 9345 if (unlikely(!cq)) {
9274 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9346 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
9275 "0367 Fast-path completion queue does not " 9347 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9276 "exist\n"); 9348 "0367 Fast-path completion queue "
9349 "does not exist\n");
9277 return; 9350 return;
9278 } 9351 }
9279 9352
@@ -11898,12 +11971,26 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
11898 * available rpis maintained by the driver. 11971 * available rpis maintained by the driver.
11899 **/ 11972 **/
11900void 11973void
11974__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
11975{
11976 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
11977 phba->sli4_hba.rpi_count--;
11978 phba->sli4_hba.max_cfg_param.rpi_used--;
11979 }
11980}
11981
11982/**
11983 * lpfc_sli4_free_rpi - Release an rpi for reuse.
11984 * @phba: pointer to lpfc hba data structure.
11985 *
11986 * This routine is invoked to release an rpi to the pool of
11987 * available rpis maintained by the driver.
11988 **/
11989void
11901lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) 11990lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
11902{ 11991{
11903 spin_lock_irq(&phba->hbalock); 11992 spin_lock_irq(&phba->hbalock);
11904 clear_bit(rpi, phba->sli4_hba.rpi_bmask); 11993 __lpfc_sli4_free_rpi(phba, rpi);
11905 phba->sli4_hba.rpi_count--;
11906 phba->sli4_hba.max_cfg_param.rpi_used--;
11907 spin_unlock_irq(&phba->hbalock); 11994 spin_unlock_irq(&phba->hbalock);
11908} 11995}
11909 11996
@@ -12318,18 +12405,47 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
12318{ 12405{
12319 uint16_t next_fcf_index; 12406 uint16_t next_fcf_index;
12320 12407
12321 /* Search from the currently registered FCF index */ 12408 /* Search start from next bit of currently registered FCF index */
12409 next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) %
12410 LPFC_SLI4_FCF_TBL_INDX_MAX;
12322 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 12411 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
12323 LPFC_SLI4_FCF_TBL_INDX_MAX, 12412 LPFC_SLI4_FCF_TBL_INDX_MAX,
12324 phba->fcf.current_rec.fcf_indx); 12413 next_fcf_index);
12414
12325 /* Wrap around condition on phba->fcf.fcf_rr_bmask */ 12415 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
12326 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) 12416 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
12327 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 12417 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
12328 LPFC_SLI4_FCF_TBL_INDX_MAX, 0); 12418 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
12329 /* Round robin failover stop condition */ 12419
12330 if (next_fcf_index == phba->fcf.fcf_rr_init_indx) 12420 /* Check roundrobin failover list empty condition */
12421 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
12422 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
12423 "2844 No roundrobin failover FCF available\n");
12331 return LPFC_FCOE_FCF_NEXT_NONE; 12424 return LPFC_FCOE_FCF_NEXT_NONE;
12425 }
12426
12427 /* Check roundrobin failover index bmask stop condition */
12428 if (next_fcf_index == phba->fcf.fcf_rr_init_indx) {
12429 if (!(phba->fcf.fcf_flag & FCF_REDISC_RRU)) {
12430 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
12431 "2847 Round robin failover FCF index "
12432 "search hit stop condition:x%x\n",
12433 next_fcf_index);
12434 return LPFC_FCOE_FCF_NEXT_NONE;
12435 }
12436 /* The roundrobin failover index bmask updated, start over */
12437 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
12438 "2848 Round robin failover FCF index bmask "
12439 "updated, start over\n");
12440 spin_lock_irq(&phba->hbalock);
12441 phba->fcf.fcf_flag &= ~FCF_REDISC_RRU;
12442 spin_unlock_irq(&phba->hbalock);
12443 return phba->fcf.fcf_rr_init_indx;
12444 }
12332 12445
12446 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
12447 "2845 Get next round robin failover "
12448 "FCF index x%x\n", next_fcf_index);
12333 return next_fcf_index; 12449 return next_fcf_index;
12334} 12450}
12335 12451
@@ -12359,11 +12475,20 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
12359 /* Set the eligible FCF record index bmask */ 12475 /* Set the eligible FCF record index bmask */
12360 set_bit(fcf_index, phba->fcf.fcf_rr_bmask); 12476 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
12361 12477
12478 /* Set the roundrobin index bmask updated */
12479 spin_lock_irq(&phba->hbalock);
12480 phba->fcf.fcf_flag |= FCF_REDISC_RRU;
12481 spin_unlock_irq(&phba->hbalock);
12482
12483 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
12484 "2790 Set FCF index x%x to round robin failover "
12485 "bmask\n", fcf_index);
12486
12362 return 0; 12487 return 0;
12363} 12488}
12364 12489
12365/** 12490/**
12366 * lpfc_sli4_fcf_rr_index_set - Clear bmask from eligible fcf record index 12491 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
12367 * @phba: pointer to lpfc hba data structure. 12492 * @phba: pointer to lpfc hba data structure.
12368 * 12493 *
12369 * This routine clears the FCF record index from the eligible bmask for 12494 * This routine clears the FCF record index from the eligible bmask for
@@ -12384,6 +12509,10 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
12384 } 12509 }
12385 /* Clear the eligible FCF record index bmask */ 12510 /* Clear the eligible FCF record index bmask */
12386 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); 12511 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
12512
12513 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
12514 "2791 Clear FCF index x%x from round robin failover "
12515 "bmask\n", fcf_index);
12387} 12516}
12388 12517
12389/** 12518/**
@@ -12446,7 +12575,7 @@ lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
12446} 12575}
12447 12576
12448/** 12577/**
12449 * lpfc_sli4_redisc_all_fcf - Request to rediscover entire FCF table by port. 12578 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
12450 * @phba: pointer to lpfc hba data structure. 12579 * @phba: pointer to lpfc hba data structure.
12451 * 12580 *
12452 * This routine is invoked to request for rediscovery of the entire FCF table 12581 * This routine is invoked to request for rediscovery of the entire FCF table
@@ -12662,6 +12791,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
12662 LPFC_MBOXQ_t *mb, *nextmb; 12791 LPFC_MBOXQ_t *mb, *nextmb;
12663 struct lpfc_dmabuf *mp; 12792 struct lpfc_dmabuf *mp;
12664 struct lpfc_nodelist *ndlp; 12793 struct lpfc_nodelist *ndlp;
12794 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
12665 12795
12666 spin_lock_irq(&phba->hbalock); 12796 spin_lock_irq(&phba->hbalock);
12667 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 12797 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
@@ -12673,6 +12803,9 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
12673 continue; 12803 continue;
12674 12804
12675 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 12805 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
12806 if (phba->sli_rev == LPFC_SLI_REV4)
12807 __lpfc_sli4_free_rpi(phba,
12808 mb->u.mb.un.varRegLogin.rpi);
12676 mp = (struct lpfc_dmabuf *) (mb->context1); 12809 mp = (struct lpfc_dmabuf *) (mb->context1);
12677 if (mp) { 12810 if (mp) {
12678 __lpfc_mbuf_free(phba, mp->virt, mp->phys); 12811 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -12680,6 +12813,9 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
12680 } 12813 }
12681 ndlp = (struct lpfc_nodelist *) mb->context2; 12814 ndlp = (struct lpfc_nodelist *) mb->context2;
12682 if (ndlp) { 12815 if (ndlp) {
12816 spin_lock_irq(shost->host_lock);
12817 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
12818 spin_unlock_irq(shost->host_lock);
12683 lpfc_nlp_put(ndlp); 12819 lpfc_nlp_put(ndlp);
12684 mb->context2 = NULL; 12820 mb->context2 = NULL;
12685 } 12821 }
@@ -12695,6 +12831,9 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
12695 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 12831 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
12696 ndlp = (struct lpfc_nodelist *) mb->context2; 12832 ndlp = (struct lpfc_nodelist *) mb->context2;
12697 if (ndlp) { 12833 if (ndlp) {
12834 spin_lock_irq(shost->host_lock);
12835 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
12836 spin_unlock_irq(shost->host_lock);
12698 lpfc_nlp_put(ndlp); 12837 lpfc_nlp_put(ndlp);
12699 mb->context2 = NULL; 12838 mb->context2 = NULL;
12700 } 12839 }
@@ -12705,3 +12844,85 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
12705 spin_unlock_irq(&phba->hbalock); 12844 spin_unlock_irq(&phba->hbalock);
12706} 12845}
12707 12846
12847/**
12848 * lpfc_drain_txq - Drain the txq
12849 * @phba: Pointer to HBA context object.
12850 *
12851 * This function attempt to submit IOCBs on the txq
12852 * to the adapter. For SLI4 adapters, the txq contains
12853 * ELS IOCBs that have been deferred because the there
12854 * are no SGLs. This congestion can occur with large
12855 * vport counts during node discovery.
12856 **/
12857
12858uint32_t
12859lpfc_drain_txq(struct lpfc_hba *phba)
12860{
12861 LIST_HEAD(completions);
12862 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
12863 struct lpfc_iocbq *piocbq = 0;
12864 unsigned long iflags = 0;
12865 char *fail_msg = NULL;
12866 struct lpfc_sglq *sglq;
12867 union lpfc_wqe wqe;
12868
12869 spin_lock_irqsave(&phba->hbalock, iflags);
12870 if (pring->txq_cnt > pring->txq_max)
12871 pring->txq_max = pring->txq_cnt;
12872
12873 spin_unlock_irqrestore(&phba->hbalock, iflags);
12874
12875 while (pring->txq_cnt) {
12876 spin_lock_irqsave(&phba->hbalock, iflags);
12877
12878 sglq = __lpfc_sli_get_sglq(phba);
12879 if (!sglq) {
12880 spin_unlock_irqrestore(&phba->hbalock, iflags);
12881 break;
12882 } else {
12883 piocbq = lpfc_sli_ringtx_get(phba, pring);
12884 if (!piocbq) {
12885 /* The txq_cnt out of sync. This should
12886 * never happen
12887 */
12888 sglq = __lpfc_clear_active_sglq(phba,
12889 sglq->sli4_xritag);
12890 spin_unlock_irqrestore(&phba->hbalock, iflags);
12891 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12892 "2823 txq empty and txq_cnt is %d\n ",
12893 pring->txq_cnt);
12894 break;
12895 }
12896 }
12897
12898 /* The xri and iocb resources secured,
12899 * attempt to issue request
12900 */
12901 piocbq->sli4_xritag = sglq->sli4_xritag;
12902 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
12903 fail_msg = "to convert bpl to sgl";
12904 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
12905 fail_msg = "to convert iocb to wqe";
12906 else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
12907 fail_msg = " - Wq is full";
12908 else
12909 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
12910
12911 if (fail_msg) {
12912 /* Failed means we can't issue and need to cancel */
12913 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12914 "2822 IOCB failed %s iotag 0x%x "
12915 "xri 0x%x\n",
12916 fail_msg,
12917 piocbq->iotag, piocbq->sli4_xritag);
12918 list_add_tail(&piocbq->list, &completions);
12919 }
12920 spin_unlock_irqrestore(&phba->hbalock, iflags);
12921 }
12922
12923 /* Cancel all the IOCBs that cannot be issued */
12924 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
12925 IOERR_SLI_ABORTED);
12926
12927 return pring->txq_cnt;
12928}
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index e3792151ca06..cd56d6cce6c3 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -48,6 +48,7 @@ struct lpfc_iocbq {
48 /* lpfc_iocbqs are used in double linked lists */ 48 /* lpfc_iocbqs are used in double linked lists */
49 struct list_head list; 49 struct list_head list;
50 struct list_head clist; 50 struct list_head clist;
51 struct list_head dlist;
51 uint16_t iotag; /* pre-assigned IO tag */ 52 uint16_t iotag; /* pre-assigned IO tag */
52 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ 53 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
53 struct lpfc_cq_event cq_event; 54 struct lpfc_cq_event cq_event;
@@ -64,6 +65,7 @@ struct lpfc_iocbq {
64#define LPFC_EXCHANGE_BUSY 0x40 /* SLI4 hba reported XB in response */ 65#define LPFC_EXCHANGE_BUSY 0x40 /* SLI4 hba reported XB in response */
65#define LPFC_USE_FCPWQIDX 0x80 /* Submit to specified FCPWQ index */ 66#define LPFC_USE_FCPWQIDX 0x80 /* Submit to specified FCPWQ index */
66#define DSS_SECURITY_OP 0x100 /* security IO */ 67#define DSS_SECURITY_OP 0x100 /* security IO */
68#define LPFC_IO_ON_Q 0x200 /* The IO is still on the TXCMPLQ */
67 69
68#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */ 70#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */
69#define LPFC_FIP_ELS_ID_SHIFT 14 71#define LPFC_FIP_ELS_ID_SHIFT 14
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 58bb4c81b54e..a3b24d99a2a7 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -51,6 +51,9 @@
51#define LPFC_FCOE_FCF_GET_FIRST 0xFFFF 51#define LPFC_FCOE_FCF_GET_FIRST 0xFFFF
52#define LPFC_FCOE_FCF_NEXT_NONE 0xFFFF 52#define LPFC_FCOE_FCF_NEXT_NONE 0xFFFF
53 53
54#define LPFC_FCOE_NULL_VID 0xFFF
55#define LPFC_FCOE_IGNORE_VID 0xFFFF
56
54/* First 3 bytes of default FCF MAC is specified by FC_MAP */ 57/* First 3 bytes of default FCF MAC is specified by FC_MAP */
55#define LPFC_FCOE_FCF_MAC3 0xFF 58#define LPFC_FCOE_FCF_MAC3 0xFF
56#define LPFC_FCOE_FCF_MAC4 0xFF 59#define LPFC_FCOE_FCF_MAC4 0xFF
@@ -58,7 +61,7 @@
58#define LPFC_FCOE_FCF_MAP0 0x0E 61#define LPFC_FCOE_FCF_MAP0 0x0E
59#define LPFC_FCOE_FCF_MAP1 0xFC 62#define LPFC_FCOE_FCF_MAP1 0xFC
60#define LPFC_FCOE_FCF_MAP2 0x00 63#define LPFC_FCOE_FCF_MAP2 0x00
61#define LPFC_FCOE_MAX_RCV_SIZE 0x5AC 64#define LPFC_FCOE_MAX_RCV_SIZE 0x800
62#define LPFC_FCOE_FKA_ADV_PER 0 65#define LPFC_FCOE_FKA_ADV_PER 0
63#define LPFC_FCOE_FIP_PRIORITY 0x80 66#define LPFC_FCOE_FIP_PRIORITY 0x80
64 67
@@ -160,6 +163,7 @@ struct lpfc_fcf {
160#define FCF_REDISC_PEND 0x80 /* FCF rediscovery pending */ 163#define FCF_REDISC_PEND 0x80 /* FCF rediscovery pending */
161#define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */ 164#define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */
162#define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */ 165#define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */
166#define FCF_REDISC_RRU 0x400 /* Roundrobin bitmap updated */
163 uint32_t addr_mode; 167 uint32_t addr_mode;
164 uint16_t fcf_rr_init_indx; 168 uint16_t fcf_rr_init_indx;
165 uint32_t eligible_fcf_cnt; 169 uint32_t eligible_fcf_cnt;
@@ -382,6 +386,7 @@ struct lpfc_sli4_hba {
382 struct lpfc_pc_sli4_params pc_sli4_params; 386 struct lpfc_pc_sli4_params pc_sli4_params;
383 struct msix_entry *msix_entries; 387 struct msix_entry *msix_entries;
384 uint32_t cfg_eqn; 388 uint32_t cfg_eqn;
389 uint32_t msix_vec_nr;
385 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */ 390 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
386 /* Pointers to the constructed SLI4 queues */ 391 /* Pointers to the constructed SLI4 queues */
387 struct lpfc_queue **fp_eq; /* Fast-path event queue */ 392 struct lpfc_queue **fp_eq; /* Fast-path event queue */
@@ -524,6 +529,7 @@ int lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *);
524struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *); 529struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *);
525void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *); 530void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *);
526int lpfc_sli4_alloc_rpi(struct lpfc_hba *); 531int lpfc_sli4_alloc_rpi(struct lpfc_hba *);
532void __lpfc_sli4_free_rpi(struct lpfc_hba *, int);
527void lpfc_sli4_free_rpi(struct lpfc_hba *, int); 533void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
528void lpfc_sli4_remove_rpis(struct lpfc_hba *); 534void lpfc_sli4_remove_rpis(struct lpfc_hba *);
529void lpfc_sli4_async_event_proc(struct lpfc_hba *); 535void lpfc_sli4_async_event_proc(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 5294c3a515a1..d28830af71d8 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.12" 21#define LPFC_DRIVER_VERSION "8.3.15"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" 24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index ab91359bde20..1655507a682c 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -782,7 +782,7 @@ lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
782 int i; 782 int i;
783 if (vports == NULL) 783 if (vports == NULL)
784 return; 784 return;
785 for (i = 0; vports[i] != NULL && i <= phba->max_vports; i++) 785 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
786 scsi_host_put(lpfc_shost_from_vport(vports[i])); 786 scsi_host_put(lpfc_shost_from_vport(vports[i]));
787 kfree(vports); 787 kfree(vports);
788} 788}
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index dada0a13223f..4b1c2f0350f9 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -8,7 +8,7 @@
8 * scatter/gather formats. 8 * scatter/gather formats.
9 * Creation Date: June 21, 2006 9 * Creation Date: June 21, 2006
10 * 10 *
11 * mpi2.h Version: 02.00.14 11 * mpi2.h Version: 02.00.15
12 * 12 *
13 * Version History 13 * Version History
14 * --------------- 14 * ---------------
@@ -57,6 +57,10 @@
57 * Added MSI-x index mask and shift for Reply Post Host 57 * Added MSI-x index mask and shift for Reply Post Host
58 * Index register. 58 * Index register.
59 * Added function code for Host Based Discovery Action. 59 * Added function code for Host Based Discovery Action.
60 * 02-10-10 02.00.15 Bumped MPI2_HEADER_VERSION_UNIT.
61 * Added define for MPI2_FUNCTION_PWR_MGMT_CONTROL.
62 * Added defines for product-specific range of message
63 * function codes, 0xF0 to 0xFF.
60 * -------------------------------------------------------------------------- 64 * --------------------------------------------------------------------------
61 */ 65 */
62 66
@@ -82,7 +86,7 @@
82#define MPI2_VERSION_02_00 (0x0200) 86#define MPI2_VERSION_02_00 (0x0200)
83 87
84/* versioning for this MPI header set */ 88/* versioning for this MPI header set */
85#define MPI2_HEADER_VERSION_UNIT (0x0E) 89#define MPI2_HEADER_VERSION_UNIT (0x0F)
86#define MPI2_HEADER_VERSION_DEV (0x00) 90#define MPI2_HEADER_VERSION_DEV (0x00)
87#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) 91#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
88#define MPI2_HEADER_VERSION_UNIT_SHIFT (8) 92#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
@@ -473,8 +477,6 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION
473/***************************************************************************** 477/*****************************************************************************
474* 478*
475* Message Functions 479* Message Functions
476* 0x80 -> 0x8F reserved for private message use per product
477*
478* 480*
479*****************************************************************************/ 481*****************************************************************************/
480 482
@@ -506,6 +508,13 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION
506#define MPI2_FUNCTION_RAID_ACCELERATOR (0x2C) /* RAID Accelerator*/ 508#define MPI2_FUNCTION_RAID_ACCELERATOR (0x2C) /* RAID Accelerator*/
507/* Host Based Discovery Action */ 509/* Host Based Discovery Action */
508#define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION (0x2F) 510#define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION (0x2F)
511/* Power Management Control */
512#define MPI2_FUNCTION_PWR_MGMT_CONTROL (0x30)
513/* beginning of product-specific range */
514#define MPI2_FUNCTION_MIN_PRODUCT_SPECIFIC (0xF0)
515/* end of product-specific range */
516#define MPI2_FUNCTION_MAX_PRODUCT_SPECIFIC (0xFF)
517
509 518
510 519
511 520
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index d4e9d6f8452e..e3728d736d85 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -6,7 +6,7 @@
6 * Title: MPI Configuration messages and pages 6 * Title: MPI Configuration messages and pages
7 * Creation Date: November 10, 2006 7 * Creation Date: November 10, 2006
8 * 8 *
9 * mpi2_cnfg.h Version: 02.00.13 9 * mpi2_cnfg.h Version: 02.00.14
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -109,6 +109,18 @@
109 * Added Ethernet configuration pages. 109 * Added Ethernet configuration pages.
110 * 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY. 110 * 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY.
111 * Added SAS PHY Page 4 structure and defines. 111 * Added SAS PHY Page 4 structure and defines.
112 * 02-10-10 02.00.14 Modified the comments for the configuration page
113 * structures that contain an array of data. The host
114 * should use the "count" field in the page data (e.g. the
115 * NumPhys field) to determine the number of valid elements
116 * in the array.
117 * Added/modified some MPI2_MFGPAGE_DEVID_SAS defines.
118 * Added PowerManagementCapabilities to IO Unit Page 7.
119 * Added PortWidthModGroup field to
120 * MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS.
121 * Added MPI2_CONFIG_PAGE_SASIOUNIT_6 and related defines.
122 * Added MPI2_CONFIG_PAGE_SASIOUNIT_7 and related defines.
123 * Added MPI2_CONFIG_PAGE_SASIOUNIT_8 and related defines.
112 * -------------------------------------------------------------------------- 124 * --------------------------------------------------------------------------
113 */ 125 */
114 126
@@ -373,8 +385,9 @@ typedef struct _MPI2_CONFIG_REPLY
373#define MPI2_MFGPAGE_DEVID_SAS2208_4 (0x0083) 385#define MPI2_MFGPAGE_DEVID_SAS2208_4 (0x0083)
374#define MPI2_MFGPAGE_DEVID_SAS2208_5 (0x0084) 386#define MPI2_MFGPAGE_DEVID_SAS2208_5 (0x0084)
375#define MPI2_MFGPAGE_DEVID_SAS2208_6 (0x0085) 387#define MPI2_MFGPAGE_DEVID_SAS2208_6 (0x0085)
376#define MPI2_MFGPAGE_DEVID_SAS2208_7 (0x0086) 388#define MPI2_MFGPAGE_DEVID_SAS2308_1 (0x0086)
377#define MPI2_MFGPAGE_DEVID_SAS2208_8 (0x0087) 389#define MPI2_MFGPAGE_DEVID_SAS2308_2 (0x0087)
390#define MPI2_MFGPAGE_DEVID_SAS2308_3 (0x006E)
378 391
379 392
380/* Manufacturing Page 0 */ 393/* Manufacturing Page 0 */
@@ -540,7 +553,7 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_4
540 553
541/* 554/*
542 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to 555 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
543 * one and check Header.PageLength or NumPhys at runtime. 556 * one and check the value returned for NumPhys at runtime.
544 */ 557 */
545#ifndef MPI2_MAN_PAGE_5_PHY_ENTRIES 558#ifndef MPI2_MAN_PAGE_5_PHY_ENTRIES
546#define MPI2_MAN_PAGE_5_PHY_ENTRIES (1) 559#define MPI2_MAN_PAGE_5_PHY_ENTRIES (1)
@@ -618,7 +631,7 @@ typedef struct _MPI2_MANPAGE7_CONNECTOR_INFO
618 631
619/* 632/*
620 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to 633 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
621 * one and check NumPhys at runtime. 634 * one and check the value returned for NumPhys at runtime.
622 */ 635 */
623#ifndef MPI2_MANPAGE7_CONNECTOR_INFO_MAX 636#ifndef MPI2_MANPAGE7_CONNECTOR_INFO_MAX
624#define MPI2_MANPAGE7_CONNECTOR_INFO_MAX (1) 637#define MPI2_MANPAGE7_CONNECTOR_INFO_MAX (1)
@@ -731,7 +744,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1
731 744
732/* 745/*
733 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to 746 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
734 * one and check Header.PageLength at runtime. 747 * one and check the value returned for GPIOCount at runtime.
735 */ 748 */
736#ifndef MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX 749#ifndef MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX
737#define MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX (1) 750#define MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX (1)
@@ -760,7 +773,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_3
760 773
761/* 774/*
762 * Upper layer code (drivers, utilities, etc.) should leave this define set to 775 * Upper layer code (drivers, utilities, etc.) should leave this define set to
763 * one and check Header.PageLength or NumDmaEngines at runtime. 776 * one and check the value returned for NumDmaEngines at runtime.
764 */ 777 */
765#ifndef MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES 778#ifndef MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES
766#define MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES (1) 779#define MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES (1)
@@ -823,7 +836,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
823 U8 PCIeWidth; /* 0x06 */ 836 U8 PCIeWidth; /* 0x06 */
824 U8 PCIeSpeed; /* 0x07 */ 837 U8 PCIeSpeed; /* 0x07 */
825 U32 ProcessorState; /* 0x08 */ 838 U32 ProcessorState; /* 0x08 */
826 U32 Reserved2; /* 0x0C */ 839 U32 PowerManagementCapabilities; /* 0x0C */
827 U16 IOCTemperature; /* 0x10 */ 840 U16 IOCTemperature; /* 0x10 */
828 U8 IOCTemperatureUnits; /* 0x12 */ 841 U8 IOCTemperatureUnits; /* 0x12 */
829 U8 IOCSpeed; /* 0x13 */ 842 U8 IOCSpeed; /* 0x13 */
@@ -831,7 +844,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
831} MPI2_CONFIG_PAGE_IO_UNIT_7, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_7, 844} MPI2_CONFIG_PAGE_IO_UNIT_7, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_7,
832 Mpi2IOUnitPage7_t, MPI2_POINTER pMpi2IOUnitPage7_t; 845 Mpi2IOUnitPage7_t, MPI2_POINTER pMpi2IOUnitPage7_t;
833 846
834#define MPI2_IOUNITPAGE7_PAGEVERSION (0x00) 847#define MPI2_IOUNITPAGE7_PAGEVERSION (0x01)
835 848
836/* defines for IO Unit Page 7 PCIeWidth field */ 849/* defines for IO Unit Page 7 PCIeWidth field */
837#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X1 (0x01) 850#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X1 (0x01)
@@ -852,6 +865,14 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
852#define MPI2_IOUNITPAGE7_PSTATE_DISABLED (0x01) 865#define MPI2_IOUNITPAGE7_PSTATE_DISABLED (0x01)
853#define MPI2_IOUNITPAGE7_PSTATE_ENABLED (0x02) 866#define MPI2_IOUNITPAGE7_PSTATE_ENABLED (0x02)
854 867
868/* defines for IO Unit Page 7 PowerManagementCapabilities field */
869#define MPI2_IOUNITPAGE7_PMCAP_12_5_PCT_IOCSPEED (0x00000400)
870#define MPI2_IOUNITPAGE7_PMCAP_25_0_PCT_IOCSPEED (0x00000200)
871#define MPI2_IOUNITPAGE7_PMCAP_50_0_PCT_IOCSPEED (0x00000100)
872#define MPI2_IOUNITPAGE7_PMCAP_PCIE_WIDTH_CHANGE (0x00000008)
873#define MPI2_IOUNITPAGE7_PMCAP_PCIE_SPEED_CHANGE (0x00000004)
874
875
855/* defines for IO Unit Page 7 IOCTemperatureUnits field */ 876/* defines for IO Unit Page 7 IOCTemperatureUnits field */
856#define MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT (0x00) 877#define MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT (0x00)
857#define MPI2_IOUNITPAGE7_IOC_TEMP_FAHRENHEIT (0x01) 878#define MPI2_IOUNITPAGE7_IOC_TEMP_FAHRENHEIT (0x01)
@@ -1195,7 +1216,7 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_3
1195 1216
1196/* 1217/*
1197 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to 1218 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
1198 * one and check Header.PageLength or NumPhys at runtime. 1219 * one and check the value returned for NumPhys at runtime.
1199 */ 1220 */
1200#ifndef MPI2_BIOS_PAGE_4_PHY_ENTRIES 1221#ifndef MPI2_BIOS_PAGE_4_PHY_ENTRIES
1201#define MPI2_BIOS_PAGE_4_PHY_ENTRIES (1) 1222#define MPI2_BIOS_PAGE_4_PHY_ENTRIES (1)
@@ -1269,7 +1290,7 @@ typedef struct _MPI2_RAIDVOL0_SETTINGS
1269 1290
1270/* 1291/*
1271 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to 1292 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
1272 * one and check Header.PageLength at runtime. 1293 * one and check the value returned for NumPhysDisks at runtime.
1273 */ 1294 */
1274#ifndef MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX 1295#ifndef MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX
1275#define MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX (1) 1296#define MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX (1)
@@ -1471,7 +1492,7 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_0
1471 1492
1472/* 1493/*
1473 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to 1494 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
1474 * one and check Header.PageLength or NumPhysDiskPaths at runtime. 1495 * one and check the value returned for NumPhysDiskPaths at runtime.
1475 */ 1496 */
1476#ifndef MPI2_RAID_PHYS_DISK1_PATH_MAX 1497#ifndef MPI2_RAID_PHYS_DISK1_PATH_MAX
1477#define MPI2_RAID_PHYS_DISK1_PATH_MAX (1) 1498#define MPI2_RAID_PHYS_DISK1_PATH_MAX (1)
@@ -1633,7 +1654,7 @@ typedef struct _MPI2_SAS_IO_UNIT0_PHY_DATA
1633 1654
1634/* 1655/*
1635 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to 1656 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
1636 * one and check Header.ExtPageLength or NumPhys at runtime. 1657 * one and check the value returned for NumPhys at runtime.
1637 */ 1658 */
1638#ifndef MPI2_SAS_IOUNIT0_PHY_MAX 1659#ifndef MPI2_SAS_IOUNIT0_PHY_MAX
1639#define MPI2_SAS_IOUNIT0_PHY_MAX (1) 1660#define MPI2_SAS_IOUNIT0_PHY_MAX (1)
@@ -1704,7 +1725,7 @@ typedef struct _MPI2_SAS_IO_UNIT1_PHY_DATA
1704 1725
1705/* 1726/*
1706 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to 1727 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
1707 * one and check Header.ExtPageLength or NumPhys at runtime. 1728 * one and check the value returned for NumPhys at runtime.
1708 */ 1729 */
1709#ifndef MPI2_SAS_IOUNIT1_PHY_MAX 1730#ifndef MPI2_SAS_IOUNIT1_PHY_MAX
1710#define MPI2_SAS_IOUNIT1_PHY_MAX (1) 1731#define MPI2_SAS_IOUNIT1_PHY_MAX (1)
@@ -1795,7 +1816,7 @@ typedef struct _MPI2_SAS_IOUNIT4_SPINUP_GROUP
1795 1816
1796/* 1817/*
1797 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to 1818 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
1798 * four and check Header.ExtPageLength or NumPhys at runtime. 1819 * one and check the value returned for NumPhys at runtime.
1799 */ 1820 */
1800#ifndef MPI2_SAS_IOUNIT4_PHY_MAX 1821#ifndef MPI2_SAS_IOUNIT4_PHY_MAX
1801#define MPI2_SAS_IOUNIT4_PHY_MAX (4) 1822#define MPI2_SAS_IOUNIT4_PHY_MAX (4)
@@ -1833,7 +1854,7 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_4
1833 1854
1834typedef struct _MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS { 1855typedef struct _MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS {
1835 U8 ControlFlags; /* 0x00 */ 1856 U8 ControlFlags; /* 0x00 */
1836 U8 Reserved1; /* 0x01 */ 1857 U8 PortWidthModGroup; /* 0x01 */
1837 U16 InactivityTimerExponent; /* 0x02 */ 1858 U16 InactivityTimerExponent; /* 0x02 */
1838 U8 SATAPartialTimeout; /* 0x04 */ 1859 U8 SATAPartialTimeout; /* 0x04 */
1839 U8 Reserved2; /* 0x05 */ 1860 U8 Reserved2; /* 0x05 */
@@ -1853,6 +1874,9 @@ typedef struct _MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS {
1853#define MPI2_SASIOUNIT5_CONTROL_SATA_SLUMBER_ENABLE (0x02) 1874#define MPI2_SASIOUNIT5_CONTROL_SATA_SLUMBER_ENABLE (0x02)
1854#define MPI2_SASIOUNIT5_CONTROL_SATA_PARTIAL_ENABLE (0x01) 1875#define MPI2_SASIOUNIT5_CONTROL_SATA_PARTIAL_ENABLE (0x01)
1855 1876
1877/* defines for PortWidthModeGroup field */
1878#define MPI2_SASIOUNIT5_PWMG_DISABLE (0xFF)
1879
1856/* defines for InactivityTimerExponent field */ 1880/* defines for InactivityTimerExponent field */
1857#define MPI2_SASIOUNIT5_ITE_MASK_SAS_SLUMBER (0x7000) 1881#define MPI2_SASIOUNIT5_ITE_MASK_SAS_SLUMBER (0x7000)
1858#define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_SLUMBER (12) 1882#define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_SLUMBER (12)
@@ -1874,7 +1898,7 @@ typedef struct _MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS {
1874 1898
1875/* 1899/*
1876 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to 1900 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
1877 * one and check Header.ExtPageLength or NumPhys at runtime. 1901 * one and check the value returned for NumPhys at runtime.
1878 */ 1902 */
1879#ifndef MPI2_SAS_IOUNIT5_PHY_MAX 1903#ifndef MPI2_SAS_IOUNIT5_PHY_MAX
1880#define MPI2_SAS_IOUNIT5_PHY_MAX (1) 1904#define MPI2_SAS_IOUNIT5_PHY_MAX (1)
@@ -1892,7 +1916,132 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_5 {
1892 MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_5, 1916 MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_5,
1893 Mpi2SasIOUnitPage5_t, MPI2_POINTER pMpi2SasIOUnitPage5_t; 1917 Mpi2SasIOUnitPage5_t, MPI2_POINTER pMpi2SasIOUnitPage5_t;
1894 1918
1895#define MPI2_SASIOUNITPAGE5_PAGEVERSION (0x00) 1919#define MPI2_SASIOUNITPAGE5_PAGEVERSION (0x01)
1920
1921
1922/* SAS IO Unit Page 6 */
1923
1924typedef struct _MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS {
1925 U8 CurrentStatus; /* 0x00 */
1926 U8 CurrentModulation; /* 0x01 */
1927 U8 CurrentUtilization; /* 0x02 */
1928 U8 Reserved1; /* 0x03 */
1929 U32 Reserved2; /* 0x04 */
1930} MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS,
1931 MPI2_POINTER PTR_MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS,
1932 Mpi2SasIOUnit6PortWidthModGroupStatus_t,
1933 MPI2_POINTER pMpi2SasIOUnit6PortWidthModGroupStatus_t;
1934
1935/* defines for CurrentStatus field */
1936#define MPI2_SASIOUNIT6_STATUS_UNAVAILABLE (0x00)
1937#define MPI2_SASIOUNIT6_STATUS_UNCONFIGURED (0x01)
1938#define MPI2_SASIOUNIT6_STATUS_INVALID_CONFIG (0x02)
1939#define MPI2_SASIOUNIT6_STATUS_LINK_DOWN (0x03)
1940#define MPI2_SASIOUNIT6_STATUS_OBSERVATION_ONLY (0x04)
1941#define MPI2_SASIOUNIT6_STATUS_INACTIVE (0x05)
1942#define MPI2_SASIOUNIT6_STATUS_ACTIVE_IOUNIT (0x06)
1943#define MPI2_SASIOUNIT6_STATUS_ACTIVE_HOST (0x07)
1944
1945/* defines for CurrentModulation field */
1946#define MPI2_SASIOUNIT6_MODULATION_25_PERCENT (0x00)
1947#define MPI2_SASIOUNIT6_MODULATION_50_PERCENT (0x01)
1948#define MPI2_SASIOUNIT6_MODULATION_75_PERCENT (0x02)
1949#define MPI2_SASIOUNIT6_MODULATION_100_PERCENT (0x03)
1950
1951/*
1952 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
1953 * one and check the value returned for NumGroups at runtime.
1954 */
1955#ifndef MPI2_SAS_IOUNIT6_GROUP_MAX
1956#define MPI2_SAS_IOUNIT6_GROUP_MAX (1)
1957#endif
1958
1959typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_6 {
1960 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
1961 U32 Reserved1; /* 0x08 */
1962 U32 Reserved2; /* 0x0C */
1963 U8 NumGroups; /* 0x10 */
1964 U8 Reserved3; /* 0x11 */
1965 U16 Reserved4; /* 0x12 */
1966 MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS
1967 PortWidthModulationGroupStatus[MPI2_SAS_IOUNIT6_GROUP_MAX]; /* 0x14 */
1968} MPI2_CONFIG_PAGE_SASIOUNIT_6,
1969 MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_6,
1970 Mpi2SasIOUnitPage6_t, MPI2_POINTER pMpi2SasIOUnitPage6_t;
1971
1972#define MPI2_SASIOUNITPAGE6_PAGEVERSION (0x00)
1973
1974
1975/* SAS IO Unit Page 7 */
1976
1977typedef struct _MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS {
1978 U8 Flags; /* 0x00 */
1979 U8 Reserved1; /* 0x01 */
1980 U16 Reserved2; /* 0x02 */
1981 U8 Threshold75Pct; /* 0x04 */
1982 U8 Threshold50Pct; /* 0x05 */
1983 U8 Threshold25Pct; /* 0x06 */
1984 U8 Reserved3; /* 0x07 */
1985} MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS,
1986 MPI2_POINTER PTR_MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS,
1987 Mpi2SasIOUnit7PortWidthModGroupSettings_t,
1988 MPI2_POINTER pMpi2SasIOUnit7PortWidthModGroupSettings_t;
1989
1990/* defines for Flags field */
1991#define MPI2_SASIOUNIT7_FLAGS_ENABLE_PORT_WIDTH_MODULATION (0x01)
1992
1993
1994/*
1995 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
1996 * one and check the value returned for NumGroups at runtime.
1997 */
1998#ifndef MPI2_SAS_IOUNIT7_GROUP_MAX
1999#define MPI2_SAS_IOUNIT7_GROUP_MAX (1)
2000#endif
2001
2002typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_7 {
2003 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
2004 U8 SamplingInterval; /* 0x08 */
2005 U8 WindowLength; /* 0x09 */
2006 U16 Reserved1; /* 0x0A */
2007 U32 Reserved2; /* 0x0C */
2008 U32 Reserved3; /* 0x10 */
2009 U8 NumGroups; /* 0x14 */
2010 U8 Reserved4; /* 0x15 */
2011 U16 Reserved5; /* 0x16 */
2012 MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS
2013 PortWidthModulationGroupSettings[MPI2_SAS_IOUNIT7_GROUP_MAX]; /* 0x18 */
2014} MPI2_CONFIG_PAGE_SASIOUNIT_7,
2015 MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_7,
2016 Mpi2SasIOUnitPage7_t, MPI2_POINTER pMpi2SasIOUnitPage7_t;
2017
2018#define MPI2_SASIOUNITPAGE7_PAGEVERSION (0x00)
2019
2020
2021/* SAS IO Unit Page 8 */
2022
2023typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_8 {
2024 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
2025 U32 Reserved1; /* 0x08 */
2026 U32 PowerManagementCapabilities;/* 0x0C */
2027 U32 Reserved2; /* 0x10 */
2028} MPI2_CONFIG_PAGE_SASIOUNIT_8,
2029 MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_8,
2030 Mpi2SasIOUnitPage8_t, MPI2_POINTER pMpi2SasIOUnitPage8_t;
2031
2032#define MPI2_SASIOUNITPAGE8_PAGEVERSION (0x00)
2033
2034/* defines for PowerManagementCapabilities field */
2035#define MPI2_SASIOUNIT8_PM_HOST_PORT_WIDTH_MOD (0x000001000)
2036#define MPI2_SASIOUNIT8_PM_HOST_SAS_SLUMBER_MODE (0x000000800)
2037#define MPI2_SASIOUNIT8_PM_HOST_SAS_PARTIAL_MODE (0x000000400)
2038#define MPI2_SASIOUNIT8_PM_HOST_SATA_SLUMBER_MODE (0x000000200)
2039#define MPI2_SASIOUNIT8_PM_HOST_SATA_PARTIAL_MODE (0x000000100)
2040#define MPI2_SASIOUNIT8_PM_IOUNIT_PORT_WIDTH_MOD (0x000000010)
2041#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_SLUMBER_MODE (0x000000008)
2042#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_PARTIAL_MODE (0x000000004)
2043#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_SLUMBER_MODE (0x000000002)
2044#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_PARTIAL_MODE (0x000000001)
1896 2045
1897 2046
1898 2047
@@ -2182,7 +2331,7 @@ typedef struct _MPI2_SASPHY2_PHY_EVENT {
2182 2331
2183/* 2332/*
2184 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to 2333 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
2185 * one and check Header.ExtPageLength or NumPhyEvents at runtime. 2334 * one and check the value returned for NumPhyEvents at runtime.
2186 */ 2335 */
2187#ifndef MPI2_SASPHY2_PHY_EVENT_MAX 2336#ifndef MPI2_SASPHY2_PHY_EVENT_MAX
2188#define MPI2_SASPHY2_PHY_EVENT_MAX (1) 2337#define MPI2_SASPHY2_PHY_EVENT_MAX (1)
@@ -2274,7 +2423,7 @@ typedef struct _MPI2_SASPHY3_PHY_EVENT_CONFIG {
2274 2423
2275/* 2424/*
2276 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to 2425 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
2277 * one and check Header.ExtPageLength or NumPhyEvents at runtime. 2426 * one and check the value returned for NumPhyEvents at runtime.
2278 */ 2427 */
2279#ifndef MPI2_SASPHY3_PHY_EVENT_MAX 2428#ifndef MPI2_SASPHY3_PHY_EVENT_MAX
2280#define MPI2_SASPHY3_PHY_EVENT_MAX (1) 2429#define MPI2_SASPHY3_PHY_EVENT_MAX (1)
@@ -2385,7 +2534,7 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0
2385 2534
2386/* 2535/*
2387 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to 2536 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
2388 * one and check Header.ExtPageLength or NumPhys at runtime. 2537 * one and check the value returned for NumLogEntries at runtime.
2389 */ 2538 */
2390#ifndef MPI2_LOG_0_NUM_LOG_ENTRIES 2539#ifndef MPI2_LOG_0_NUM_LOG_ENTRIES
2391#define MPI2_LOG_0_NUM_LOG_ENTRIES (1) 2540#define MPI2_LOG_0_NUM_LOG_ENTRIES (1)
@@ -2435,7 +2584,7 @@ typedef struct _MPI2_CONFIG_PAGE_LOG_0
2435 2584
2436/* 2585/*
2437 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to 2586 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
2438 * one and check Header.ExtPageLength or NumPhys at runtime. 2587 * one and check the value returned for NumElements at runtime.
2439 */ 2588 */
2440#ifndef MPI2_RAIDCONFIG0_MAX_ELEMENTS 2589#ifndef MPI2_RAIDCONFIG0_MAX_ELEMENTS
2441#define MPI2_RAIDCONFIG0_MAX_ELEMENTS (1) 2590#define MPI2_RAIDCONFIG0_MAX_ELEMENTS (1)
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_init.h b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
index 220bf65a9216..c4c99dfcb820 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
@@ -6,7 +6,7 @@
6 * Title: MPI SCSI initiator mode messages and structures 6 * Title: MPI SCSI initiator mode messages and structures
7 * Creation Date: June 23, 2006 7 * Creation Date: June 23, 2006
8 * 8 *
9 * mpi2_init.h Version: 02.00.08 9 * mpi2_init.h Version: 02.00.09
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -31,6 +31,7 @@
31 * both SCSI IO Error Reply and SCSI Task Management Reply. 31 * both SCSI IO Error Reply and SCSI Task Management Reply.
32 * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY. 32 * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY.
33 * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define. 33 * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
34 * 02-10-10 02.00.09 Removed unused structure that had "#if 0" around it.
34 * -------------------------------------------------------------------------- 35 * --------------------------------------------------------------------------
35 */ 36 */
36 37
@@ -57,20 +58,6 @@ typedef struct
57} MPI2_SCSI_IO_CDB_EEDP32, MPI2_POINTER PTR_MPI2_SCSI_IO_CDB_EEDP32, 58} MPI2_SCSI_IO_CDB_EEDP32, MPI2_POINTER PTR_MPI2_SCSI_IO_CDB_EEDP32,
58 Mpi2ScsiIoCdbEedp32_t, MPI2_POINTER pMpi2ScsiIoCdbEedp32_t; 59 Mpi2ScsiIoCdbEedp32_t, MPI2_POINTER pMpi2ScsiIoCdbEedp32_t;
59 60
60/* TBD: I don't think this is needed for MPI2/Gen2 */
61#if 0
62typedef struct
63{
64 U8 CDB[16]; /* 0x00 */
65 U32 DataLength; /* 0x10 */
66 U32 PrimaryReferenceTag; /* 0x14 */
67 U16 PrimaryApplicationTag; /* 0x18 */
68 U16 PrimaryApplicationTagMask; /* 0x1A */
69 U32 TransferLength; /* 0x1C */
70} MPI2_SCSI_IO32_CDB_EEDP16, MPI2_POINTER PTR_MPI2_SCSI_IO32_CDB_EEDP16,
71 Mpi2ScsiIo32CdbEedp16_t, MPI2_POINTER pMpi2ScsiIo32CdbEedp16_t;
72#endif
73
74typedef union 61typedef union
75{ 62{
76 U8 CDB32[32]; 63 U8 CDB32[32];
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
index f18f114922ba..495bedc4d1f7 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
@@ -6,7 +6,7 @@
6 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages 6 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
7 * Creation Date: October 11, 2006 7 * Creation Date: October 11, 2006
8 * 8 *
9 * mpi2_ioc.h Version: 02.00.13 9 * mpi2_ioc.h Version: 02.00.14
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -98,6 +98,9 @@
98 * (MPI2_FW_HEADER_PID_). 98 * (MPI2_FW_HEADER_PID_).
99 * Modified values for SAS ProductID Family 99 * Modified values for SAS ProductID Family
100 * (MPI2_FW_HEADER_PID_FAMILY_). 100 * (MPI2_FW_HEADER_PID_FAMILY_).
101 * 02-10-10 02.00.14 Added SAS Quiesce Event structure and defines.
102 * Added PowerManagementControl Request structures and
103 * defines.
101 * -------------------------------------------------------------------------- 104 * --------------------------------------------------------------------------
102 */ 105 */
103 106
@@ -469,6 +472,7 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REPLY
469#define MPI2_EVENT_SAS_PHY_COUNTER (0x0022) 472#define MPI2_EVENT_SAS_PHY_COUNTER (0x0022)
470#define MPI2_EVENT_GPIO_INTERRUPT (0x0023) 473#define MPI2_EVENT_GPIO_INTERRUPT (0x0023)
471#define MPI2_EVENT_HOST_BASED_DISCOVERY_PHY (0x0024) 474#define MPI2_EVENT_HOST_BASED_DISCOVERY_PHY (0x0024)
475#define MPI2_EVENT_SAS_QUIESCE (0x0025)
472 476
473 477
474/* Log Entry Added Event data */ 478/* Log Entry Added Event data */
@@ -895,6 +899,22 @@ typedef struct _MPI2_EVENT_DATA_SAS_PHY_COUNTER {
895 * */ 899 * */
896 900
897 901
902/* SAS Quiesce Event data */
903
904typedef struct _MPI2_EVENT_DATA_SAS_QUIESCE {
905 U8 ReasonCode; /* 0x00 */
906 U8 Reserved1; /* 0x01 */
907 U16 Reserved2; /* 0x02 */
908 U32 Reserved3; /* 0x04 */
909} MPI2_EVENT_DATA_SAS_QUIESCE,
910 MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_QUIESCE,
911 Mpi2EventDataSasQuiesce_t, MPI2_POINTER pMpi2EventDataSasQuiesce_t;
912
913/* SAS Quiesce Event data ReasonCode values */
914#define MPI2_EVENT_SAS_QUIESCE_RC_STARTED (0x01)
915#define MPI2_EVENT_SAS_QUIESCE_RC_COMPLETED (0x02)
916
917
898/* Host Based Discovery Phy Event data */ 918/* Host Based Discovery Phy Event data */
899 919
900typedef struct _MPI2_EVENT_HBD_PHY_SAS { 920typedef struct _MPI2_EVENT_HBD_PHY_SAS {
@@ -1006,6 +1026,7 @@ typedef struct _MPI2_FW_DOWNLOAD_REQUEST
1006#define MPI2_FW_DOWNLOAD_ITYPE_CONFIG_1 (0x07) 1026#define MPI2_FW_DOWNLOAD_ITYPE_CONFIG_1 (0x07)
1007#define MPI2_FW_DOWNLOAD_ITYPE_CONFIG_2 (0x08) 1027#define MPI2_FW_DOWNLOAD_ITYPE_CONFIG_2 (0x08)
1008#define MPI2_FW_DOWNLOAD_ITYPE_MEGARAID (0x09) 1028#define MPI2_FW_DOWNLOAD_ITYPE_MEGARAID (0x09)
1029#define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE (0x0A)
1009#define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B) 1030#define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
1010 1031
1011/* FWDownload TransactionContext Element */ 1032/* FWDownload TransactionContext Element */
@@ -1183,7 +1204,6 @@ typedef struct _MPI2_FW_IMAGE_HEADER
1183 1204
1184#define MPI2_FW_HEADER_PID_PROD_MASK (0x0F00) 1205#define MPI2_FW_HEADER_PID_PROD_MASK (0x0F00)
1185#define MPI2_FW_HEADER_PID_PROD_A (0x0000) 1206#define MPI2_FW_HEADER_PID_PROD_A (0x0000)
1186#define MPI2_FW_HEADER_PID_PROD_MASK (0x0F00)
1187#define MPI2_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI (0x0200) 1207#define MPI2_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI (0x0200)
1188#define MPI2_FW_HEADER_PID_PROD_IR_SCSI (0x0700) 1208#define MPI2_FW_HEADER_PID_PROD_IR_SCSI (0x0700)
1189 1209
@@ -1407,5 +1427,100 @@ typedef struct _MPI2_INIT_IMAGE_FOOTER
1407#define MPI2_INIT_IMAGE_RESETVECTOR_OFFSET (0x14) 1427#define MPI2_INIT_IMAGE_RESETVECTOR_OFFSET (0x14)
1408 1428
1409 1429
1430/****************************************************************************
1431* PowerManagementControl message
1432****************************************************************************/
1433
1434/* PowerManagementControl Request message */
1435typedef struct _MPI2_PWR_MGMT_CONTROL_REQUEST {
1436 U8 Feature; /* 0x00 */
1437 U8 Reserved1; /* 0x01 */
1438 U8 ChainOffset; /* 0x02 */
1439 U8 Function; /* 0x03 */
1440 U16 Reserved2; /* 0x04 */
1441 U8 Reserved3; /* 0x06 */
1442 U8 MsgFlags; /* 0x07 */
1443 U8 VP_ID; /* 0x08 */
1444 U8 VF_ID; /* 0x09 */
1445 U16 Reserved4; /* 0x0A */
1446 U8 Parameter1; /* 0x0C */
1447 U8 Parameter2; /* 0x0D */
1448 U8 Parameter3; /* 0x0E */
1449 U8 Parameter4; /* 0x0F */
1450 U32 Reserved5; /* 0x10 */
1451 U32 Reserved6; /* 0x14 */
1452} MPI2_PWR_MGMT_CONTROL_REQUEST, MPI2_POINTER PTR_MPI2_PWR_MGMT_CONTROL_REQUEST,
1453 Mpi2PwrMgmtControlRequest_t, MPI2_POINTER pMpi2PwrMgmtControlRequest_t;
1454
1455/* defines for the Feature field */
1456#define MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND (0x01)
1457#define MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION (0x02)
1458#define MPI2_PM_CONTROL_FEATURE_PCIE_LINK (0x03)
1459#define MPI2_PM_CONTROL_FEATURE_IOC_SPEED (0x04)
1460#define MPI2_PM_CONTROL_FEATURE_MIN_PRODUCT_SPECIFIC (0x80)
1461#define MPI2_PM_CONTROL_FEATURE_MAX_PRODUCT_SPECIFIC (0xFF)
1462
1463/* parameter usage for the MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND Feature */
1464/* Parameter1 contains a PHY number */
1465/* Parameter2 indicates power condition action using these defines */
1466#define MPI2_PM_CONTROL_PARAM2_PARTIAL (0x01)
1467#define MPI2_PM_CONTROL_PARAM2_SLUMBER (0x02)
1468#define MPI2_PM_CONTROL_PARAM2_EXIT_PWR_MGMT (0x03)
1469/* Parameter3 and Parameter4 are reserved */
1470
1471/* parameter usage for the MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION
1472 * Feature */
1473/* Parameter1 contains SAS port width modulation group number */
1474/* Parameter2 indicates IOC action using these defines */
1475#define MPI2_PM_CONTROL_PARAM2_REQUEST_OWNERSHIP (0x01)
1476#define MPI2_PM_CONTROL_PARAM2_CHANGE_MODULATION (0x02)
1477#define MPI2_PM_CONTROL_PARAM2_RELINQUISH_OWNERSHIP (0x03)
1478/* Parameter3 indicates desired modulation level using these defines */
1479#define MPI2_PM_CONTROL_PARAM3_25_PERCENT (0x00)
1480#define MPI2_PM_CONTROL_PARAM3_50_PERCENT (0x01)
1481#define MPI2_PM_CONTROL_PARAM3_75_PERCENT (0x02)
1482#define MPI2_PM_CONTROL_PARAM3_100_PERCENT (0x03)
1483/* Parameter4 is reserved */
1484
1485/* parameter usage for the MPI2_PM_CONTROL_FEATURE_PCIE_LINK Feature */
1486/* Parameter1 indicates desired PCIe link speed using these defines */
1487#define MPI2_PM_CONTROL_PARAM1_PCIE_2_5_GBPS (0x00)
1488#define MPI2_PM_CONTROL_PARAM1_PCIE_5_0_GBPS (0x01)
1489#define MPI2_PM_CONTROL_PARAM1_PCIE_8_0_GBPS (0x02)
1490/* Parameter2 indicates desired PCIe link width using these defines */
1491#define MPI2_PM_CONTROL_PARAM2_WIDTH_X1 (0x01)
1492#define MPI2_PM_CONTROL_PARAM2_WIDTH_X2 (0x02)
1493#define MPI2_PM_CONTROL_PARAM2_WIDTH_X4 (0x04)
1494#define MPI2_PM_CONTROL_PARAM2_WIDTH_X8 (0x08)
1495/* Parameter3 and Parameter4 are reserved */
1496
1497/* parameter usage for the MPI2_PM_CONTROL_FEATURE_IOC_SPEED Feature */
1498/* Parameter1 indicates desired IOC hardware clock speed using these defines */
1499#define MPI2_PM_CONTROL_PARAM1_FULL_IOC_SPEED (0x01)
1500#define MPI2_PM_CONTROL_PARAM1_HALF_IOC_SPEED (0x02)
1501#define MPI2_PM_CONTROL_PARAM1_QUARTER_IOC_SPEED (0x04)
1502#define MPI2_PM_CONTROL_PARAM1_EIGHTH_IOC_SPEED (0x08)
1503/* Parameter2, Parameter3, and Parameter4 are reserved */
1504
1505
1506/* PowerManagementControl Reply message */
1507typedef struct _MPI2_PWR_MGMT_CONTROL_REPLY {
1508 U8 Feature; /* 0x00 */
1509 U8 Reserved1; /* 0x01 */
1510 U8 MsgLength; /* 0x02 */
1511 U8 Function; /* 0x03 */
1512 U16 Reserved2; /* 0x04 */
1513 U8 Reserved3; /* 0x06 */
1514 U8 MsgFlags; /* 0x07 */
1515 U8 VP_ID; /* 0x08 */
1516 U8 VF_ID; /* 0x09 */
1517 U16 Reserved4; /* 0x0A */
1518 U16 Reserved5; /* 0x0C */
1519 U16 IOCStatus; /* 0x0E */
1520 U32 IOCLogInfo; /* 0x10 */
1521} MPI2_PWR_MGMT_CONTROL_REPLY, MPI2_POINTER PTR_MPI2_PWR_MGMT_CONTROL_REPLY,
1522 Mpi2PwrMgmtControlReply_t, MPI2_POINTER pMpi2PwrMgmtControlReply_t;
1523
1524
1410#endif 1525#endif
1411 1526
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 0ec1ed389c20..57bcd5c9dcff 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -95,6 +95,10 @@ int mpt2sas_fwfault_debug;
95MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault " 95MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault "
96 "and halt firmware - (default=0)"); 96 "and halt firmware - (default=0)");
97 97
98static int disable_discovery = -1;
99module_param(disable_discovery, int, 0);
100MODULE_PARM_DESC(disable_discovery, " disable discovery ");
101
98/** 102/**
99 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug. 103 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
100 * 104 *
@@ -1238,7 +1242,7 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
1238 u64 pio_chip = 0; 1242 u64 pio_chip = 0;
1239 u64 chip_phys = 0; 1243 u64 chip_phys = 0;
1240 1244
1241 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", 1245 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n",
1242 ioc->name, __func__)); 1246 ioc->name, __func__));
1243 1247
1244 ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM); 1248 ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
@@ -1307,6 +1311,9 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
1307 printk(MPT2SAS_INFO_FMT "ioport(0x%016llx), size(%d)\n", 1311 printk(MPT2SAS_INFO_FMT "ioport(0x%016llx), size(%d)\n",
1308 ioc->name, (unsigned long long)pio_chip, pio_sz); 1312 ioc->name, (unsigned long long)pio_chip, pio_sz);
1309 1313
1314 /* Save PCI configuration state for recovery from PCI AER/EEH errors */
1315 pci_save_state(pdev);
1316
1310 return 0; 1317 return 0;
1311 1318
1312 out_fail: 1319 out_fail:
@@ -1861,7 +1868,7 @@ _base_static_config_pages(struct MPT2SAS_ADAPTER *ioc)
1861static void 1868static void
1862_base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc) 1869_base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
1863{ 1870{
1864 dexitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, 1871 dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1865 __func__)); 1872 __func__));
1866 1873
1867 if (ioc->request) { 1874 if (ioc->request) {
@@ -1947,7 +1954,7 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
1947 u32 retry_sz; 1954 u32 retry_sz;
1948 u16 max_request_credit; 1955 u16 max_request_credit;
1949 1956
1950 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, 1957 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1951 __func__)); 1958 __func__));
1952 1959
1953 retry_sz = 0; 1960 retry_sz = 0;
@@ -2374,7 +2381,7 @@ _base_wait_for_doorbell_int(struct MPT2SAS_ADAPTER *ioc, int timeout,
2374 do { 2381 do {
2375 int_status = readl(&ioc->chip->HostInterruptStatus); 2382 int_status = readl(&ioc->chip->HostInterruptStatus);
2376 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { 2383 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
2377 dhsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " 2384 dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
2378 "successfull count(%d), timeout(%d)\n", ioc->name, 2385 "successfull count(%d), timeout(%d)\n", ioc->name,
2379 __func__, count, timeout)); 2386 __func__, count, timeout));
2380 return 0; 2387 return 0;
@@ -2415,7 +2422,7 @@ _base_wait_for_doorbell_ack(struct MPT2SAS_ADAPTER *ioc, int timeout,
2415 do { 2422 do {
2416 int_status = readl(&ioc->chip->HostInterruptStatus); 2423 int_status = readl(&ioc->chip->HostInterruptStatus);
2417 if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) { 2424 if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
2418 dhsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " 2425 dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
2419 "successfull count(%d), timeout(%d)\n", ioc->name, 2426 "successfull count(%d), timeout(%d)\n", ioc->name,
2420 __func__, count, timeout)); 2427 __func__, count, timeout));
2421 return 0; 2428 return 0;
@@ -2463,7 +2470,7 @@ _base_wait_for_doorbell_not_used(struct MPT2SAS_ADAPTER *ioc, int timeout,
2463 do { 2470 do {
2464 doorbell_reg = readl(&ioc->chip->Doorbell); 2471 doorbell_reg = readl(&ioc->chip->Doorbell);
2465 if (!(doorbell_reg & MPI2_DOORBELL_USED)) { 2472 if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
2466 dhsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " 2473 dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
2467 "successfull count(%d), timeout(%d)\n", ioc->name, 2474 "successfull count(%d), timeout(%d)\n", ioc->name,
2468 __func__, count, timeout)); 2475 __func__, count, timeout));
2469 return 0; 2476 return 0;
@@ -2637,9 +2644,9 @@ _base_handshake_req_reply_wait(struct MPT2SAS_ADAPTER *ioc, int request_bytes,
2637 2644
2638 if (ioc->logging_level & MPT_DEBUG_INIT) { 2645 if (ioc->logging_level & MPT_DEBUG_INIT) {
2639 mfp = (u32 *)reply; 2646 mfp = (u32 *)reply;
2640 printk(KERN_DEBUG "\toffset:data\n"); 2647 printk(KERN_INFO "\toffset:data\n");
2641 for (i = 0; i < reply_bytes/4; i++) 2648 for (i = 0; i < reply_bytes/4; i++)
2642 printk(KERN_DEBUG "\t[0x%02x]:%08x\n", i*4, 2649 printk(KERN_INFO "\t[0x%02x]:%08x\n", i*4,
2643 le32_to_cpu(mfp[i])); 2650 le32_to_cpu(mfp[i]));
2644 } 2651 }
2645 return 0; 2652 return 0;
@@ -2672,7 +2679,7 @@ mpt2sas_base_sas_iounit_control(struct MPT2SAS_ADAPTER *ioc,
2672 void *request; 2679 void *request;
2673 u16 wait_state_count; 2680 u16 wait_state_count;
2674 2681
2675 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, 2682 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2676 __func__)); 2683 __func__));
2677 2684
2678 mutex_lock(&ioc->base_cmds.mutex); 2685 mutex_lock(&ioc->base_cmds.mutex);
@@ -2777,7 +2784,7 @@ mpt2sas_base_scsi_enclosure_processor(struct MPT2SAS_ADAPTER *ioc,
2777 void *request; 2784 void *request;
2778 u16 wait_state_count; 2785 u16 wait_state_count;
2779 2786
2780 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, 2787 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2781 __func__)); 2788 __func__));
2782 2789
2783 mutex_lock(&ioc->base_cmds.mutex); 2790 mutex_lock(&ioc->base_cmds.mutex);
@@ -2865,7 +2872,7 @@ _base_get_port_facts(struct MPT2SAS_ADAPTER *ioc, int port, int sleep_flag)
2865 Mpi2PortFactsReply_t mpi_reply, *pfacts; 2872 Mpi2PortFactsReply_t mpi_reply, *pfacts;
2866 int mpi_reply_sz, mpi_request_sz, r; 2873 int mpi_reply_sz, mpi_request_sz, r;
2867 2874
2868 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, 2875 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2869 __func__)); 2876 __func__));
2870 2877
2871 mpi_reply_sz = sizeof(Mpi2PortFactsReply_t); 2878 mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
@@ -2907,7 +2914,7 @@ _base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2907 Mpi2IOCFactsReply_t mpi_reply, *facts; 2914 Mpi2IOCFactsReply_t mpi_reply, *facts;
2908 int mpi_reply_sz, mpi_request_sz, r; 2915 int mpi_reply_sz, mpi_request_sz, r;
2909 2916
2910 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, 2917 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2911 __func__)); 2918 __func__));
2912 2919
2913 mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t); 2920 mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
@@ -2979,7 +2986,7 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2979 struct timeval current_time; 2986 struct timeval current_time;
2980 u16 ioc_status; 2987 u16 ioc_status;
2981 2988
2982 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, 2989 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2983 __func__)); 2990 __func__));
2984 2991
2985 memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t)); 2992 memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
@@ -3040,9 +3047,9 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3040 int i; 3047 int i;
3041 3048
3042 mfp = (u32 *)&mpi_request; 3049 mfp = (u32 *)&mpi_request;
3043 printk(KERN_DEBUG "\toffset:data\n"); 3050 printk(KERN_INFO "\toffset:data\n");
3044 for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++) 3051 for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
3045 printk(KERN_DEBUG "\t[0x%02x]:%08x\n", i*4, 3052 printk(KERN_INFO "\t[0x%02x]:%08x\n", i*4,
3046 le32_to_cpu(mfp[i])); 3053 le32_to_cpu(mfp[i]));
3047 } 3054 }
3048 3055
@@ -3121,7 +3128,7 @@ _base_send_port_enable(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3121 r = -ETIME; 3128 r = -ETIME;
3122 goto out; 3129 goto out;
3123 } else 3130 } else
3124 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: complete\n", 3131 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: complete\n",
3125 ioc->name, __func__)); 3132 ioc->name, __func__));
3126 3133
3127 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_OPERATIONAL, 3134 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_OPERATIONAL,
@@ -3181,7 +3188,7 @@ _base_event_notification(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3181 int r = 0; 3188 int r = 0;
3182 int i; 3189 int i;
3183 3190
3184 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, 3191 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3185 __func__)); 3192 __func__));
3186 3193
3187 if (ioc->base_cmds.status & MPT2_CMD_PENDING) { 3194 if (ioc->base_cmds.status & MPT2_CMD_PENDING) {
@@ -3219,7 +3226,7 @@ _base_event_notification(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3219 else 3226 else
3220 r = -ETIME; 3227 r = -ETIME;
3221 } else 3228 } else
3222 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: complete\n", 3229 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: complete\n",
3223 ioc->name, __func__)); 3230 ioc->name, __func__));
3224 ioc->base_cmds.status = MPT2_CMD_NOT_USED; 3231 ioc->base_cmds.status = MPT2_CMD_NOT_USED;
3225 return r; 3232 return r;
@@ -3281,7 +3288,7 @@ _base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3281 3288
3282 _base_save_msix_table(ioc); 3289 _base_save_msix_table(ioc);
3283 3290
3284 drsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "clear interrupts\n", 3291 drsprintk(ioc, printk(MPT2SAS_INFO_FMT "clear interrupts\n",
3285 ioc->name)); 3292 ioc->name));
3286 3293
3287 count = 0; 3294 count = 0;
@@ -3289,7 +3296,7 @@ _base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3289 /* Write magic sequence to WriteSequence register 3296 /* Write magic sequence to WriteSequence register
3290 * Loop until in diagnostic mode 3297 * Loop until in diagnostic mode
3291 */ 3298 */
3292 drsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "write magic " 3299 drsprintk(ioc, printk(MPT2SAS_INFO_FMT "write magic "
3293 "sequence\n", ioc->name)); 3300 "sequence\n", ioc->name));
3294 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence); 3301 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
3295 writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence); 3302 writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
@@ -3309,7 +3316,7 @@ _base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3309 goto out; 3316 goto out;
3310 3317
3311 host_diagnostic = readl(&ioc->chip->HostDiagnostic); 3318 host_diagnostic = readl(&ioc->chip->HostDiagnostic);
3312 drsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "wrote magic " 3319 drsprintk(ioc, printk(MPT2SAS_INFO_FMT "wrote magic "
3313 "sequence: count(%d), host_diagnostic(0x%08x)\n", 3320 "sequence: count(%d), host_diagnostic(0x%08x)\n",
3314 ioc->name, count, host_diagnostic)); 3321 ioc->name, count, host_diagnostic));
3315 3322
@@ -3317,7 +3324,7 @@ _base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3317 3324
3318 hcb_size = readl(&ioc->chip->HCBSize); 3325 hcb_size = readl(&ioc->chip->HCBSize);
3319 3326
3320 drsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "diag reset: issued\n", 3327 drsprintk(ioc, printk(MPT2SAS_INFO_FMT "diag reset: issued\n",
3321 ioc->name)); 3328 ioc->name));
3322 writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER, 3329 writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
3323 &ioc->chip->HostDiagnostic); 3330 &ioc->chip->HostDiagnostic);
@@ -3344,29 +3351,29 @@ _base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3344 3351
3345 if (host_diagnostic & MPI2_DIAG_HCB_MODE) { 3352 if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
3346 3353
3347 drsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "restart the adapter " 3354 drsprintk(ioc, printk(MPT2SAS_INFO_FMT "restart the adapter "
3348 "assuming the HCB Address points to good F/W\n", 3355 "assuming the HCB Address points to good F/W\n",
3349 ioc->name)); 3356 ioc->name));
3350 host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK; 3357 host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
3351 host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW; 3358 host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
3352 writel(host_diagnostic, &ioc->chip->HostDiagnostic); 3359 writel(host_diagnostic, &ioc->chip->HostDiagnostic);
3353 3360
3354 drsprintk(ioc, printk(MPT2SAS_DEBUG_FMT 3361 drsprintk(ioc, printk(MPT2SAS_INFO_FMT
3355 "re-enable the HCDW\n", ioc->name)); 3362 "re-enable the HCDW\n", ioc->name));
3356 writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE, 3363 writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
3357 &ioc->chip->HCBSize); 3364 &ioc->chip->HCBSize);
3358 } 3365 }
3359 3366
3360 drsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "restart the adapter\n", 3367 drsprintk(ioc, printk(MPT2SAS_INFO_FMT "restart the adapter\n",
3361 ioc->name)); 3368 ioc->name));
3362 writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET, 3369 writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
3363 &ioc->chip->HostDiagnostic); 3370 &ioc->chip->HostDiagnostic);
3364 3371
3365 drsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "disable writes to the " 3372 drsprintk(ioc, printk(MPT2SAS_INFO_FMT "disable writes to the "
3366 "diagnostic register\n", ioc->name)); 3373 "diagnostic register\n", ioc->name));
3367 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence); 3374 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
3368 3375
3369 drsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "Wait for FW to go to the " 3376 drsprintk(ioc, printk(MPT2SAS_INFO_FMT "Wait for FW to go to the "
3370 "READY state\n", ioc->name)); 3377 "READY state\n", ioc->name));
3371 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20, 3378 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20,
3372 sleep_flag); 3379 sleep_flag);
@@ -3398,19 +3405,23 @@ _base_make_ioc_ready(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
3398 enum reset_type type) 3405 enum reset_type type)
3399{ 3406{
3400 u32 ioc_state; 3407 u32 ioc_state;
3408 int rc;
3401 3409
3402 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, 3410 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3403 __func__)); 3411 __func__));
3404 3412
3413 if (ioc->pci_error_recovery)
3414 return 0;
3415
3405 ioc_state = mpt2sas_base_get_iocstate(ioc, 0); 3416 ioc_state = mpt2sas_base_get_iocstate(ioc, 0);
3406 dhsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: ioc_state(0x%08x)\n", 3417 dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: ioc_state(0x%08x)\n",
3407 ioc->name, __func__, ioc_state)); 3418 ioc->name, __func__, ioc_state));
3408 3419
3409 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) 3420 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
3410 return 0; 3421 return 0;
3411 3422
3412 if (ioc_state & MPI2_DOORBELL_USED) { 3423 if (ioc_state & MPI2_DOORBELL_USED) {
3413 dhsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "unexpected doorbell " 3424 dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "unexpected doorbell "
3414 "active!\n", ioc->name)); 3425 "active!\n", ioc->name));
3415 goto issue_diag_reset; 3426 goto issue_diag_reset;
3416 } 3427 }
@@ -3426,11 +3437,15 @@ _base_make_ioc_ready(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
3426 3437
3427 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL) 3438 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
3428 if (!(_base_send_ioc_reset(ioc, 3439 if (!(_base_send_ioc_reset(ioc,
3429 MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15, CAN_SLEEP))) 3440 MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15, CAN_SLEEP))) {
3441 ioc->ioc_reset_count++;
3430 return 0; 3442 return 0;
3443 }
3431 3444
3432 issue_diag_reset: 3445 issue_diag_reset:
3433 return _base_diag_reset(ioc, CAN_SLEEP); 3446 rc = _base_diag_reset(ioc, CAN_SLEEP);
3447 ioc->ioc_reset_count++;
3448 return rc;
3434} 3449}
3435 3450
3436/** 3451/**
@@ -3449,7 +3464,7 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3449 u16 smid; 3464 u16 smid;
3450 struct _tr_list *delayed_tr, *delayed_tr_next; 3465 struct _tr_list *delayed_tr, *delayed_tr_next;
3451 3466
3452 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, 3467 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3453 __func__)); 3468 __func__));
3454 3469
3455 /* clean the delayed target reset list */ 3470 /* clean the delayed target reset list */
@@ -3459,6 +3474,12 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3459 kfree(delayed_tr); 3474 kfree(delayed_tr);
3460 } 3475 }
3461 3476
3477 list_for_each_entry_safe(delayed_tr, delayed_tr_next,
3478 &ioc->delayed_tr_volume_list, list) {
3479 list_del(&delayed_tr->list);
3480 kfree(delayed_tr);
3481 }
3482
3462 /* initialize the scsi lookup free list */ 3483 /* initialize the scsi lookup free list */
3463 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 3484 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3464 INIT_LIST_HEAD(&ioc->free_list); 3485 INIT_LIST_HEAD(&ioc->free_list);
@@ -3520,6 +3541,13 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3520 if (sleep_flag == CAN_SLEEP) 3541 if (sleep_flag == CAN_SLEEP)
3521 _base_static_config_pages(ioc); 3542 _base_static_config_pages(ioc);
3522 3543
3544 if (ioc->wait_for_port_enable_to_complete) {
3545 if (diag_buffer_enable != 0)
3546 mpt2sas_enable_diag_buffer(ioc, diag_buffer_enable);
3547 if (disable_discovery > 0)
3548 return r;
3549 }
3550
3523 r = _base_send_port_enable(ioc, sleep_flag); 3551 r = _base_send_port_enable(ioc, sleep_flag);
3524 if (r) 3552 if (r)
3525 return r; 3553 return r;
@@ -3538,7 +3566,7 @@ mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc)
3538{ 3566{
3539 struct pci_dev *pdev = ioc->pdev; 3567 struct pci_dev *pdev = ioc->pdev;
3540 3568
3541 dexitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, 3569 dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3542 __func__)); 3570 __func__));
3543 3571
3544 _base_mask_interrupts(ioc); 3572 _base_mask_interrupts(ioc);
@@ -3571,7 +3599,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
3571{ 3599{
3572 int r, i; 3600 int r, i;
3573 3601
3574 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, 3602 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3575 __func__)); 3603 __func__));
3576 3604
3577 r = mpt2sas_base_map_resources(ioc); 3605 r = mpt2sas_base_map_resources(ioc);
@@ -3606,6 +3634,17 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
3606 3634
3607 init_waitqueue_head(&ioc->reset_wq); 3635 init_waitqueue_head(&ioc->reset_wq);
3608 3636
3637 /* allocate memory pd handle bitmask list */
3638 ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
3639 if (ioc->facts.MaxDevHandle % 8)
3640 ioc->pd_handles_sz++;
3641 ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
3642 GFP_KERNEL);
3643 if (!ioc->pd_handles) {
3644 r = -ENOMEM;
3645 goto out_free_resources;
3646 }
3647
3609 ioc->fwfault_debug = mpt2sas_fwfault_debug; 3648 ioc->fwfault_debug = mpt2sas_fwfault_debug;
3610 3649
3611 /* base internal command bits */ 3650 /* base internal command bits */
@@ -3635,11 +3674,20 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
3635 3674
3636 /* ctl module internal command bits */ 3675 /* ctl module internal command bits */
3637 ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); 3676 ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
3677 ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
3638 ioc->ctl_cmds.status = MPT2_CMD_NOT_USED; 3678 ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
3639 mutex_init(&ioc->ctl_cmds.mutex); 3679 mutex_init(&ioc->ctl_cmds.mutex);
3640 3680
3641 if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply || 3681 if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply ||
3642 !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply || 3682 !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply ||
3683 !ioc->config_cmds.reply || !ioc->ctl_cmds.reply ||
3684 !ioc->ctl_cmds.sense) {
3685 r = -ENOMEM;
3686 goto out_free_resources;
3687 }
3688
3689 if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply ||
3690 !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply ||
3643 !ioc->config_cmds.reply || !ioc->ctl_cmds.reply) { 3691 !ioc->config_cmds.reply || !ioc->ctl_cmds.reply) {
3644 r = -ENOMEM; 3692 r = -ENOMEM;
3645 goto out_free_resources; 3693 goto out_free_resources;
@@ -3667,8 +3715,6 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
3667 goto out_free_resources; 3715 goto out_free_resources;
3668 3716
3669 mpt2sas_base_start_watchdog(ioc); 3717 mpt2sas_base_start_watchdog(ioc);
3670 if (diag_buffer_enable != 0)
3671 mpt2sas_enable_diag_buffer(ioc, diag_buffer_enable);
3672 return 0; 3718 return 0;
3673 3719
3674 out_free_resources: 3720 out_free_resources:
@@ -3677,12 +3723,14 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
3677 mpt2sas_base_free_resources(ioc); 3723 mpt2sas_base_free_resources(ioc);
3678 _base_release_memory_pools(ioc); 3724 _base_release_memory_pools(ioc);
3679 pci_set_drvdata(ioc->pdev, NULL); 3725 pci_set_drvdata(ioc->pdev, NULL);
3726 kfree(ioc->pd_handles);
3680 kfree(ioc->tm_cmds.reply); 3727 kfree(ioc->tm_cmds.reply);
3681 kfree(ioc->transport_cmds.reply); 3728 kfree(ioc->transport_cmds.reply);
3682 kfree(ioc->scsih_cmds.reply); 3729 kfree(ioc->scsih_cmds.reply);
3683 kfree(ioc->config_cmds.reply); 3730 kfree(ioc->config_cmds.reply);
3684 kfree(ioc->base_cmds.reply); 3731 kfree(ioc->base_cmds.reply);
3685 kfree(ioc->ctl_cmds.reply); 3732 kfree(ioc->ctl_cmds.reply);
3733 kfree(ioc->ctl_cmds.sense);
3686 kfree(ioc->pfacts); 3734 kfree(ioc->pfacts);
3687 ioc->ctl_cmds.reply = NULL; 3735 ioc->ctl_cmds.reply = NULL;
3688 ioc->base_cmds.reply = NULL; 3736 ioc->base_cmds.reply = NULL;
@@ -3705,15 +3753,17 @@ void
3705mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc) 3753mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc)
3706{ 3754{
3707 3755
3708 dexitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, 3756 dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3709 __func__)); 3757 __func__));
3710 3758
3711 mpt2sas_base_stop_watchdog(ioc); 3759 mpt2sas_base_stop_watchdog(ioc);
3712 mpt2sas_base_free_resources(ioc); 3760 mpt2sas_base_free_resources(ioc);
3713 _base_release_memory_pools(ioc); 3761 _base_release_memory_pools(ioc);
3714 pci_set_drvdata(ioc->pdev, NULL); 3762 pci_set_drvdata(ioc->pdev, NULL);
3763 kfree(ioc->pd_handles);
3715 kfree(ioc->pfacts); 3764 kfree(ioc->pfacts);
3716 kfree(ioc->ctl_cmds.reply); 3765 kfree(ioc->ctl_cmds.reply);
3766 kfree(ioc->ctl_cmds.sense);
3717 kfree(ioc->base_cmds.reply); 3767 kfree(ioc->base_cmds.reply);
3718 kfree(ioc->tm_cmds.reply); 3768 kfree(ioc->tm_cmds.reply);
3719 kfree(ioc->transport_cmds.reply); 3769 kfree(ioc->transport_cmds.reply);
@@ -3738,11 +3788,11 @@ _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
3738{ 3788{
3739 switch (reset_phase) { 3789 switch (reset_phase) {
3740 case MPT2_IOC_PRE_RESET: 3790 case MPT2_IOC_PRE_RESET:
3741 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " 3791 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
3742 "MPT2_IOC_PRE_RESET\n", ioc->name, __func__)); 3792 "MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
3743 break; 3793 break;
3744 case MPT2_IOC_AFTER_RESET: 3794 case MPT2_IOC_AFTER_RESET:
3745 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " 3795 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
3746 "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__)); 3796 "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
3747 if (ioc->transport_cmds.status & MPT2_CMD_PENDING) { 3797 if (ioc->transport_cmds.status & MPT2_CMD_PENDING) {
3748 ioc->transport_cmds.status |= MPT2_CMD_RESET; 3798 ioc->transport_cmds.status |= MPT2_CMD_RESET;
@@ -3762,7 +3812,7 @@ _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
3762 } 3812 }
3763 break; 3813 break;
3764 case MPT2_IOC_DONE_RESET: 3814 case MPT2_IOC_DONE_RESET:
3765 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " 3815 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
3766 "MPT2_IOC_DONE_RESET\n", ioc->name, __func__)); 3816 "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
3767 break; 3817 break;
3768 } 3818 }
@@ -3804,7 +3854,7 @@ _wait_for_commands_to_complete(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3804 return; 3854 return;
3805 3855
3806 /* wait for pending commands to complete */ 3856 /* wait for pending commands to complete */
3807 wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 3 * HZ); 3857 wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
3808} 3858}
3809 3859
3810/** 3860/**
@@ -3822,19 +3872,37 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
3822 int r; 3872 int r;
3823 unsigned long flags; 3873 unsigned long flags;
3824 3874
3825 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name, 3875 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
3826 __func__)); 3876 __func__));
3827 3877
3878 if (ioc->pci_error_recovery) {
3879 printk(MPT2SAS_ERR_FMT "%s: pci error recovery reset\n",
3880 ioc->name, __func__);
3881 r = 0;
3882 goto out;
3883 }
3884
3828 if (mpt2sas_fwfault_debug) 3885 if (mpt2sas_fwfault_debug)
3829 mpt2sas_halt_firmware(ioc); 3886 mpt2sas_halt_firmware(ioc);
3830 3887
3831 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); 3888 /* TODO - What we really should be doing is pulling
3832 if (ioc->shost_recovery) { 3889 * out all the code associated with NO_SLEEP; its never used.
3833 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); 3890 * That is legacy code from mpt fusion driver, ported over.
3834 printk(MPT2SAS_ERR_FMT "%s: busy\n", 3891 * I will leave this BUG_ON here for now till its been resolved.
3835 ioc->name, __func__); 3892 */
3836 return -EBUSY; 3893 BUG_ON(sleep_flag == NO_SLEEP);
3894
3895 /* wait for an active reset in progress to complete */
3896 if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
3897 do {
3898 ssleep(1);
3899 } while (ioc->shost_recovery == 1);
3900 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit\n", ioc->name,
3901 __func__));
3902 return ioc->ioc_reset_in_progress_status;
3837 } 3903 }
3904
3905 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
3838 ioc->shost_recovery = 1; 3906 ioc->shost_recovery = 1;
3839 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); 3907 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
3840 3908
@@ -3849,13 +3917,17 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
3849 if (!r) 3917 if (!r)
3850 _base_reset_handler(ioc, MPT2_IOC_DONE_RESET); 3918 _base_reset_handler(ioc, MPT2_IOC_DONE_RESET);
3851 out: 3919 out:
3852 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: %s\n", 3920 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: %s\n",
3853 ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED"))); 3921 ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
3854 3922
3855 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); 3923 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
3924 ioc->ioc_reset_in_progress_status = r;
3856 ioc->shost_recovery = 0; 3925 ioc->shost_recovery = 0;
3857 complete(&ioc->shost_recovery_done); 3926 complete(&ioc->shost_recovery_done);
3858 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); 3927 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
3928 mutex_unlock(&ioc->reset_in_progress_mutex);
3859 3929
3930 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit\n", ioc->name,
3931 __func__));
3860 return r; 3932 return r;
3861} 3933}
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index b4afe431ac1e..0ebef0c0d949 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,11 +69,11 @@
69#define MPT2SAS_DRIVER_NAME "mpt2sas" 69#define MPT2SAS_DRIVER_NAME "mpt2sas"
70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" 70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" 71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
72#define MPT2SAS_DRIVER_VERSION "05.100.00.02" 72#define MPT2SAS_DRIVER_VERSION "06.100.00.00"
73#define MPT2SAS_MAJOR_VERSION 05 73#define MPT2SAS_MAJOR_VERSION 06
74#define MPT2SAS_MINOR_VERSION 100 74#define MPT2SAS_MINOR_VERSION 100
75#define MPT2SAS_BUILD_VERSION 00 75#define MPT2SAS_BUILD_VERSION 00
76#define MPT2SAS_RELEASE_VERSION 02 76#define MPT2SAS_RELEASE_VERSION 00
77 77
78/* 78/*
79 * Set MPT2SAS_SG_DEPTH value based on user input. 79 * Set MPT2SAS_SG_DEPTH value based on user input.
@@ -124,7 +124,6 @@
124 * logging format 124 * logging format
125 */ 125 */
126#define MPT2SAS_FMT "%s: " 126#define MPT2SAS_FMT "%s: "
127#define MPT2SAS_DEBUG_FMT KERN_DEBUG MPT2SAS_FMT
128#define MPT2SAS_INFO_FMT KERN_INFO MPT2SAS_FMT 127#define MPT2SAS_INFO_FMT KERN_INFO MPT2SAS_FMT
129#define MPT2SAS_NOTE_FMT KERN_NOTICE MPT2SAS_FMT 128#define MPT2SAS_NOTE_FMT KERN_NOTICE MPT2SAS_FMT
130#define MPT2SAS_WARN_FMT KERN_WARNING MPT2SAS_FMT 129#define MPT2SAS_WARN_FMT KERN_WARNING MPT2SAS_FMT
@@ -248,6 +247,7 @@ struct MPT2SAS_DEVICE {
248 * @mutex: mutex 247 * @mutex: mutex
249 * @done: completion 248 * @done: completion
250 * @reply: reply message pointer 249 * @reply: reply message pointer
250 * @sense: sense data
251 * @status: MPT2_CMD_XXX status 251 * @status: MPT2_CMD_XXX status
252 * @smid: system message id 252 * @smid: system message id
253 */ 253 */
@@ -255,6 +255,7 @@ struct _internal_cmd {
255 struct mutex mutex; 255 struct mutex mutex;
256 struct completion done; 256 struct completion done;
257 void *reply; 257 void *reply;
258 void *sense;
258 u16 status; 259 u16 status;
259 u16 smid; 260 u16 smid;
260}; 261};
@@ -276,7 +277,7 @@ struct _internal_cmd {
276 * @id: target id 277 * @id: target id
277 * @channel: target channel 278 * @channel: target channel
278 * @slot: number number 279 * @slot: number number
279 * @hidden_raid_component: set to 1 when this is a raid member 280 * @phy: phy identifier provided in sas device page 0
280 * @responding: used in _scsih_sas_device_mark_responding 281 * @responding: used in _scsih_sas_device_mark_responding
281 */ 282 */
282struct _sas_device { 283struct _sas_device {
@@ -294,7 +295,7 @@ struct _sas_device {
294 int id; 295 int id;
295 int channel; 296 int channel;
296 u16 slot; 297 u16 slot;
297 u8 hidden_raid_component; 298 u8 phy;
298 u8 responding; 299 u8 responding;
299}; 300};
300 301
@@ -476,6 +477,7 @@ typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
476 * @ioc_link_reset_in_progress: phy/hard reset in progress 477 * @ioc_link_reset_in_progress: phy/hard reset in progress
477 * @ignore_loginfos: ignore loginfos during task managment 478 * @ignore_loginfos: ignore loginfos during task managment
478 * @remove_host: flag for when driver unloads, to avoid sending dev resets 479 * @remove_host: flag for when driver unloads, to avoid sending dev resets
480 * @pci_error_recovery: flag to prevent ioc access until slot reset completes
479 * @wait_for_port_enable_to_complete: 481 * @wait_for_port_enable_to_complete:
480 * @msix_enable: flag indicating msix is enabled 482 * @msix_enable: flag indicating msix is enabled
481 * @msix_vector_count: number msix vectors 483 * @msix_vector_count: number msix vectors
@@ -488,6 +490,8 @@ typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
488 * @ctl_cb_idx: clt internal commands 490 * @ctl_cb_idx: clt internal commands
489 * @base_cb_idx: base internal commands 491 * @base_cb_idx: base internal commands
490 * @config_cb_idx: base internal commands 492 * @config_cb_idx: base internal commands
493 * @tm_tr_cb_idx : device removal target reset handshake
494 * @tm_tr_volume_cb_idx : volume removal target reset
491 * @base_cmds: 495 * @base_cmds:
492 * @transport_cmds: 496 * @transport_cmds:
493 * @scsih_cmds: 497 * @scsih_cmds:
@@ -516,6 +520,9 @@ typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
516 * @sas_device_lock: 520 * @sas_device_lock:
517 * @io_missing_delay: time for IO completed by fw when PDR enabled 521 * @io_missing_delay: time for IO completed by fw when PDR enabled
518 * @device_missing_delay: time for device missing by fw when PDR enabled 522 * @device_missing_delay: time for device missing by fw when PDR enabled
523 * @sas_id : used for setting volume target IDs
524 * @pd_handles : bitmask for PD handles
525 * @pd_handles_sz : size of pd_handle bitmask
519 * @config_page_sz: config page size 526 * @config_page_sz: config page size
520 * @config_page: reserve memory for config page payload 527 * @config_page: reserve memory for config page payload
521 * @config_page_dma: 528 * @config_page_dma:
@@ -568,6 +575,8 @@ typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
568 * @reply_post_free_dma: 575 * @reply_post_free_dma:
569 * @reply_post_free_dma_pool: 576 * @reply_post_free_dma_pool:
570 * @reply_post_host_index: head index in the pool where FW completes IO 577 * @reply_post_host_index: head index in the pool where FW completes IO
578 * @delayed_tr_list: target reset link list
579 * @delayed_tr_volume_list: volume target reset link list
571 */ 580 */
572struct MPT2SAS_ADAPTER { 581struct MPT2SAS_ADAPTER {
573 struct list_head list; 582 struct list_head list;
@@ -600,17 +609,23 @@ struct MPT2SAS_ADAPTER {
600 int aen_event_read_flag; 609 int aen_event_read_flag;
601 u8 broadcast_aen_busy; 610 u8 broadcast_aen_busy;
602 u8 shost_recovery; 611 u8 shost_recovery;
612
613 struct mutex reset_in_progress_mutex;
603 struct completion shost_recovery_done; 614 struct completion shost_recovery_done;
604 spinlock_t ioc_reset_in_progress_lock; 615 spinlock_t ioc_reset_in_progress_lock;
605 u8 ioc_link_reset_in_progress; 616 u8 ioc_link_reset_in_progress;
617 int ioc_reset_in_progress_status;
618
606 u8 ignore_loginfos; 619 u8 ignore_loginfos;
607 u8 remove_host; 620 u8 remove_host;
621 u8 pci_error_recovery;
608 u8 wait_for_port_enable_to_complete; 622 u8 wait_for_port_enable_to_complete;
609 623
610 u8 msix_enable; 624 u8 msix_enable;
611 u16 msix_vector_count; 625 u16 msix_vector_count;
612 u32 *msix_table; 626 u32 *msix_table;
613 u32 *msix_table_backup; 627 u32 *msix_table_backup;
628 u32 ioc_reset_count;
614 629
615 /* internal commands, callback index */ 630 /* internal commands, callback index */
616 u8 scsi_io_cb_idx; 631 u8 scsi_io_cb_idx;
@@ -621,6 +636,7 @@ struct MPT2SAS_ADAPTER {
621 u8 base_cb_idx; 636 u8 base_cb_idx;
622 u8 config_cb_idx; 637 u8 config_cb_idx;
623 u8 tm_tr_cb_idx; 638 u8 tm_tr_cb_idx;
639 u8 tm_tr_volume_cb_idx;
624 u8 tm_sas_control_cb_idx; 640 u8 tm_sas_control_cb_idx;
625 struct _internal_cmd base_cmds; 641 struct _internal_cmd base_cmds;
626 struct _internal_cmd transport_cmds; 642 struct _internal_cmd transport_cmds;
@@ -664,6 +680,9 @@ struct MPT2SAS_ADAPTER {
664 u16 device_missing_delay; 680 u16 device_missing_delay;
665 int sas_id; 681 int sas_id;
666 682
683 void *pd_handles;
684 u16 pd_handles_sz;
685
667 /* config page */ 686 /* config page */
668 u16 config_page_sz; 687 u16 config_page_sz;
669 void *config_page; 688 void *config_page;
@@ -735,6 +754,7 @@ struct MPT2SAS_ADAPTER {
735 u32 reply_post_host_index; 754 u32 reply_post_host_index;
736 755
737 struct list_head delayed_tr_list; 756 struct list_head delayed_tr_list;
757 struct list_head delayed_tr_volume_list;
738 758
739 /* diag buffer support */ 759 /* diag buffer support */
740 u8 *diag_buffer[MPI2_DIAG_BUF_TYPE_COUNT]; 760 u8 *diag_buffer[MPI2_DIAG_BUF_TYPE_COUNT];
@@ -745,6 +765,8 @@ struct MPT2SAS_ADAPTER {
745 Mpi2ManufacturingPage10_t manu_pg10; 765 Mpi2ManufacturingPage10_t manu_pg10;
746 u32 product_specific[MPI2_DIAG_BUF_TYPE_COUNT][23]; 766 u32 product_specific[MPI2_DIAG_BUF_TYPE_COUNT][23];
747 u32 diagnostic_flags[MPI2_DIAG_BUF_TYPE_COUNT]; 767 u32 diagnostic_flags[MPI2_DIAG_BUF_TYPE_COUNT];
768 u32 ring_buffer_offset;
769 u32 ring_buffer_sz;
748}; 770};
749 771
750typedef u8 (*MPT_CALLBACK)(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, 772typedef u8 (*MPT_CALLBACK)(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
index c65442982d7b..6afd67b324fe 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
@@ -159,7 +159,7 @@ _config_display_some_debug(struct MPT2SAS_ADAPTER *ioc, u16 smid,
159 if (!desc) 159 if (!desc)
160 return; 160 return;
161 161
162 printk(MPT2SAS_DEBUG_FMT "%s: %s(%d), action(%d), form(0x%08x), " 162 printk(MPT2SAS_INFO_FMT "%s: %s(%d), action(%d), form(0x%08x), "
163 "smid(%d)\n", ioc->name, calling_function_name, desc, 163 "smid(%d)\n", ioc->name, calling_function_name, desc,
164 mpi_request->Header.PageNumber, mpi_request->Action, 164 mpi_request->Header.PageNumber, mpi_request->Action,
165 le32_to_cpu(mpi_request->PageAddress), smid); 165 le32_to_cpu(mpi_request->PageAddress), smid);
@@ -168,7 +168,7 @@ _config_display_some_debug(struct MPT2SAS_ADAPTER *ioc, u16 smid,
168 return; 168 return;
169 169
170 if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo) 170 if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
171 printk(MPT2SAS_DEBUG_FMT 171 printk(MPT2SAS_INFO_FMT
172 "\tiocstatus(0x%04x), loginfo(0x%08x)\n", 172 "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
173 ioc->name, le16_to_cpu(mpi_reply->IOCStatus), 173 ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
174 le32_to_cpu(mpi_reply->IOCLogInfo)); 174 le32_to_cpu(mpi_reply->IOCLogInfo));
@@ -401,7 +401,7 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
401 if (ioc->config_cmds.smid == smid) 401 if (ioc->config_cmds.smid == smid)
402 mpt2sas_base_free_smid(ioc, smid); 402 mpt2sas_base_free_smid(ioc, smid);
403 if ((ioc->shost_recovery) || (ioc->config_cmds.status & 403 if ((ioc->shost_recovery) || (ioc->config_cmds.status &
404 MPT2_CMD_RESET)) 404 MPT2_CMD_RESET) || ioc->pci_error_recovery)
405 goto retry_config; 405 goto retry_config;
406 issue_host_reset = 1; 406 issue_host_reset = 1;
407 r = -EFAULT; 407 r = -EFAULT;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index d88e9756d8f5..b774973f0765 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -80,6 +80,32 @@ enum block_state {
80 BLOCKING, 80 BLOCKING,
81}; 81};
82 82
83/**
84 * _ctl_sas_device_find_by_handle - sas device search
85 * @ioc: per adapter object
86 * @handle: sas device handle (assigned by firmware)
87 * Context: Calling function should acquire ioc->sas_device_lock
88 *
89 * This searches for sas_device based on sas_address, then return sas_device
90 * object.
91 */
92static struct _sas_device *
93_ctl_sas_device_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
94{
95 struct _sas_device *sas_device, *r;
96
97 r = NULL;
98 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
99 if (sas_device->handle != handle)
100 continue;
101 r = sas_device;
102 goto out;
103 }
104
105 out:
106 return r;
107}
108
83#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 109#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
84/** 110/**
85 * _ctl_display_some_debug - debug routine 111 * _ctl_display_some_debug - debug routine
@@ -188,14 +214,14 @@ _ctl_display_some_debug(struct MPT2SAS_ADAPTER *ioc, u16 smid,
188 if (!desc) 214 if (!desc)
189 return; 215 return;
190 216
191 printk(MPT2SAS_DEBUG_FMT "%s: %s, smid(%d)\n", 217 printk(MPT2SAS_INFO_FMT "%s: %s, smid(%d)\n",
192 ioc->name, calling_function_name, desc, smid); 218 ioc->name, calling_function_name, desc, smid);
193 219
194 if (!mpi_reply) 220 if (!mpi_reply)
195 return; 221 return;
196 222
197 if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo) 223 if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
198 printk(MPT2SAS_DEBUG_FMT 224 printk(MPT2SAS_INFO_FMT
199 "\tiocstatus(0x%04x), loginfo(0x%08x)\n", 225 "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
200 ioc->name, le16_to_cpu(mpi_reply->IOCStatus), 226 ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
201 le32_to_cpu(mpi_reply->IOCLogInfo)); 227 le32_to_cpu(mpi_reply->IOCLogInfo));
@@ -205,8 +231,24 @@ _ctl_display_some_debug(struct MPT2SAS_ADAPTER *ioc, u16 smid,
205 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { 231 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
206 Mpi2SCSIIOReply_t *scsi_reply = 232 Mpi2SCSIIOReply_t *scsi_reply =
207 (Mpi2SCSIIOReply_t *)mpi_reply; 233 (Mpi2SCSIIOReply_t *)mpi_reply;
234 struct _sas_device *sas_device = NULL;
235 unsigned long flags;
236
237 spin_lock_irqsave(&ioc->sas_device_lock, flags);
238 sas_device = _ctl_sas_device_find_by_handle(ioc,
239 le16_to_cpu(scsi_reply->DevHandle));
240 if (sas_device) {
241 printk(MPT2SAS_WARN_FMT "\tsas_address(0x%016llx), "
242 "phy(%d)\n", ioc->name, (unsigned long long)
243 sas_device->sas_address, sas_device->phy);
244 printk(MPT2SAS_WARN_FMT
245 "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
246 ioc->name, sas_device->enclosure_logical_id,
247 sas_device->slot);
248 }
249 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
208 if (scsi_reply->SCSIState || scsi_reply->SCSIStatus) 250 if (scsi_reply->SCSIState || scsi_reply->SCSIStatus)
209 printk(MPT2SAS_DEBUG_FMT 251 printk(MPT2SAS_INFO_FMT
210 "\tscsi_state(0x%02x), scsi_status" 252 "\tscsi_state(0x%02x), scsi_status"
211 "(0x%02x)\n", ioc->name, 253 "(0x%02x)\n", ioc->name,
212 scsi_reply->SCSIState, 254 scsi_reply->SCSIState,
@@ -233,6 +275,9 @@ mpt2sas_ctl_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
233 u32 reply) 275 u32 reply)
234{ 276{
235 MPI2DefaultReply_t *mpi_reply; 277 MPI2DefaultReply_t *mpi_reply;
278 Mpi2SCSIIOReply_t *scsiio_reply;
279 const void *sense_data;
280 u32 sz;
236 281
237 if (ioc->ctl_cmds.status == MPT2_CMD_NOT_USED) 282 if (ioc->ctl_cmds.status == MPT2_CMD_NOT_USED)
238 return 1; 283 return 1;
@@ -243,6 +288,20 @@ mpt2sas_ctl_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
243 if (mpi_reply) { 288 if (mpi_reply) {
244 memcpy(ioc->ctl_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); 289 memcpy(ioc->ctl_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
245 ioc->ctl_cmds.status |= MPT2_CMD_REPLY_VALID; 290 ioc->ctl_cmds.status |= MPT2_CMD_REPLY_VALID;
291 /* get sense data */
292 if (mpi_reply->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
293 mpi_reply->Function ==
294 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
295 scsiio_reply = (Mpi2SCSIIOReply_t *)mpi_reply;
296 if (scsiio_reply->SCSIState &
297 MPI2_SCSI_STATE_AUTOSENSE_VALID) {
298 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
299 le32_to_cpu(scsiio_reply->SenseCount));
300 sense_data = mpt2sas_base_get_sense_buffer(ioc,
301 smid);
302 memcpy(ioc->ctl_cmds.sense, sense_data, sz);
303 }
304 }
246 } 305 }
247#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 306#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
248 _ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply); 307 _ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply);
@@ -392,7 +451,7 @@ mpt2sas_ctl_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
392 451
393 switch (reset_phase) { 452 switch (reset_phase) {
394 case MPT2_IOC_PRE_RESET: 453 case MPT2_IOC_PRE_RESET:
395 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " 454 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
396 "MPT2_IOC_PRE_RESET\n", ioc->name, __func__)); 455 "MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
397 for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) { 456 for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
398 if (!(ioc->diag_buffer_status[i] & 457 if (!(ioc->diag_buffer_status[i] &
@@ -405,7 +464,7 @@ mpt2sas_ctl_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
405 } 464 }
406 break; 465 break;
407 case MPT2_IOC_AFTER_RESET: 466 case MPT2_IOC_AFTER_RESET:
408 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " 467 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
409 "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__)); 468 "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
410 if (ioc->ctl_cmds.status & MPT2_CMD_PENDING) { 469 if (ioc->ctl_cmds.status & MPT2_CMD_PENDING) {
411 ioc->ctl_cmds.status |= MPT2_CMD_RESET; 470 ioc->ctl_cmds.status |= MPT2_CMD_RESET;
@@ -414,7 +473,7 @@ mpt2sas_ctl_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
414 } 473 }
415 break; 474 break;
416 case MPT2_IOC_DONE_RESET: 475 case MPT2_IOC_DONE_RESET:
417 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " 476 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
418 "MPT2_IOC_DONE_RESET\n", ioc->name, __func__)); 477 "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
419 478
420 for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) { 479 for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
@@ -531,7 +590,7 @@ _ctl_set_task_mid(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
531 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 590 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
532 591
533 if (!found) { 592 if (!found) {
534 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " 593 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
535 "handle(0x%04x), lun(%d), no active mid!!\n", ioc->name, 594 "handle(0x%04x), lun(%d), no active mid!!\n", ioc->name,
536 desc, le16_to_cpu(tm_request->DevHandle), lun)); 595 desc, le16_to_cpu(tm_request->DevHandle), lun));
537 tm_reply = ioc->ctl_cmds.reply; 596 tm_reply = ioc->ctl_cmds.reply;
@@ -549,7 +608,7 @@ _ctl_set_task_mid(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
549 return 1; 608 return 1;
550 } 609 }
551 610
552 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " 611 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
553 "handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name, 612 "handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
554 desc, le16_to_cpu(tm_request->DevHandle), lun, 613 desc, le16_to_cpu(tm_request->DevHandle), lun,
555 le16_to_cpu(tm_request->TaskMID))); 614 le16_to_cpu(tm_request->TaskMID)));
@@ -567,7 +626,7 @@ static long
567_ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc, 626_ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
568 struct mpt2_ioctl_command karg, void __user *mf, enum block_state state) 627 struct mpt2_ioctl_command karg, void __user *mf, enum block_state state)
569{ 628{
570 MPI2RequestHeader_t *mpi_request; 629 MPI2RequestHeader_t *mpi_request = NULL, *request;
571 MPI2DefaultReply_t *mpi_reply; 630 MPI2DefaultReply_t *mpi_reply;
572 u32 ioc_state; 631 u32 ioc_state;
573 u16 ioc_status; 632 u16 ioc_status;
@@ -576,7 +635,6 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
576 u8 issue_reset; 635 u8 issue_reset;
577 u32 sz; 636 u32 sz;
578 void *psge; 637 void *psge;
579 void *priv_sense = NULL;
580 void *data_out = NULL; 638 void *data_out = NULL;
581 dma_addr_t data_out_dma; 639 dma_addr_t data_out_dma;
582 size_t data_out_sz = 0; 640 size_t data_out_sz = 0;
@@ -621,31 +679,50 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
621 printk(MPT2SAS_INFO_FMT "%s: ioc is operational\n", 679 printk(MPT2SAS_INFO_FMT "%s: ioc is operational\n",
622 ioc->name, __func__); 680 ioc->name, __func__);
623 681
624 smid = mpt2sas_base_get_smid_scsiio(ioc, ioc->ctl_cb_idx, NULL); 682 mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL);
625 if (!smid) { 683 if (!mpi_request) {
626 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n", 684 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a memory for "
627 ioc->name, __func__); 685 "mpi_request\n", ioc->name, __func__);
628 ret = -EAGAIN; 686 ret = -ENOMEM;
629 goto out; 687 goto out;
630 } 688 }
631 689
632 ret = 0;
633 ioc->ctl_cmds.status = MPT2_CMD_PENDING;
634 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
635 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
636 ioc->ctl_cmds.smid = smid;
637 data_out_sz = karg.data_out_size;
638 data_in_sz = karg.data_in_size;
639
640 /* copy in request message frame from user */ 690 /* copy in request message frame from user */
641 if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) { 691 if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) {
642 printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, 692 printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__,
643 __func__); 693 __func__);
644 ret = -EFAULT; 694 ret = -EFAULT;
645 mpt2sas_base_free_smid(ioc, smid);
646 goto out; 695 goto out;
647 } 696 }
648 697
698 if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
699 smid = mpt2sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx);
700 if (!smid) {
701 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
702 ioc->name, __func__);
703 ret = -EAGAIN;
704 goto out;
705 }
706 } else {
707
708 smid = mpt2sas_base_get_smid_scsiio(ioc, ioc->ctl_cb_idx, NULL);
709 if (!smid) {
710 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
711 ioc->name, __func__);
712 ret = -EAGAIN;
713 goto out;
714 }
715 }
716
717 ret = 0;
718 ioc->ctl_cmds.status = MPT2_CMD_PENDING;
719 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
720 request = mpt2sas_base_get_msg_frame(ioc, smid);
721 memcpy(request, mpi_request, karg.data_sge_offset*4);
722 ioc->ctl_cmds.smid = smid;
723 data_out_sz = karg.data_out_size;
724 data_in_sz = karg.data_in_size;
725
649 if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || 726 if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
650 mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { 727 mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
651 if (!le16_to_cpu(mpi_request->FunctionDependent1) || 728 if (!le16_to_cpu(mpi_request->FunctionDependent1) ||
@@ -691,7 +768,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
691 } 768 }
692 769
693 /* add scatter gather elements */ 770 /* add scatter gather elements */
694 psge = (void *)mpi_request + (karg.data_sge_offset*4); 771 psge = (void *)request + (karg.data_sge_offset*4);
695 772
696 if (!data_out_sz && !data_in_sz) { 773 if (!data_out_sz && !data_in_sz) {
697 mpt2sas_base_build_zero_len_sge(ioc, psge); 774 mpt2sas_base_build_zero_len_sge(ioc, psge);
@@ -739,11 +816,11 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
739 case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 816 case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
740 { 817 {
741 Mpi2SCSIIORequest_t *scsiio_request = 818 Mpi2SCSIIORequest_t *scsiio_request =
742 (Mpi2SCSIIORequest_t *)mpi_request; 819 (Mpi2SCSIIORequest_t *)request;
820 scsiio_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
743 scsiio_request->SenseBufferLowAddress = 821 scsiio_request->SenseBufferLowAddress =
744 mpt2sas_base_get_sense_buffer_dma(ioc, smid); 822 mpt2sas_base_get_sense_buffer_dma(ioc, smid);
745 priv_sense = mpt2sas_base_get_sense_buffer(ioc, smid); 823 memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
746 memset(priv_sense, 0, SCSI_SENSE_BUFFERSIZE);
747 if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) 824 if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)
748 mpt2sas_base_put_smid_scsi_io(ioc, smid, 825 mpt2sas_base_put_smid_scsi_io(ioc, smid,
749 le16_to_cpu(mpi_request->FunctionDependent1)); 826 le16_to_cpu(mpi_request->FunctionDependent1));
@@ -754,9 +831,9 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
754 case MPI2_FUNCTION_SCSI_TASK_MGMT: 831 case MPI2_FUNCTION_SCSI_TASK_MGMT:
755 { 832 {
756 Mpi2SCSITaskManagementRequest_t *tm_request = 833 Mpi2SCSITaskManagementRequest_t *tm_request =
757 (Mpi2SCSITaskManagementRequest_t *)mpi_request; 834 (Mpi2SCSITaskManagementRequest_t *)request;
758 835
759 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "TASK_MGMT: " 836 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "TASK_MGMT: "
760 "handle(0x%04x), task_type(0x%02x)\n", ioc->name, 837 "handle(0x%04x), task_type(0x%02x)\n", ioc->name,
761 le16_to_cpu(tm_request->DevHandle), tm_request->TaskType)); 838 le16_to_cpu(tm_request->DevHandle), tm_request->TaskType));
762 839
@@ -851,7 +928,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
851 Mpi2SCSITaskManagementReply_t *tm_reply = 928 Mpi2SCSITaskManagementReply_t *tm_reply =
852 (Mpi2SCSITaskManagementReply_t *)mpi_reply; 929 (Mpi2SCSITaskManagementReply_t *)mpi_reply;
853 930
854 printk(MPT2SAS_DEBUG_FMT "TASK_MGMT: " 931 printk(MPT2SAS_INFO_FMT "TASK_MGMT: "
855 "IOCStatus(0x%04x), IOCLogInfo(0x%08x), " 932 "IOCStatus(0x%04x), IOCLogInfo(0x%08x), "
856 "TerminationCount(0x%08x)\n", ioc->name, 933 "TerminationCount(0x%08x)\n", ioc->name,
857 le16_to_cpu(tm_reply->IOCStatus), 934 le16_to_cpu(tm_reply->IOCStatus),
@@ -887,7 +964,8 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
887 MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function == 964 MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function ==
888 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) { 965 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
889 sz = min_t(u32, karg.max_sense_bytes, SCSI_SENSE_BUFFERSIZE); 966 sz = min_t(u32, karg.max_sense_bytes, SCSI_SENSE_BUFFERSIZE);
890 if (copy_to_user(karg.sense_data_ptr, priv_sense, sz)) { 967 if (copy_to_user(karg.sense_data_ptr,
968 ioc->ctl_cmds.sense, sz)) {
891 printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, 969 printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
892 __LINE__, __func__); 970 __LINE__, __func__);
893 ret = -ENODATA; 971 ret = -ENODATA;
@@ -926,6 +1004,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
926 pci_free_consistent(ioc->pdev, data_out_sz, data_out, 1004 pci_free_consistent(ioc->pdev, data_out_sz, data_out,
927 data_out_dma); 1005 data_out_dma);
928 1006
1007 kfree(mpi_request);
929 ioc->ctl_cmds.status = MPT2_CMD_NOT_USED; 1008 ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
930 mutex_unlock(&ioc->ctl_cmds.mutex); 1009 mutex_unlock(&ioc->ctl_cmds.mutex);
931 return ret; 1010 return ret;
@@ -950,7 +1029,7 @@ _ctl_getiocinfo(void __user *arg)
950 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc) 1029 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
951 return -ENODEV; 1030 return -ENODEV;
952 1031
953 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name, 1032 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
954 __func__)); 1033 __func__));
955 1034
956 memset(&karg, 0 , sizeof(karg)); 1035 memset(&karg, 0 , sizeof(karg));
@@ -998,7 +1077,7 @@ _ctl_eventquery(void __user *arg)
998 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc) 1077 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
999 return -ENODEV; 1078 return -ENODEV;
1000 1079
1001 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name, 1080 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
1002 __func__)); 1081 __func__));
1003 1082
1004 karg.event_entries = MPT2SAS_CTL_EVENT_LOG_SIZE; 1083 karg.event_entries = MPT2SAS_CTL_EVENT_LOG_SIZE;
@@ -1031,7 +1110,7 @@ _ctl_eventenable(void __user *arg)
1031 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc) 1110 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1032 return -ENODEV; 1111 return -ENODEV;
1033 1112
1034 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name, 1113 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
1035 __func__)); 1114 __func__));
1036 1115
1037 if (ioc->event_log) 1116 if (ioc->event_log)
@@ -1073,7 +1152,7 @@ _ctl_eventreport(void __user *arg)
1073 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc) 1152 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1074 return -ENODEV; 1153 return -ENODEV;
1075 1154
1076 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name, 1155 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
1077 __func__)); 1156 __func__));
1078 1157
1079 number_bytes = karg.hdr.max_data_size - 1158 number_bytes = karg.hdr.max_data_size -
@@ -1118,7 +1197,7 @@ _ctl_do_reset(void __user *arg)
1118 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc) 1197 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1119 return -ENODEV; 1198 return -ENODEV;
1120 1199
1121 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name, 1200 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
1122 __func__)); 1201 __func__));
1123 1202
1124 retval = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, 1203 retval = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
@@ -1219,7 +1298,7 @@ _ctl_btdh_mapping(void __user *arg)
1219 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc) 1298 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1220 return -ENODEV; 1299 return -ENODEV;
1221 1300
1222 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, 1301 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1223 __func__)); 1302 __func__));
1224 1303
1225 rc = _ctl_btdh_search_sas_device(ioc, &karg); 1304 rc = _ctl_btdh_search_sas_device(ioc, &karg);
@@ -1288,7 +1367,7 @@ _ctl_diag_register_2(struct MPT2SAS_ADAPTER *ioc,
1288 u16 ioc_status; 1367 u16 ioc_status;
1289 u8 issue_reset = 0; 1368 u8 issue_reset = 0;
1290 1369
1291 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, 1370 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1292 __func__)); 1371 __func__));
1293 1372
1294 if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) { 1373 if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) {
@@ -1376,7 +1455,7 @@ _ctl_diag_register_2(struct MPT2SAS_ADAPTER *ioc,
1376 mpi_request->VF_ID = 0; /* TODO */ 1455 mpi_request->VF_ID = 0; /* TODO */
1377 mpi_request->VP_ID = 0; 1456 mpi_request->VP_ID = 0;
1378 1457
1379 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: diag_buffer(0x%p), " 1458 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: diag_buffer(0x%p), "
1380 "dma(0x%llx), sz(%d)\n", ioc->name, __func__, request_data, 1459 "dma(0x%llx), sz(%d)\n", ioc->name, __func__, request_data,
1381 (unsigned long long)request_data_dma, 1460 (unsigned long long)request_data_dma,
1382 le32_to_cpu(mpi_request->BufferLength))); 1461 le32_to_cpu(mpi_request->BufferLength)));
@@ -1414,10 +1493,10 @@ _ctl_diag_register_2(struct MPT2SAS_ADAPTER *ioc,
1414 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { 1493 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
1415 ioc->diag_buffer_status[buffer_type] |= 1494 ioc->diag_buffer_status[buffer_type] |=
1416 MPT2_DIAG_BUFFER_IS_REGISTERED; 1495 MPT2_DIAG_BUFFER_IS_REGISTERED;
1417 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: success\n", 1496 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: success\n",
1418 ioc->name, __func__)); 1497 ioc->name, __func__));
1419 } else { 1498 } else {
1420 printk(MPT2SAS_DEBUG_FMT "%s: ioc_status(0x%04x) " 1499 printk(MPT2SAS_INFO_FMT "%s: ioc_status(0x%04x) "
1421 "log_info(0x%08x)\n", ioc->name, __func__, 1500 "log_info(0x%08x)\n", ioc->name, __func__,
1422 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); 1501 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
1423 rc = -EFAULT; 1502 rc = -EFAULT;
@@ -1541,7 +1620,7 @@ _ctl_diag_unregister(void __user *arg)
1541 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc) 1620 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1542 return -ENODEV; 1621 return -ENODEV;
1543 1622
1544 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, 1623 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1545 __func__)); 1624 __func__));
1546 1625
1547 buffer_type = karg.unique_id & 0x000000ff; 1626 buffer_type = karg.unique_id & 0x000000ff;
@@ -1611,7 +1690,7 @@ _ctl_diag_query(void __user *arg)
1611 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc) 1690 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1612 return -ENODEV; 1691 return -ENODEV;
1613 1692
1614 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, 1693 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1615 __func__)); 1694 __func__));
1616 1695
1617 karg.application_flags = 0; 1696 karg.application_flags = 0;
@@ -1689,7 +1768,7 @@ _ctl_send_release(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type, u8 *issue_reset)
1689 int rc; 1768 int rc;
1690 unsigned long timeleft; 1769 unsigned long timeleft;
1691 1770
1692 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, 1771 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1693 __func__)); 1772 __func__));
1694 1773
1695 rc = 0; 1774 rc = 0;
@@ -1697,7 +1776,7 @@ _ctl_send_release(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type, u8 *issue_reset)
1697 1776
1698 ioc_state = mpt2sas_base_get_iocstate(ioc, 1); 1777 ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
1699 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 1778 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
1700 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " 1779 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
1701 "skipping due to FAULT state\n", ioc->name, 1780 "skipping due to FAULT state\n", ioc->name,
1702 __func__)); 1781 __func__));
1703 rc = -EAGAIN; 1782 rc = -EAGAIN;
@@ -1759,10 +1838,10 @@ _ctl_send_release(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type, u8 *issue_reset)
1759 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { 1838 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
1760 ioc->diag_buffer_status[buffer_type] |= 1839 ioc->diag_buffer_status[buffer_type] |=
1761 MPT2_DIAG_BUFFER_IS_RELEASED; 1840 MPT2_DIAG_BUFFER_IS_RELEASED;
1762 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: success\n", 1841 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: success\n",
1763 ioc->name, __func__)); 1842 ioc->name, __func__));
1764 } else { 1843 } else {
1765 printk(MPT2SAS_DEBUG_FMT "%s: ioc_status(0x%04x) " 1844 printk(MPT2SAS_INFO_FMT "%s: ioc_status(0x%04x) "
1766 "log_info(0x%08x)\n", ioc->name, __func__, 1845 "log_info(0x%08x)\n", ioc->name, __func__,
1767 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); 1846 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
1768 rc = -EFAULT; 1847 rc = -EFAULT;
@@ -1800,7 +1879,7 @@ _ctl_diag_release(void __user *arg, enum block_state state)
1800 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc) 1879 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1801 return -ENODEV; 1880 return -ENODEV;
1802 1881
1803 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, 1882 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1804 __func__)); 1883 __func__));
1805 1884
1806 buffer_type = karg.unique_id & 0x000000ff; 1885 buffer_type = karg.unique_id & 0x000000ff;
@@ -1896,7 +1975,7 @@ _ctl_diag_read_buffer(void __user *arg, enum block_state state)
1896 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc) 1975 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1897 return -ENODEV; 1976 return -ENODEV;
1898 1977
1899 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, 1978 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1900 __func__)); 1979 __func__));
1901 1980
1902 buffer_type = karg.unique_id & 0x000000ff; 1981 buffer_type = karg.unique_id & 0x000000ff;
@@ -1927,7 +2006,7 @@ _ctl_diag_read_buffer(void __user *arg, enum block_state state)
1927 } 2006 }
1928 2007
1929 diag_data = (void *)(request_data + karg.starting_offset); 2008 diag_data = (void *)(request_data + karg.starting_offset);
1930 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: diag_buffer(%p), " 2009 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: diag_buffer(%p), "
1931 "offset(%d), sz(%d)\n", ioc->name, __func__, 2010 "offset(%d), sz(%d)\n", ioc->name, __func__,
1932 diag_data, karg.starting_offset, karg.bytes_to_read)); 2011 diag_data, karg.starting_offset, karg.bytes_to_read));
1933 2012
@@ -1942,11 +2021,11 @@ _ctl_diag_read_buffer(void __user *arg, enum block_state state)
1942 if ((karg.flags & MPT2_FLAGS_REREGISTER) == 0) 2021 if ((karg.flags & MPT2_FLAGS_REREGISTER) == 0)
1943 return 0; 2022 return 0;
1944 2023
1945 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: Reregister " 2024 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: Reregister "
1946 "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type)); 2025 "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type));
1947 if ((ioc->diag_buffer_status[buffer_type] & 2026 if ((ioc->diag_buffer_status[buffer_type] &
1948 MPT2_DIAG_BUFFER_IS_RELEASED) == 0) { 2027 MPT2_DIAG_BUFFER_IS_RELEASED) == 0) {
1949 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " 2028 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
1950 "buffer_type(0x%02x) is still registered\n", ioc->name, 2029 "buffer_type(0x%02x) is still registered\n", ioc->name,
1951 __func__, buffer_type)); 2030 __func__, buffer_type));
1952 return 0; 2031 return 0;
@@ -2020,10 +2099,10 @@ _ctl_diag_read_buffer(void __user *arg, enum block_state state)
2020 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { 2099 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
2021 ioc->diag_buffer_status[buffer_type] |= 2100 ioc->diag_buffer_status[buffer_type] |=
2022 MPT2_DIAG_BUFFER_IS_REGISTERED; 2101 MPT2_DIAG_BUFFER_IS_REGISTERED;
2023 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: success\n", 2102 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: success\n",
2024 ioc->name, __func__)); 2103 ioc->name, __func__));
2025 } else { 2104 } else {
2026 printk(MPT2SAS_DEBUG_FMT "%s: ioc_status(0x%04x) " 2105 printk(MPT2SAS_INFO_FMT "%s: ioc_status(0x%04x) "
2027 "log_info(0x%08x)\n", ioc->name, __func__, 2106 "log_info(0x%08x)\n", ioc->name, __func__,
2028 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); 2107 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
2029 rc = -EFAULT; 2108 rc = -EFAULT;
@@ -2077,7 +2156,7 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg)
2077 !ioc) 2156 !ioc)
2078 return -ENODEV; 2157 return -ENODEV;
2079 2158
2080 if (ioc->shost_recovery) 2159 if (ioc->shost_recovery || ioc->pci_error_recovery)
2081 return -EAGAIN; 2160 return -EAGAIN;
2082 2161
2083 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_command)) { 2162 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_command)) {
@@ -2140,7 +2219,7 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg)
2140 !ioc) 2219 !ioc)
2141 return -ENODEV; 2220 return -ENODEV;
2142 2221
2143 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT 2222 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT
2144 "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd)); 2223 "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd));
2145 break; 2224 break;
2146 } 2225 }
@@ -2196,7 +2275,7 @@ _ctl_compat_mpt_command(struct file *file, unsigned cmd, unsigned long arg)
2196 if (_ctl_verify_adapter(karg32.hdr.ioc_number, &ioc) == -1 || !ioc) 2275 if (_ctl_verify_adapter(karg32.hdr.ioc_number, &ioc) == -1 || !ioc)
2197 return -ENODEV; 2276 return -ENODEV;
2198 2277
2199 if (ioc->shost_recovery) 2278 if (ioc->shost_recovery || ioc->pci_error_recovery)
2200 return -EAGAIN; 2279 return -EAGAIN;
2201 2280
2202 memset(&karg, 0, sizeof(struct mpt2_ioctl_command)); 2281 memset(&karg, 0, sizeof(struct mpt2_ioctl_command));
@@ -2581,6 +2660,218 @@ _ctl_fwfault_debug_store(struct device *cdev,
2581static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR, 2660static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR,
2582 _ctl_fwfault_debug_show, _ctl_fwfault_debug_store); 2661 _ctl_fwfault_debug_show, _ctl_fwfault_debug_store);
2583 2662
2663
2664/**
2665 * _ctl_ioc_reset_count_show - ioc reset count
2666 * @cdev - pointer to embedded class device
2667 * @buf - the buffer returned
2668 *
2669 * This is firmware queue depth limit
2670 *
2671 * A sysfs 'read-only' shost attribute.
2672 */
2673static ssize_t
2674_ctl_ioc_reset_count_show(struct device *cdev, struct device_attribute *attr,
2675 char *buf)
2676{
2677 struct Scsi_Host *shost = class_to_shost(cdev);
2678 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2679
2680 return snprintf(buf, PAGE_SIZE, "%08d\n", ioc->ioc_reset_count);
2681}
2682static DEVICE_ATTR(ioc_reset_count, S_IRUGO,
2683 _ctl_ioc_reset_count_show, NULL);
2684
2685struct DIAG_BUFFER_START {
2686 u32 Size;
2687 u32 DiagVersion;
2688 u8 BufferType;
2689 u8 Reserved[3];
2690 u32 Reserved1;
2691 u32 Reserved2;
2692 u32 Reserved3;
2693};
2694/**
2695 * _ctl_host_trace_buffer_size_show - host buffer size (trace only)
2696 * @cdev - pointer to embedded class device
2697 * @buf - the buffer returned
2698 *
2699 * A sysfs 'read-only' shost attribute.
2700 */
2701static ssize_t
2702_ctl_host_trace_buffer_size_show(struct device *cdev,
2703 struct device_attribute *attr, char *buf)
2704{
2705 struct Scsi_Host *shost = class_to_shost(cdev);
2706 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2707 u32 size = 0;
2708 struct DIAG_BUFFER_START *request_data;
2709
2710 if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
2711 printk(MPT2SAS_ERR_FMT "%s: host_trace_buffer is not "
2712 "registered\n", ioc->name, __func__);
2713 return 0;
2714 }
2715
2716 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
2717 MPT2_DIAG_BUFFER_IS_REGISTERED) == 0) {
2718 printk(MPT2SAS_ERR_FMT "%s: host_trace_buffer is not "
2719 "registered\n", ioc->name, __func__);
2720 return 0;
2721 }
2722
2723 request_data = (struct DIAG_BUFFER_START *)
2724 ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE];
2725 if ((le32_to_cpu(request_data->DiagVersion) == 0x00000000 ||
2726 le32_to_cpu(request_data->DiagVersion) == 0x01000000) &&
2727 le32_to_cpu(request_data->Reserved3) == 0x4742444c)
2728 size = le32_to_cpu(request_data->Size);
2729
2730 ioc->ring_buffer_sz = size;
2731 return snprintf(buf, PAGE_SIZE, "%d\n", size);
2732}
2733static DEVICE_ATTR(host_trace_buffer_size, S_IRUGO,
2734 _ctl_host_trace_buffer_size_show, NULL);
2735
2736/**
2737 * _ctl_host_trace_buffer_show - firmware ring buffer (trace only)
2738 * @cdev - pointer to embedded class device
2739 * @buf - the buffer returned
2740 *
2741 * A sysfs 'read/write' shost attribute.
2742 *
2743 * You will only be able to read 4k bytes of ring buffer at a time.
2744 * In order to read beyond 4k bytes, you will have to write out the
2745 * offset to the same attribute, it will move the pointer.
2746 */
2747static ssize_t
2748_ctl_host_trace_buffer_show(struct device *cdev, struct device_attribute *attr,
2749 char *buf)
2750{
2751 struct Scsi_Host *shost = class_to_shost(cdev);
2752 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2753 void *request_data;
2754 u32 size;
2755
2756 if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
2757 printk(MPT2SAS_ERR_FMT "%s: host_trace_buffer is not "
2758 "registered\n", ioc->name, __func__);
2759 return 0;
2760 }
2761
2762 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
2763 MPT2_DIAG_BUFFER_IS_REGISTERED) == 0) {
2764 printk(MPT2SAS_ERR_FMT "%s: host_trace_buffer is not "
2765 "registered\n", ioc->name, __func__);
2766 return 0;
2767 }
2768
2769 if (ioc->ring_buffer_offset > ioc->ring_buffer_sz)
2770 return 0;
2771
2772 size = ioc->ring_buffer_sz - ioc->ring_buffer_offset;
2773 size = (size > PAGE_SIZE) ? PAGE_SIZE : size;
2774 request_data = ioc->diag_buffer[0] + ioc->ring_buffer_offset;
2775 memcpy(buf, request_data, size);
2776 return size;
2777}
2778
2779static ssize_t
2780_ctl_host_trace_buffer_store(struct device *cdev, struct device_attribute *attr,
2781 const char *buf, size_t count)
2782{
2783 struct Scsi_Host *shost = class_to_shost(cdev);
2784 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2785 int val = 0;
2786
2787 if (sscanf(buf, "%d", &val) != 1)
2788 return -EINVAL;
2789
2790 ioc->ring_buffer_offset = val;
2791 return strlen(buf);
2792}
2793static DEVICE_ATTR(host_trace_buffer, S_IRUGO | S_IWUSR,
2794 _ctl_host_trace_buffer_show, _ctl_host_trace_buffer_store);
2795
2796/*****************************************/
2797
2798/**
2799 * _ctl_host_trace_buffer_enable_show - firmware ring buffer (trace only)
2800 * @cdev - pointer to embedded class device
2801 * @buf - the buffer returned
2802 *
2803 * A sysfs 'read/write' shost attribute.
2804 *
2805 * This is a mechnism to post/release host_trace_buffers
2806 */
2807static ssize_t
2808_ctl_host_trace_buffer_enable_show(struct device *cdev,
2809 struct device_attribute *attr, char *buf)
2810{
2811 struct Scsi_Host *shost = class_to_shost(cdev);
2812 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2813
2814 if ((!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) ||
2815 ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
2816 MPT2_DIAG_BUFFER_IS_REGISTERED) == 0))
2817 return snprintf(buf, PAGE_SIZE, "off\n");
2818 else if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
2819 MPT2_DIAG_BUFFER_IS_RELEASED))
2820 return snprintf(buf, PAGE_SIZE, "release\n");
2821 else
2822 return snprintf(buf, PAGE_SIZE, "post\n");
2823}
2824
2825static ssize_t
2826_ctl_host_trace_buffer_enable_store(struct device *cdev,
2827 struct device_attribute *attr, const char *buf, size_t count)
2828{
2829 struct Scsi_Host *shost = class_to_shost(cdev);
2830 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2831 char str[10] = "";
2832 struct mpt2_diag_register diag_register;
2833 u8 issue_reset = 0;
2834
2835 if (sscanf(buf, "%s", str) != 1)
2836 return -EINVAL;
2837
2838 if (!strcmp(str, "post")) {
2839 /* exit out if host buffers are already posted */
2840 if ((ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) &&
2841 (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
2842 MPT2_DIAG_BUFFER_IS_REGISTERED) &&
2843 ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
2844 MPT2_DIAG_BUFFER_IS_RELEASED) == 0))
2845 goto out;
2846 memset(&diag_register, 0, sizeof(struct mpt2_diag_register));
2847 printk(MPT2SAS_INFO_FMT "posting host trace buffers\n",
2848 ioc->name);
2849 diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
2850 diag_register.requested_buffer_size = (1024 * 1024);
2851 diag_register.unique_id = 0x7075900;
2852 ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0;
2853 _ctl_diag_register_2(ioc, &diag_register);
2854 } else if (!strcmp(str, "release")) {
2855 /* exit out if host buffers are already released */
2856 if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE])
2857 goto out;
2858 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
2859 MPT2_DIAG_BUFFER_IS_REGISTERED) == 0)
2860 goto out;
2861 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
2862 MPT2_DIAG_BUFFER_IS_RELEASED))
2863 goto out;
2864 printk(MPT2SAS_INFO_FMT "releasing host trace buffer\n",
2865 ioc->name);
2866 _ctl_send_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE, &issue_reset);
2867 }
2868
2869 out:
2870 return strlen(buf);
2871}
2872static DEVICE_ATTR(host_trace_buffer_enable, S_IRUGO | S_IWUSR,
2873 _ctl_host_trace_buffer_enable_show, _ctl_host_trace_buffer_enable_store);
2874
2584struct device_attribute *mpt2sas_host_attrs[] = { 2875struct device_attribute *mpt2sas_host_attrs[] = {
2585 &dev_attr_version_fw, 2876 &dev_attr_version_fw,
2586 &dev_attr_version_bios, 2877 &dev_attr_version_bios,
@@ -2597,6 +2888,10 @@ struct device_attribute *mpt2sas_host_attrs[] = {
2597 &dev_attr_fwfault_debug, 2888 &dev_attr_fwfault_debug,
2598 &dev_attr_fw_queue_depth, 2889 &dev_attr_fw_queue_depth,
2599 &dev_attr_host_sas_address, 2890 &dev_attr_host_sas_address,
2891 &dev_attr_ioc_reset_count,
2892 &dev_attr_host_trace_buffer_size,
2893 &dev_attr_host_trace_buffer,
2894 &dev_attr_host_trace_buffer_enable,
2600 NULL, 2895 NULL,
2601}; 2896};
2602 2897
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index c5ff26a2a51d..6273abd0535e 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -70,6 +70,8 @@ static void _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
70 struct _sas_node *sas_expander); 70 struct _sas_node *sas_expander);
71static void _firmware_event_work(struct work_struct *work); 71static void _firmware_event_work(struct work_struct *work);
72 72
73static u8 _scsih_check_for_pending_tm(struct MPT2SAS_ADAPTER *ioc, u16 smid);
74
73/* global parameters */ 75/* global parameters */
74LIST_HEAD(mpt2sas_ioc_list); 76LIST_HEAD(mpt2sas_ioc_list);
75 77
@@ -84,6 +86,7 @@ static u8 config_cb_idx = -1;
84static int mpt_ids; 86static int mpt_ids;
85 87
86static u8 tm_tr_cb_idx = -1 ; 88static u8 tm_tr_cb_idx = -1 ;
89static u8 tm_tr_volume_cb_idx = -1 ;
87static u8 tm_sas_control_cb_idx = -1; 90static u8 tm_sas_control_cb_idx = -1;
88 91
89/* command line options */ 92/* command line options */
@@ -223,9 +226,12 @@ static struct pci_device_id scsih_pci_table[] = {
223 PCI_ANY_ID, PCI_ANY_ID }, 226 PCI_ANY_ID, PCI_ANY_ID },
224 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6, 227 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
225 PCI_ANY_ID, PCI_ANY_ID }, 228 PCI_ANY_ID, PCI_ANY_ID },
226 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_7, 229 /* Mustang ~ 2308 */
230 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
231 PCI_ANY_ID, PCI_ANY_ID },
232 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
227 PCI_ANY_ID, PCI_ANY_ID }, 233 PCI_ANY_ID, PCI_ANY_ID },
228 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_8, 234 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
229 PCI_ANY_ID, PCI_ANY_ID }, 235 PCI_ANY_ID, PCI_ANY_ID },
230 {0} /* Terminating entry */ 236 {0} /* Terminating entry */
231}; 237};
@@ -432,7 +438,7 @@ _scsih_determine_boot_device(struct MPT2SAS_ADAPTER *ioc,
432 (ioc->bios_pg2.ReqBootDeviceForm & 438 (ioc->bios_pg2.ReqBootDeviceForm &
433 MPI2_BIOSPAGE2_FORM_MASK), 439 MPI2_BIOSPAGE2_FORM_MASK),
434 &ioc->bios_pg2.RequestedBootDevice)) { 440 &ioc->bios_pg2.RequestedBootDevice)) {
435 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT 441 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT
436 "%s: req_boot_device(0x%016llx)\n", 442 "%s: req_boot_device(0x%016llx)\n",
437 ioc->name, __func__, 443 ioc->name, __func__,
438 (unsigned long long)sas_address)); 444 (unsigned long long)sas_address));
@@ -447,7 +453,7 @@ _scsih_determine_boot_device(struct MPT2SAS_ADAPTER *ioc,
447 (ioc->bios_pg2.ReqAltBootDeviceForm & 453 (ioc->bios_pg2.ReqAltBootDeviceForm &
448 MPI2_BIOSPAGE2_FORM_MASK), 454 MPI2_BIOSPAGE2_FORM_MASK),
449 &ioc->bios_pg2.RequestedAltBootDevice)) { 455 &ioc->bios_pg2.RequestedAltBootDevice)) {
450 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT 456 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT
451 "%s: req_alt_boot_device(0x%016llx)\n", 457 "%s: req_alt_boot_device(0x%016llx)\n",
452 ioc->name, __func__, 458 ioc->name, __func__,
453 (unsigned long long)sas_address)); 459 (unsigned long long)sas_address));
@@ -462,7 +468,7 @@ _scsih_determine_boot_device(struct MPT2SAS_ADAPTER *ioc,
462 (ioc->bios_pg2.CurrentBootDeviceForm & 468 (ioc->bios_pg2.CurrentBootDeviceForm &
463 MPI2_BIOSPAGE2_FORM_MASK), 469 MPI2_BIOSPAGE2_FORM_MASK),
464 &ioc->bios_pg2.CurrentBootDevice)) { 470 &ioc->bios_pg2.CurrentBootDevice)) {
465 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT 471 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT
466 "%s: current_boot_device(0x%016llx)\n", 472 "%s: current_boot_device(0x%016llx)\n",
467 ioc->name, __func__, 473 ioc->name, __func__,
468 (unsigned long long)sas_address)); 474 (unsigned long long)sas_address));
@@ -563,7 +569,7 @@ _scsih_sas_device_add(struct MPT2SAS_ADAPTER *ioc,
563{ 569{
564 unsigned long flags; 570 unsigned long flags;
565 571
566 dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: handle" 572 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: handle"
567 "(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__, 573 "(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
568 sas_device->handle, (unsigned long long)sas_device->sas_address)); 574 sas_device->handle, (unsigned long long)sas_device->sas_address));
569 575
@@ -590,7 +596,7 @@ _scsih_sas_device_init_add(struct MPT2SAS_ADAPTER *ioc,
590{ 596{
591 unsigned long flags; 597 unsigned long flags;
592 598
593 dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: handle" 599 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: handle"
594 "(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__, 600 "(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
595 sas_device->handle, (unsigned long long)sas_device->sas_address)); 601 sas_device->handle, (unsigned long long)sas_device->sas_address));
596 602
@@ -692,7 +698,7 @@ _scsih_raid_device_add(struct MPT2SAS_ADAPTER *ioc,
692{ 698{
693 unsigned long flags; 699 unsigned long flags;
694 700
695 dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: handle" 701 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: handle"
696 "(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__, 702 "(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
697 raid_device->handle, (unsigned long long)raid_device->wwid)); 703 raid_device->handle, (unsigned long long)raid_device->wwid));
698 704
@@ -1223,7 +1229,7 @@ _scsih_target_alloc(struct scsi_target *starget)
1223 sas_device->starget = starget; 1229 sas_device->starget = starget;
1224 sas_device->id = starget->id; 1230 sas_device->id = starget->id;
1225 sas_device->channel = starget->channel; 1231 sas_device->channel = starget->channel;
1226 if (sas_device->hidden_raid_component) 1232 if (test_bit(sas_device->handle, ioc->pd_handles))
1227 sas_target_priv_data->flags |= 1233 sas_target_priv_data->flags |=
1228 MPT_TARGET_FLAGS_RAID_COMPONENT; 1234 MPT_TARGET_FLAGS_RAID_COMPONENT;
1229 } 1235 }
@@ -1746,9 +1752,10 @@ _scsih_slave_configure(struct scsi_device *sdev)
1746 } 1752 }
1747 1753
1748 sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " 1754 sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), "
1749 "sas_addr(0x%016llx), device_name(0x%016llx)\n", 1755 "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
1750 ds, sas_device->handle, 1756 ds, sas_device->handle,
1751 (unsigned long long)sas_device->sas_address, 1757 (unsigned long long)sas_device->sas_address,
1758 sas_device->phy,
1752 (unsigned long long)sas_device->device_name); 1759 (unsigned long long)sas_device->device_name);
1753 sdev_printk(KERN_INFO, sdev, "%s: " 1760 sdev_printk(KERN_INFO, sdev, "%s: "
1754 "enclosure_logical_id(0x%016llx), slot(%d)\n", ds, 1761 "enclosure_logical_id(0x%016llx), slot(%d)\n", ds,
@@ -1990,7 +1997,8 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
1990 goto err_out; 1997 goto err_out;
1991 } 1998 }
1992 1999
1993 if (ioc->shost_recovery || ioc->remove_host) { 2000 if (ioc->shost_recovery || ioc->remove_host ||
2001 ioc->pci_error_recovery) {
1994 printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n", 2002 printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
1995 __func__, ioc->name); 2003 __func__, ioc->name);
1996 rc = FAILED; 2004 rc = FAILED;
@@ -1999,7 +2007,7 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
1999 2007
2000 ioc_state = mpt2sas_base_get_iocstate(ioc, 0); 2008 ioc_state = mpt2sas_base_get_iocstate(ioc, 0);
2001 if (ioc_state & MPI2_DOORBELL_USED) { 2009 if (ioc_state & MPI2_DOORBELL_USED) {
2002 dhsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "unexpected doorbell " 2010 dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "unexpected doorbell "
2003 "active!\n", ioc->name)); 2011 "active!\n", ioc->name));
2004 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, 2012 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
2005 FORCE_BIG_HAMMER); 2013 FORCE_BIG_HAMMER);
@@ -2116,8 +2124,59 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
2116} 2124}
2117 2125
2118/** 2126/**
2127 * _scsih_tm_display_info - displays info about the device
2128 * @ioc: per adapter struct
2129 * @scmd: pointer to scsi command object
2130 *
2131 * Called by task management callback handlers.
2132 */
2133static void
2134_scsih_tm_display_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
2135{
2136 struct scsi_target *starget = scmd->device->sdev_target;
2137 struct MPT2SAS_TARGET *priv_target = starget->hostdata;
2138 struct _sas_device *sas_device = NULL;
2139 unsigned long flags;
2140
2141 if (!priv_target)
2142 return;
2143
2144 scsi_print_command(scmd);
2145 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
2146 starget_printk(KERN_INFO, starget, "volume handle(0x%04x), "
2147 "volume wwid(0x%016llx)\n",
2148 priv_target->handle,
2149 (unsigned long long)priv_target->sas_address);
2150 } else {
2151 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2152 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
2153 priv_target->sas_address);
2154 if (sas_device) {
2155 if (priv_target->flags &
2156 MPT_TARGET_FLAGS_RAID_COMPONENT) {
2157 starget_printk(KERN_INFO, starget,
2158 "volume handle(0x%04x), "
2159 "volume wwid(0x%016llx)\n",
2160 sas_device->volume_handle,
2161 (unsigned long long)sas_device->volume_wwid);
2162 }
2163 starget_printk(KERN_INFO, starget,
2164 "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
2165 sas_device->handle,
2166 (unsigned long long)sas_device->sas_address,
2167 sas_device->phy);
2168 starget_printk(KERN_INFO, starget,
2169 "enclosure_logical_id(0x%016llx), slot(%d)\n",
2170 (unsigned long long)sas_device->enclosure_logical_id,
2171 sas_device->slot);
2172 }
2173 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2174 }
2175}
2176
2177/**
2119 * _scsih_abort - eh threads main abort routine 2178 * _scsih_abort - eh threads main abort routine
2120 * @sdev: scsi device struct 2179 * @scmd: pointer to scsi command object
2121 * 2180 *
2122 * Returns SUCCESS if command aborted else FAILED 2181 * Returns SUCCESS if command aborted else FAILED
2123 */ 2182 */
@@ -2130,14 +2189,14 @@ _scsih_abort(struct scsi_cmnd *scmd)
2130 u16 handle; 2189 u16 handle;
2131 int r; 2190 int r;
2132 2191
2133 printk(MPT2SAS_INFO_FMT "attempting task abort! scmd(%p)\n", 2192 sdev_printk(KERN_INFO, scmd->device, "attempting task abort! "
2134 ioc->name, scmd); 2193 "scmd(%p)\n", scmd);
2135 scsi_print_command(scmd); 2194 _scsih_tm_display_info(ioc, scmd);
2136 2195
2137 sas_device_priv_data = scmd->device->hostdata; 2196 sas_device_priv_data = scmd->device->hostdata;
2138 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { 2197 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
2139 printk(MPT2SAS_INFO_FMT "device been deleted! scmd(%p)\n", 2198 sdev_printk(KERN_INFO, scmd->device, "device been deleted! "
2140 ioc->name, scmd); 2199 "scmd(%p)\n", scmd);
2141 scmd->result = DID_NO_CONNECT << 16; 2200 scmd->result = DID_NO_CONNECT << 16;
2142 scmd->scsi_done(scmd); 2201 scmd->scsi_done(scmd);
2143 r = SUCCESS; 2202 r = SUCCESS;
@@ -2169,14 +2228,14 @@ _scsih_abort(struct scsi_cmnd *scmd)
2169 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, scmd); 2228 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, scmd);
2170 2229
2171 out: 2230 out:
2172 printk(MPT2SAS_INFO_FMT "task abort: %s scmd(%p)\n", 2231 sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
2173 ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 2232 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
2174 return r; 2233 return r;
2175} 2234}
2176 2235
2177/** 2236/**
2178 * _scsih_dev_reset - eh threads main device reset routine 2237 * _scsih_dev_reset - eh threads main device reset routine
2179 * @sdev: scsi device struct 2238 * @scmd: pointer to scsi command object
2180 * 2239 *
2181 * Returns SUCCESS if command aborted else FAILED 2240 * Returns SUCCESS if command aborted else FAILED
2182 */ 2241 */
@@ -2190,14 +2249,16 @@ _scsih_dev_reset(struct scsi_cmnd *scmd)
2190 u16 handle; 2249 u16 handle;
2191 int r; 2250 int r;
2192 2251
2193 printk(MPT2SAS_INFO_FMT "attempting device reset! scmd(%p)\n", 2252 struct scsi_target *starget = scmd->device->sdev_target;
2194 ioc->name, scmd); 2253
2195 scsi_print_command(scmd); 2254 starget_printk(KERN_INFO, starget, "attempting target reset! "
2255 "scmd(%p)\n", scmd);
2256 _scsih_tm_display_info(ioc, scmd);
2196 2257
2197 sas_device_priv_data = scmd->device->hostdata; 2258 sas_device_priv_data = scmd->device->hostdata;
2198 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { 2259 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
2199 printk(MPT2SAS_INFO_FMT "device been deleted! scmd(%p)\n", 2260 starget_printk(KERN_INFO, starget, "target been deleted! "
2200 ioc->name, scmd); 2261 "scmd(%p)\n", scmd);
2201 scmd->result = DID_NO_CONNECT << 16; 2262 scmd->result = DID_NO_CONNECT << 16;
2202 scmd->scsi_done(scmd); 2263 scmd->scsi_done(scmd);
2203 r = SUCCESS; 2264 r = SUCCESS;
@@ -2228,14 +2289,14 @@ _scsih_dev_reset(struct scsi_cmnd *scmd)
2228 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, scmd); 2289 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, scmd);
2229 2290
2230 out: 2291 out:
2231 printk(MPT2SAS_INFO_FMT "device reset: %s scmd(%p)\n", 2292 sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
2232 ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 2293 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
2233 return r; 2294 return r;
2234} 2295}
2235 2296
2236/** 2297/**
2237 * _scsih_target_reset - eh threads main target reset routine 2298 * _scsih_target_reset - eh threads main target reset routine
2238 * @sdev: scsi device struct 2299 * @scmd: pointer to scsi command object
2239 * 2300 *
2240 * Returns SUCCESS if command aborted else FAILED 2301 * Returns SUCCESS if command aborted else FAILED
2241 */ 2302 */
@@ -2248,15 +2309,16 @@ _scsih_target_reset(struct scsi_cmnd *scmd)
2248 unsigned long flags; 2309 unsigned long flags;
2249 u16 handle; 2310 u16 handle;
2250 int r; 2311 int r;
2312 struct scsi_target *starget = scmd->device->sdev_target;
2251 2313
2252 printk(MPT2SAS_INFO_FMT "attempting target reset! scmd(%p)\n", 2314 starget_printk(KERN_INFO, starget, "attempting target reset! "
2253 ioc->name, scmd); 2315 "scmd(%p)\n", scmd);
2254 scsi_print_command(scmd); 2316 _scsih_tm_display_info(ioc, scmd);
2255 2317
2256 sas_device_priv_data = scmd->device->hostdata; 2318 sas_device_priv_data = scmd->device->hostdata;
2257 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { 2319 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
2258 printk(MPT2SAS_INFO_FMT "target been deleted! scmd(%p)\n", 2320 starget_printk(KERN_INFO, starget, "target been deleted! "
2259 ioc->name, scmd); 2321 "scmd(%p)\n", scmd);
2260 scmd->result = DID_NO_CONNECT << 16; 2322 scmd->result = DID_NO_CONNECT << 16;
2261 scmd->scsi_done(scmd); 2323 scmd->scsi_done(scmd);
2262 r = SUCCESS; 2324 r = SUCCESS;
@@ -2287,14 +2349,14 @@ _scsih_target_reset(struct scsi_cmnd *scmd)
2287 30, scmd); 2349 30, scmd);
2288 2350
2289 out: 2351 out:
2290 printk(MPT2SAS_INFO_FMT "target reset: %s scmd(%p)\n", 2352 starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
2291 ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 2353 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
2292 return r; 2354 return r;
2293} 2355}
2294 2356
2295/** 2357/**
2296 * _scsih_host_reset - eh threads main host reset routine 2358 * _scsih_host_reset - eh threads main host reset routine
2297 * @sdev: scsi device struct 2359 * @scmd: pointer to scsi command object
2298 * 2360 *
2299 * Returns SUCCESS if command aborted else FAILED 2361 * Returns SUCCESS if command aborted else FAILED
2300 */ 2362 */
@@ -2579,20 +2641,31 @@ _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
2579 Mpi2SCSITaskManagementRequest_t *mpi_request; 2641 Mpi2SCSITaskManagementRequest_t *mpi_request;
2580 u16 smid; 2642 u16 smid;
2581 struct _sas_device *sas_device; 2643 struct _sas_device *sas_device;
2644 struct MPT2SAS_TARGET *sas_target_priv_data;
2582 unsigned long flags; 2645 unsigned long flags;
2583 struct _tr_list *delayed_tr; 2646 struct _tr_list *delayed_tr;
2584 2647
2585 if (ioc->shost_recovery || ioc->remove_host) { 2648 if (ioc->shost_recovery || ioc->remove_host ||
2649 ioc->pci_error_recovery) {
2586 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host reset in " 2650 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host reset in "
2587 "progress!\n", __func__, ioc->name)); 2651 "progress!\n", __func__, ioc->name));
2588 return; 2652 return;
2589 } 2653 }
2590 2654
2655 /* if PD, then return */
2656 if (test_bit(handle, ioc->pd_handles))
2657 return;
2658
2591 spin_lock_irqsave(&ioc->sas_device_lock, flags); 2659 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2592 sas_device = _scsih_sas_device_find_by_handle(ioc, handle); 2660 sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
2593 if (sas_device && sas_device->hidden_raid_component) { 2661 if (sas_device && sas_device->starget &&
2594 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 2662 sas_device->starget->hostdata) {
2595 return; 2663 sas_target_priv_data = sas_device->starget->hostdata;
2664 sas_target_priv_data->deleted = 1;
2665 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
2666 "setting delete flag: handle(0x%04x), "
2667 "sas_addr(0x%016llx)\n", ioc->name, handle,
2668 (unsigned long long) sas_device->sas_address));
2596 } 2669 }
2597 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 2670 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2598 2671
@@ -2655,6 +2728,101 @@ _scsih_sas_control_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid,
2655} 2728}
2656 2729
2657/** 2730/**
2731 * _scsih_tm_tr_volume_send - send target reset request for volumes
2732 * @ioc: per adapter object
2733 * @handle: device handle
2734 * Context: interrupt time.
2735 *
2736 * This is designed to send muliple task management request at the same
2737 * time to the fifo. If the fifo is full, we will append the request,
2738 * and process it in a future completion.
2739 */
2740static void
2741_scsih_tm_tr_volume_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
2742{
2743 Mpi2SCSITaskManagementRequest_t *mpi_request;
2744 u16 smid;
2745 struct _tr_list *delayed_tr;
2746
2747 if (ioc->shost_recovery || ioc->remove_host ||
2748 ioc->pci_error_recovery) {
2749 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host reset in "
2750 "progress!\n", __func__, ioc->name));
2751 return;
2752 }
2753
2754 smid = mpt2sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
2755 if (!smid) {
2756 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
2757 if (!delayed_tr)
2758 return;
2759 INIT_LIST_HEAD(&delayed_tr->list);
2760 delayed_tr->handle = handle;
2761 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
2762 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
2763 "DELAYED:tr:handle(0x%04x), (open)\n",
2764 ioc->name, handle));
2765 return;
2766 }
2767
2768 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "tr_send:handle(0x%04x), "
2769 "(open), smid(%d), cb(%d)\n", ioc->name, handle, smid,
2770 ioc->tm_tr_volume_cb_idx));
2771 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
2772 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
2773 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2774 mpi_request->DevHandle = cpu_to_le16(handle);
2775 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
2776 mpt2sas_base_put_smid_hi_priority(ioc, smid);
2777}
2778
2779/**
2780 * _scsih_tm_volume_tr_complete - target reset completion
2781 * @ioc: per adapter object
2782 * @smid: system request message index
2783 * @msix_index: MSIX table index supplied by the OS
2784 * @reply: reply message frame(lower 32bit addr)
2785 * Context: interrupt time.
2786 *
2787 * Return 1 meaning mf should be freed from _base_interrupt
2788 * 0 means the mf is freed from this function.
2789 */
2790static u8
2791_scsih_tm_volume_tr_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid,
2792 u8 msix_index, u32 reply)
2793{
2794 u16 handle;
2795 Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
2796 Mpi2SCSITaskManagementReply_t *mpi_reply =
2797 mpt2sas_base_get_reply_virt_addr(ioc, reply);
2798
2799 if (ioc->shost_recovery || ioc->remove_host ||
2800 ioc->pci_error_recovery) {
2801 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host reset in "
2802 "progress!\n", __func__, ioc->name));
2803 return 1;
2804 }
2805
2806 mpi_request_tm = mpt2sas_base_get_msg_frame(ioc, smid);
2807 handle = le16_to_cpu(mpi_request_tm->DevHandle);
2808 if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
2809 dewtprintk(ioc, printk("spurious interrupt: "
2810 "handle(0x%04x:0x%04x), smid(%d)!!!\n", handle,
2811 le16_to_cpu(mpi_reply->DevHandle), smid));
2812 return 0;
2813 }
2814
2815 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
2816 "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
2817 "loginfo(0x%08x), completed(%d)\n", ioc->name,
2818 handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
2819 le32_to_cpu(mpi_reply->IOCLogInfo),
2820 le32_to_cpu(mpi_reply->TerminationCount)));
2821
2822 return _scsih_check_for_pending_tm(ioc, smid);
2823}
2824
2825/**
2658 * _scsih_tm_tr_complete - 2826 * _scsih_tm_tr_complete -
2659 * @ioc: per adapter object 2827 * @ioc: per adapter object
2660 * @smid: system request message index 2828 * @smid: system request message index
@@ -2680,9 +2848,9 @@ _scsih_tm_tr_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
2680 mpt2sas_base_get_reply_virt_addr(ioc, reply); 2848 mpt2sas_base_get_reply_virt_addr(ioc, reply);
2681 Mpi2SasIoUnitControlRequest_t *mpi_request; 2849 Mpi2SasIoUnitControlRequest_t *mpi_request;
2682 u16 smid_sas_ctrl; 2850 u16 smid_sas_ctrl;
2683 struct _tr_list *delayed_tr;
2684 2851
2685 if (ioc->shost_recovery || ioc->remove_host) { 2852 if (ioc->shost_recovery || ioc->remove_host ||
2853 ioc->pci_error_recovery) {
2686 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host reset in " 2854 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host reset in "
2687 "progress!\n", __func__, ioc->name)); 2855 "progress!\n", __func__, ioc->name));
2688 return 1; 2856 return 1;
@@ -2721,6 +2889,35 @@ _scsih_tm_tr_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
2721 mpi_request->DevHandle = mpi_request_tm->DevHandle; 2889 mpi_request->DevHandle = mpi_request_tm->DevHandle;
2722 mpt2sas_base_put_smid_default(ioc, smid_sas_ctrl); 2890 mpt2sas_base_put_smid_default(ioc, smid_sas_ctrl);
2723 2891
2892 return _scsih_check_for_pending_tm(ioc, smid);
2893}
2894
2895/**
2896 * _scsih_check_for_pending_tm - check for pending task management
2897 * @ioc: per adapter object
2898 * @smid: system request message index
2899 *
2900 * This will check delayed target reset list, and feed the
2901 * next reqeust.
2902 *
2903 * Return 1 meaning mf should be freed from _base_interrupt
2904 * 0 means the mf is freed from this function.
2905 */
2906static u8
2907_scsih_check_for_pending_tm(struct MPT2SAS_ADAPTER *ioc, u16 smid)
2908{
2909 struct _tr_list *delayed_tr;
2910
2911 if (!list_empty(&ioc->delayed_tr_volume_list)) {
2912 delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
2913 struct _tr_list, list);
2914 mpt2sas_base_free_smid(ioc, smid);
2915 _scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
2916 list_del(&delayed_tr->list);
2917 kfree(delayed_tr);
2918 return 0;
2919 }
2920
2724 if (!list_empty(&ioc->delayed_tr_list)) { 2921 if (!list_empty(&ioc->delayed_tr_list)) {
2725 delayed_tr = list_entry(ioc->delayed_tr_list.next, 2922 delayed_tr = list_entry(ioc->delayed_tr_list.next,
2726 struct _tr_list, list); 2923 struct _tr_list, list);
@@ -2728,8 +2925,9 @@ _scsih_tm_tr_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
2728 _scsih_tm_tr_send(ioc, delayed_tr->handle); 2925 _scsih_tm_tr_send(ioc, delayed_tr->handle);
2729 list_del(&delayed_tr->list); 2926 list_del(&delayed_tr->list);
2730 kfree(delayed_tr); 2927 kfree(delayed_tr);
2731 return 0; /* tells base_interrupt not to free mf */ 2928 return 0;
2732 } 2929 }
2930
2733 return 1; 2931 return 1;
2734} 2932}
2735 2933
@@ -2803,7 +3001,7 @@ _scsih_check_topo_delete_events(struct MPT2SAS_ADAPTER *ioc,
2803 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) { 3001 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
2804 if (le16_to_cpu(local_event_data->ExpanderDevHandle) == 3002 if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
2805 expander_handle) { 3003 expander_handle) {
2806 dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT 3004 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
2807 "setting ignoring flag\n", ioc->name)); 3005 "setting ignoring flag\n", ioc->name));
2808 fw_event->ignore = 1; 3006 fw_event->ignore = 1;
2809 } 3007 }
@@ -2813,6 +3011,165 @@ _scsih_check_topo_delete_events(struct MPT2SAS_ADAPTER *ioc,
2813} 3011}
2814 3012
2815/** 3013/**
3014 * _scsih_set_volume_delete_flag - setting volume delete flag
3015 * @ioc: per adapter object
3016 * @handle: device handle
3017 *
3018 * This
3019 * Return nothing.
3020 */
3021static void
3022_scsih_set_volume_delete_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle)
3023{
3024 struct _raid_device *raid_device;
3025 struct MPT2SAS_TARGET *sas_target_priv_data;
3026 unsigned long flags;
3027
3028 spin_lock_irqsave(&ioc->raid_device_lock, flags);
3029 raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
3030 if (raid_device && raid_device->starget &&
3031 raid_device->starget->hostdata) {
3032 sas_target_priv_data =
3033 raid_device->starget->hostdata;
3034 sas_target_priv_data->deleted = 1;
3035 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
3036 "setting delete flag: handle(0x%04x), "
3037 "wwid(0x%016llx)\n", ioc->name, handle,
3038 (unsigned long long) raid_device->wwid));
3039 }
3040 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
3041}
3042
3043/**
3044 * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
3045 * @handle: input handle
3046 * @a: handle for volume a
3047 * @b: handle for volume b
3048 *
3049 * IR firmware only supports two raid volumes. The purpose of this
3050 * routine is to set the volume handle in either a or b. When the given
3051 * input handle is non-zero, or when a and b have not been set before.
3052 */
3053static void
3054_scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
3055{
3056 if (!handle || handle == *a || handle == *b)
3057 return;
3058 if (!*a)
3059 *a = handle;
3060 else if (!*b)
3061 *b = handle;
3062}
3063
3064/**
3065 * _scsih_check_ir_config_unhide_events - check for UNHIDE events
3066 * @ioc: per adapter object
3067 * @event_data: the event data payload
3068 * Context: interrupt time.
3069 *
3070 * This routine will send target reset to volume, followed by target
3071 * resets to the PDs. This is called when a PD has been removed, or
3072 * volume has been deleted or removed. When the target reset is sent
3073 * to volume, the PD target resets need to be queued to start upon
3074 * completion of the volume target reset.
3075 *
3076 * Return nothing.
3077 */
3078static void
3079_scsih_check_ir_config_unhide_events(struct MPT2SAS_ADAPTER *ioc,
3080 Mpi2EventDataIrConfigChangeList_t *event_data)
3081{
3082 Mpi2EventIrConfigElement_t *element;
3083 int i;
3084 u16 handle, volume_handle, a, b;
3085 struct _tr_list *delayed_tr;
3086
3087 a = 0;
3088 b = 0;
3089
3090 /* Volume Resets for Deleted or Removed */
3091 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
3092 for (i = 0; i < event_data->NumElements; i++, element++) {
3093 if (element->ReasonCode ==
3094 MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
3095 element->ReasonCode ==
3096 MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
3097 volume_handle = le16_to_cpu(element->VolDevHandle);
3098 _scsih_set_volume_delete_flag(ioc, volume_handle);
3099 _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
3100 }
3101 }
3102
3103 /* Volume Resets for UNHIDE events */
3104 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
3105 for (i = 0; i < event_data->NumElements; i++, element++) {
3106 if (le32_to_cpu(event_data->Flags) &
3107 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
3108 continue;
3109 if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
3110 volume_handle = le16_to_cpu(element->VolDevHandle);
3111 _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
3112 }
3113 }
3114
3115 if (a)
3116 _scsih_tm_tr_volume_send(ioc, a);
3117 if (b)
3118 _scsih_tm_tr_volume_send(ioc, b);
3119
3120 /* PD target resets */
3121 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
3122 for (i = 0; i < event_data->NumElements; i++, element++) {
3123 if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
3124 continue;
3125 handle = le16_to_cpu(element->PhysDiskDevHandle);
3126 volume_handle = le16_to_cpu(element->VolDevHandle);
3127 clear_bit(handle, ioc->pd_handles);
3128 if (!volume_handle)
3129 _scsih_tm_tr_send(ioc, handle);
3130 else if (volume_handle == a || volume_handle == b) {
3131 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
3132 BUG_ON(!delayed_tr);
3133 INIT_LIST_HEAD(&delayed_tr->list);
3134 delayed_tr->handle = handle;
3135 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
3136 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
3137 "DELAYED:tr:handle(0x%04x), (open)\n", ioc->name,
3138 handle));
3139 } else
3140 _scsih_tm_tr_send(ioc, handle);
3141 }
3142}
3143
3144
3145/**
3146 * _scsih_check_volume_delete_events - set delete flag for volumes
3147 * @ioc: per adapter object
3148 * @event_data: the event data payload
3149 * Context: interrupt time.
3150 *
3151 * This will handle the case when the cable connected to entire volume is
3152 * pulled. We will take care of setting the deleted flag so normal IO will
3153 * not be sent.
3154 *
3155 * Return nothing.
3156 */
3157static void
3158_scsih_check_volume_delete_events(struct MPT2SAS_ADAPTER *ioc,
3159 Mpi2EventDataIrVolume_t *event_data)
3160{
3161 u32 state;
3162
3163 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
3164 return;
3165 state = le32_to_cpu(event_data->NewValue);
3166 if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
3167 MPI2_RAID_VOL_STATE_FAILED)
3168 _scsih_set_volume_delete_flag(ioc,
3169 le16_to_cpu(event_data->VolDevHandle));
3170}
3171
3172/**
2816 * _scsih_flush_running_cmds - completing outstanding commands. 3173 * _scsih_flush_running_cmds - completing outstanding commands.
2817 * @ioc: per adapter object 3174 * @ioc: per adapter object
2818 * 3175 *
@@ -2835,7 +3192,10 @@ _scsih_flush_running_cmds(struct MPT2SAS_ADAPTER *ioc)
2835 count++; 3192 count++;
2836 mpt2sas_base_free_smid(ioc, smid); 3193 mpt2sas_base_free_smid(ioc, smid);
2837 scsi_dma_unmap(scmd); 3194 scsi_dma_unmap(scmd);
2838 scmd->result = DID_RESET << 16; 3195 if (ioc->pci_error_recovery)
3196 scmd->result = DID_NO_CONNECT << 16;
3197 else
3198 scmd->result = DID_RESET << 16;
2839 scmd->scsi_done(scmd); 3199 scmd->scsi_done(scmd);
2840 } 3200 }
2841 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "completing %d cmds\n", 3201 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "completing %d cmds\n",
@@ -2858,9 +3218,7 @@ _scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
2858 unsigned char prot_op = scsi_get_prot_op(scmd); 3218 unsigned char prot_op = scsi_get_prot_op(scmd);
2859 unsigned char prot_type = scsi_get_prot_type(scmd); 3219 unsigned char prot_type = scsi_get_prot_type(scmd);
2860 3220
2861 if (prot_type == SCSI_PROT_DIF_TYPE0 || 3221 if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
2862 prot_type == SCSI_PROT_DIF_TYPE2 ||
2863 prot_op == SCSI_PROT_NORMAL)
2864 return; 3222 return;
2865 3223
2866 if (prot_op == SCSI_PROT_READ_STRIP) 3224 if (prot_op == SCSI_PROT_READ_STRIP)
@@ -2882,7 +3240,13 @@ _scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
2882 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; 3240 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
2883 mpi_request->CDB.EEDP32.PrimaryReferenceTag = 3241 mpi_request->CDB.EEDP32.PrimaryReferenceTag =
2884 cpu_to_be32(scsi_get_lba(scmd)); 3242 cpu_to_be32(scsi_get_lba(scmd));
3243 break;
2885 3244
3245 case SCSI_PROT_DIF_TYPE2:
3246
3247 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
3248 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
3249 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
2886 break; 3250 break;
2887 3251
2888 case SCSI_PROT_DIF_TYPE3: 3252 case SCSI_PROT_DIF_TYPE3:
@@ -2968,6 +3332,12 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
2968 return 0; 3332 return 0;
2969 } 3333 }
2970 3334
3335 if (ioc->pci_error_recovery) {
3336 scmd->result = DID_NO_CONNECT << 16;
3337 scmd->scsi_done(scmd);
3338 return 0;
3339 }
3340
2971 sas_target_priv_data = sas_device_priv_data->sas_target; 3341 sas_target_priv_data = sas_device_priv_data->sas_target;
2972 /* invalid device handle */ 3342 /* invalid device handle */
2973 if (sas_target_priv_data->handle == MPT2SAS_INVALID_DEVICE_HANDLE) { 3343 if (sas_target_priv_data->handle == MPT2SAS_INVALID_DEVICE_HANDLE) {
@@ -3013,7 +3383,7 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
3013 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; 3383 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
3014 /* Make sure Device is not raid volume */ 3384 /* Make sure Device is not raid volume */
3015 if (!_scsih_is_raid(&scmd->device->sdev_gendev) && 3385 if (!_scsih_is_raid(&scmd->device->sdev_gendev) &&
3016 sas_is_tlr_enabled(scmd->device)) 3386 sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
3017 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON; 3387 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
3018 3388
3019 smid = mpt2sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd); 3389 smid = mpt2sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
@@ -3025,6 +3395,8 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
3025 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); 3395 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
3026 memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t)); 3396 memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t));
3027 _scsih_setup_eedp(scmd, mpi_request); 3397 _scsih_setup_eedp(scmd, mpi_request);
3398 if (scmd->cmd_len == 32)
3399 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
3028 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 3400 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
3029 if (sas_device_priv_data->sas_target->flags & 3401 if (sas_device_priv_data->sas_target->flags &
3030 MPT_TARGET_FLAGS_RAID_COMPONENT) 3402 MPT_TARGET_FLAGS_RAID_COMPONENT)
@@ -3119,6 +3491,13 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
3119 char *desc_scsi_status = NULL; 3491 char *desc_scsi_status = NULL;
3120 char *desc_scsi_state = ioc->tmp_string; 3492 char *desc_scsi_state = ioc->tmp_string;
3121 u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo); 3493 u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
3494 struct _sas_device *sas_device = NULL;
3495 unsigned long flags;
3496 struct scsi_target *starget = scmd->device->sdev_target;
3497 struct MPT2SAS_TARGET *priv_target = starget->hostdata;
3498
3499 if (!priv_target)
3500 return;
3122 3501
3123 if (log_info == 0x31170000) 3502 if (log_info == 0x31170000)
3124 return; 3503 return;
@@ -3234,10 +3613,29 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
3234 strcat(desc_scsi_state, "autosense valid "); 3613 strcat(desc_scsi_state, "autosense valid ");
3235 3614
3236 scsi_print_command(scmd); 3615 scsi_print_command(scmd);
3237 printk(MPT2SAS_WARN_FMT "\tdev handle(0x%04x), " 3616
3238 "ioc_status(%s)(0x%04x), smid(%d)\n", ioc->name, 3617 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
3239 le16_to_cpu(mpi_reply->DevHandle), desc_ioc_state, 3618 printk(MPT2SAS_WARN_FMT "\tvolume wwid(0x%016llx)\n", ioc->name,
3240 ioc_status, smid); 3619 (unsigned long long)priv_target->sas_address);
3620 } else {
3621 spin_lock_irqsave(&ioc->sas_device_lock, flags);
3622 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
3623 priv_target->sas_address);
3624 if (sas_device) {
3625 printk(MPT2SAS_WARN_FMT "\tsas_address(0x%016llx), "
3626 "phy(%d)\n", ioc->name, sas_device->sas_address,
3627 sas_device->phy);
3628 printk(MPT2SAS_WARN_FMT
3629 "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
3630 ioc->name, sas_device->enclosure_logical_id,
3631 sas_device->slot);
3632 }
3633 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3634 }
3635
3636 printk(MPT2SAS_WARN_FMT "\thandle(0x%04x), ioc_status(%s)(0x%04x), "
3637 "smid(%d)\n", ioc->name, le16_to_cpu(mpi_reply->DevHandle),
3638 desc_ioc_state, ioc_status, smid);
3241 printk(MPT2SAS_WARN_FMT "\trequest_len(%d), underflow(%d), " 3639 printk(MPT2SAS_WARN_FMT "\trequest_len(%d), underflow(%d), "
3242 "resid(%d)\n", ioc->name, scsi_bufflen(scmd), scmd->underflow, 3640 "resid(%d)\n", ioc->name, scsi_bufflen(scmd), scmd->underflow,
3243 scsi_get_resid(scmd)); 3641 scsi_get_resid(scmd));
@@ -3772,7 +4170,7 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
3772 if (!handle) 4170 if (!handle)
3773 return -1; 4171 return -1;
3774 4172
3775 if (ioc->shost_recovery) 4173 if (ioc->shost_recovery || ioc->pci_error_recovery)
3776 return -1; 4174 return -1;
3777 4175
3778 if ((mpt2sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, 4176 if ((mpt2sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
@@ -4178,7 +4576,7 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
4178 le16_to_cpu(sas_device_pg0.Slot); 4576 le16_to_cpu(sas_device_pg0.Slot);
4179 sas_device->device_info = device_info; 4577 sas_device->device_info = device_info;
4180 sas_device->sas_address = sas_address; 4578 sas_device->sas_address = sas_address;
4181 sas_device->hidden_raid_component = is_pd; 4579 sas_device->phy = sas_device_pg0.PhyNum;
4182 4580
4183 /* get enclosure_logical_id */ 4581 /* get enclosure_logical_id */
4184 if (sas_device->enclosure_handle && !(mpt2sas_config_get_enclosure_pg0( 4582 if (sas_device->enclosure_handle && !(mpt2sas_config_get_enclosure_pg0(
@@ -4199,62 +4597,6 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
4199} 4597}
4200 4598
4201/** 4599/**
4202 * _scsih_remove_pd_device - removing sas device pd object
4203 * @ioc: per adapter object
4204 * @sas_device_delete: the sas_device object
4205 *
4206 * For hidden raid components, we do driver-fw handshake from
4207 * hotplug work threads.
4208 * Return nothing.
4209 */
4210static void
4211_scsih_remove_pd_device(struct MPT2SAS_ADAPTER *ioc, struct _sas_device
4212 sas_device)
4213{
4214 Mpi2SasIoUnitControlReply_t mpi_reply;
4215 Mpi2SasIoUnitControlRequest_t mpi_request;
4216 u16 vol_handle, handle;
4217
4218 handle = sas_device.handle;
4219 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: handle(0x%04x),"
4220 " sas_addr(0x%016llx)\n", ioc->name, __func__, handle,
4221 (unsigned long long) sas_device.sas_address));
4222
4223 vol_handle = sas_device.volume_handle;
4224 if (!vol_handle)
4225 return;
4226 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "issue target reset: "
4227 "handle(0x%04x)\n", ioc->name, vol_handle));
4228 mpt2sas_scsih_issue_tm(ioc, vol_handle, 0, 0, 0,
4229 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30, NULL);
4230 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "issue target reset "
4231 "done: handle(0x%04x)\n", ioc->name, vol_handle));
4232 if (ioc->shost_recovery)
4233 return;
4234
4235 /* SAS_IO_UNIT_CNTR - send REMOVE_DEVICE */
4236 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "sas_iounit: handle"
4237 "(0x%04x)\n", ioc->name, handle));
4238 memset(&mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4239 mpi_request.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4240 mpi_request.Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4241 mpi_request.DevHandle = cpu_to_le16(handle);
4242 if ((mpt2sas_base_sas_iounit_control(ioc, &mpi_reply,
4243 &mpi_request)) != 0)
4244 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
4245 ioc->name, __FILE__, __LINE__, __func__);
4246
4247 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "sas_iounit: ioc_status"
4248 "(0x%04x), loginfo(0x%08x)\n", ioc->name,
4249 le16_to_cpu(mpi_reply.IOCStatus),
4250 le32_to_cpu(mpi_reply.IOCLogInfo)));
4251
4252 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit: handle(0x%04x),"
4253 " sas_addr(0x%016llx)\n", ioc->name, __func__, handle,
4254 (unsigned long long) sas_device.sas_address));
4255}
4256
4257/**
4258 * _scsih_remove_device - removing sas device object 4600 * _scsih_remove_device - removing sas device object
4259 * @ioc: per adapter object 4601 * @ioc: per adapter object
4260 * @sas_device_delete: the sas_device object 4602 * @sas_device_delete: the sas_device object
@@ -4284,9 +4626,6 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc,
4284 sas_target_priv_data->deleted = 1; 4626 sas_target_priv_data->deleted = 1;
4285 } 4627 }
4286 4628
4287 if (sas_device_backup.hidden_raid_component)
4288 _scsih_remove_pd_device(ioc, sas_device_backup);
4289
4290 _scsih_ublock_io_device(ioc, sas_device_backup.handle); 4629 _scsih_ublock_io_device(ioc, sas_device_backup.handle);
4291 4630
4292 mpt2sas_transport_port_remove(ioc, sas_device_backup.sas_address, 4631 mpt2sas_transport_port_remove(ioc, sas_device_backup.sas_address,
@@ -4338,9 +4677,9 @@ _scsih_sas_topology_change_event_debug(struct MPT2SAS_ADAPTER *ioc,
4338 status_str = "unknown status"; 4677 status_str = "unknown status";
4339 break; 4678 break;
4340 } 4679 }
4341 printk(MPT2SAS_DEBUG_FMT "sas topology change: (%s)\n", 4680 printk(MPT2SAS_INFO_FMT "sas topology change: (%s)\n",
4342 ioc->name, status_str); 4681 ioc->name, status_str);
4343 printk(KERN_DEBUG "\thandle(0x%04x), enclosure_handle(0x%04x) " 4682 printk(KERN_INFO "\thandle(0x%04x), enclosure_handle(0x%04x) "
4344 "start_phy(%02d), count(%d)\n", 4683 "start_phy(%02d), count(%d)\n",
4345 le16_to_cpu(event_data->ExpanderDevHandle), 4684 le16_to_cpu(event_data->ExpanderDevHandle),
4346 le16_to_cpu(event_data->EnclosureHandle), 4685 le16_to_cpu(event_data->EnclosureHandle),
@@ -4374,7 +4713,7 @@ _scsih_sas_topology_change_event_debug(struct MPT2SAS_ADAPTER *ioc,
4374 } 4713 }
4375 link_rate = event_data->PHY[i].LinkRate >> 4; 4714 link_rate = event_data->PHY[i].LinkRate >> 4;
4376 prev_link_rate = event_data->PHY[i].LinkRate & 0xF; 4715 prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
4377 printk(KERN_DEBUG "\tphy(%02d), attached_handle(0x%04x): %s:" 4716 printk(KERN_INFO "\tphy(%02d), attached_handle(0x%04x): %s:"
4378 " link rate: new(0x%02x), old(0x%02x)\n", phy_number, 4717 " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
4379 handle, status_str, link_rate, prev_link_rate); 4718 handle, status_str, link_rate, prev_link_rate);
4380 4719
@@ -4409,7 +4748,7 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
4409 _scsih_sas_topology_change_event_debug(ioc, event_data); 4748 _scsih_sas_topology_change_event_debug(ioc, event_data);
4410#endif 4749#endif
4411 4750
4412 if (ioc->shost_recovery || ioc->remove_host) 4751 if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
4413 return; 4752 return;
4414 4753
4415 if (!ioc->sas_hba.num_phys) 4754 if (!ioc->sas_hba.num_phys)
@@ -4418,7 +4757,7 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
4418 _scsih_sas_host_refresh(ioc); 4757 _scsih_sas_host_refresh(ioc);
4419 4758
4420 if (fw_event->ignore) { 4759 if (fw_event->ignore) {
4421 dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ignoring expander " 4760 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "ignoring expander "
4422 "event\n", ioc->name)); 4761 "event\n", ioc->name));
4423 return; 4762 return;
4424 } 4763 }
@@ -4444,11 +4783,12 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
4444 /* handle siblings events */ 4783 /* handle siblings events */
4445 for (i = 0; i < event_data->NumEntries; i++) { 4784 for (i = 0; i < event_data->NumEntries; i++) {
4446 if (fw_event->ignore) { 4785 if (fw_event->ignore) {
4447 dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ignoring " 4786 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "ignoring "
4448 "expander event\n", ioc->name)); 4787 "expander event\n", ioc->name));
4449 return; 4788 return;
4450 } 4789 }
4451 if (ioc->shost_recovery || ioc->remove_host) 4790 if (ioc->shost_recovery || ioc->remove_host ||
4791 ioc->pci_error_recovery)
4452 return; 4792 return;
4453 phy_number = event_data->StartPhyNum + i; 4793 phy_number = event_data->StartPhyNum + i;
4454 reason_code = event_data->PHY[i].PhyStatus & 4794 reason_code = event_data->PHY[i].PhyStatus &
@@ -4564,12 +4904,12 @@ _scsih_sas_device_status_change_event_debug(struct MPT2SAS_ADAPTER *ioc,
4564 reason_str = "unknown reason"; 4904 reason_str = "unknown reason";
4565 break; 4905 break;
4566 } 4906 }
4567 printk(MPT2SAS_DEBUG_FMT "device status change: (%s)\n" 4907 printk(MPT2SAS_INFO_FMT "device status change: (%s)\n"
4568 "\thandle(0x%04x), sas address(0x%016llx)", ioc->name, 4908 "\thandle(0x%04x), sas address(0x%016llx)", ioc->name,
4569 reason_str, le16_to_cpu(event_data->DevHandle), 4909 reason_str, le16_to_cpu(event_data->DevHandle),
4570 (unsigned long long)le64_to_cpu(event_data->SASAddress)); 4910 (unsigned long long)le64_to_cpu(event_data->SASAddress));
4571 if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA) 4911 if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
4572 printk(MPT2SAS_DEBUG_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name, 4912 printk(MPT2SAS_INFO_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name,
4573 event_data->ASC, event_data->ASCQ); 4913 event_data->ASC, event_data->ASCQ);
4574 printk(KERN_INFO "\n"); 4914 printk(KERN_INFO "\n");
4575} 4915}
@@ -4653,7 +4993,7 @@ _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT2SAS_ADAPTER *ioc,
4653 break; 4993 break;
4654 } 4994 }
4655 4995
4656 printk(MPT2SAS_DEBUG_FMT "enclosure status change: (%s)\n" 4996 printk(MPT2SAS_INFO_FMT "enclosure status change: (%s)\n"
4657 "\thandle(0x%04x), enclosure logical id(0x%016llx)" 4997 "\thandle(0x%04x), enclosure logical id(0x%016llx)"
4658 " number slots(%d)\n", ioc->name, reason_str, 4998 " number slots(%d)\n", ioc->name, reason_str,
4659 le16_to_cpu(event_data->EnclosureHandle), 4999 le16_to_cpu(event_data->EnclosureHandle),
@@ -4704,10 +5044,10 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
4704 Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data; 5044 Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data;
4705#endif 5045#endif
4706 u16 ioc_status; 5046 u16 ioc_status;
4707 dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "broadcast primative: " 5047 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "broadcast primative: "
4708 "phy number(%d), width(%d)\n", ioc->name, event_data->PhyNum, 5048 "phy number(%d), width(%d)\n", ioc->name, event_data->PhyNum,
4709 event_data->PortWidth)); 5049 event_data->PortWidth));
4710 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name, 5050 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
4711 __func__)); 5051 __func__));
4712 5052
4713 termination_count = 0; 5053 termination_count = 0;
@@ -4751,7 +5091,7 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
4751 } 5091 }
4752 ioc->broadcast_aen_busy = 0; 5092 ioc->broadcast_aen_busy = 0;
4753 5093
4754 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT 5094 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT
4755 "%s - exit, query_count = %d termination_count = %d\n", 5095 "%s - exit, query_count = %d termination_count = %d\n",
4756 ioc->name, __func__, query_count, termination_count)); 5096 ioc->name, __func__, query_count, termination_count));
4757} 5097}
@@ -4772,7 +5112,7 @@ _scsih_sas_discovery_event(struct MPT2SAS_ADAPTER *ioc,
4772 5112
4773#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 5113#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
4774 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) { 5114 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
4775 printk(MPT2SAS_DEBUG_FMT "discovery event: (%s)", ioc->name, 5115 printk(MPT2SAS_INFO_FMT "discovery event: (%s)", ioc->name,
4776 (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ? 5116 (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
4777 "start" : "stop"); 5117 "start" : "stop");
4778 if (event_data->DiscoveryStatus) 5118 if (event_data->DiscoveryStatus)
@@ -4883,17 +5223,15 @@ _scsih_sas_volume_add(struct MPT2SAS_ADAPTER *ioc,
4883/** 5223/**
4884 * _scsih_sas_volume_delete - delete volume 5224 * _scsih_sas_volume_delete - delete volume
4885 * @ioc: per adapter object 5225 * @ioc: per adapter object
4886 * @element: IR config element data 5226 * @handle: volume device handle
4887 * Context: user. 5227 * Context: user.
4888 * 5228 *
4889 * Return nothing. 5229 * Return nothing.
4890 */ 5230 */
4891static void 5231static void
4892_scsih_sas_volume_delete(struct MPT2SAS_ADAPTER *ioc, 5232_scsih_sas_volume_delete(struct MPT2SAS_ADAPTER *ioc, u16 handle)
4893 Mpi2EventIrConfigElement_t *element)
4894{ 5233{
4895 struct _raid_device *raid_device; 5234 struct _raid_device *raid_device;
4896 u16 handle = le16_to_cpu(element->VolDevHandle);
4897 unsigned long flags; 5235 unsigned long flags;
4898 struct MPT2SAS_TARGET *sas_target_priv_data; 5236 struct MPT2SAS_TARGET *sas_target_priv_data;
4899 5237
@@ -4907,6 +5245,9 @@ _scsih_sas_volume_delete(struct MPT2SAS_ADAPTER *ioc,
4907 sas_target_priv_data->deleted = 1; 5245 sas_target_priv_data->deleted = 1;
4908 scsi_remove_target(&raid_device->starget->dev); 5246 scsi_remove_target(&raid_device->starget->dev);
4909 } 5247 }
5248 printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), wwid"
5249 "(0x%016llx)\n", ioc->name, raid_device->handle,
5250 (unsigned long long) raid_device->wwid);
4910 _scsih_raid_device_remove(ioc, raid_device); 5251 _scsih_raid_device_remove(ioc, raid_device);
4911} 5252}
4912 5253
@@ -4935,7 +5276,7 @@ _scsih_sas_pd_expose(struct MPT2SAS_ADAPTER *ioc,
4935 /* exposing raid component */ 5276 /* exposing raid component */
4936 sas_device->volume_handle = 0; 5277 sas_device->volume_handle = 0;
4937 sas_device->volume_wwid = 0; 5278 sas_device->volume_wwid = 0;
4938 sas_device->hidden_raid_component = 0; 5279 clear_bit(handle, ioc->pd_handles);
4939 _scsih_reprobe_target(sas_device->starget, 0); 5280 _scsih_reprobe_target(sas_device->starget, 0);
4940} 5281}
4941 5282
@@ -4966,7 +5307,7 @@ _scsih_sas_pd_hide(struct MPT2SAS_ADAPTER *ioc,
4966 &sas_device->volume_handle); 5307 &sas_device->volume_handle);
4967 mpt2sas_config_get_volume_wwid(ioc, sas_device->volume_handle, 5308 mpt2sas_config_get_volume_wwid(ioc, sas_device->volume_handle,
4968 &sas_device->volume_wwid); 5309 &sas_device->volume_wwid);
4969 sas_device->hidden_raid_component = 1; 5310 set_bit(handle, ioc->pd_handles);
4970 _scsih_reprobe_target(sas_device->starget, 1); 5311 _scsih_reprobe_target(sas_device->starget, 1);
4971} 5312}
4972 5313
@@ -5015,13 +5356,13 @@ _scsih_sas_pd_add(struct MPT2SAS_ADAPTER *ioc,
5015 u64 sas_address; 5356 u64 sas_address;
5016 u16 parent_handle; 5357 u16 parent_handle;
5017 5358
5359 set_bit(handle, ioc->pd_handles);
5360
5018 spin_lock_irqsave(&ioc->sas_device_lock, flags); 5361 spin_lock_irqsave(&ioc->sas_device_lock, flags);
5019 sas_device = _scsih_sas_device_find_by_handle(ioc, handle); 5362 sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
5020 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 5363 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5021 if (sas_device) { 5364 if (sas_device)
5022 sas_device->hidden_raid_component = 1;
5023 return; 5365 return;
5024 }
5025 5366
5026 if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 5367 if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
5027 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { 5368 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
@@ -5066,7 +5407,7 @@ _scsih_sas_ir_config_change_event_debug(struct MPT2SAS_ADAPTER *ioc,
5066 5407
5067 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; 5408 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
5068 5409
5069 printk(MPT2SAS_DEBUG_FMT "raid config change: (%s), elements(%d)\n", 5410 printk(MPT2SAS_INFO_FMT "raid config change: (%s), elements(%d)\n",
5070 ioc->name, (le32_to_cpu(event_data->Flags) & 5411 ioc->name, (le32_to_cpu(event_data->Flags) &
5071 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 5412 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ?
5072 "foreign" : "native", event_data->NumElements); 5413 "foreign" : "native", event_data->NumElements);
@@ -5119,7 +5460,7 @@ _scsih_sas_ir_config_change_event_debug(struct MPT2SAS_ADAPTER *ioc,
5119 element_str = "unknown element"; 5460 element_str = "unknown element";
5120 break; 5461 break;
5121 } 5462 }
5122 printk(KERN_DEBUG "\t(%s:%s), vol handle(0x%04x), " 5463 printk(KERN_INFO "\t(%s:%s), vol handle(0x%04x), "
5123 "pd handle(0x%04x), pd num(0x%02x)\n", element_str, 5464 "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
5124 reason_str, le16_to_cpu(element->VolDevHandle), 5465 reason_str, le16_to_cpu(element->VolDevHandle),
5125 le16_to_cpu(element->PhysDiskDevHandle), 5466 le16_to_cpu(element->PhysDiskDevHandle),
@@ -5165,7 +5506,8 @@ _scsih_sas_ir_config_change_event(struct MPT2SAS_ADAPTER *ioc,
5165 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED: 5506 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
5166 case MPI2_EVENT_IR_CHANGE_RC_REMOVED: 5507 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
5167 if (!foreign_config) 5508 if (!foreign_config)
5168 _scsih_sas_volume_delete(ioc, element); 5509 _scsih_sas_volume_delete(ioc,
5510 le16_to_cpu(element->VolDevHandle));
5169 break; 5511 break;
5170 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED: 5512 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
5171 _scsih_sas_pd_hide(ioc, element); 5513 _scsih_sas_pd_hide(ioc, element);
@@ -5201,7 +5543,6 @@ _scsih_sas_ir_volume_event(struct MPT2SAS_ADAPTER *ioc,
5201 u16 handle; 5543 u16 handle;
5202 u32 state; 5544 u32 state;
5203 int rc; 5545 int rc;
5204 struct MPT2SAS_TARGET *sas_target_priv_data;
5205 Mpi2EventDataIrVolume_t *event_data = fw_event->event_data; 5546 Mpi2EventDataIrVolume_t *event_data = fw_event->event_data;
5206 5547
5207 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED) 5548 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
@@ -5209,30 +5550,24 @@ _scsih_sas_ir_volume_event(struct MPT2SAS_ADAPTER *ioc,
5209 5550
5210 handle = le16_to_cpu(event_data->VolDevHandle); 5551 handle = le16_to_cpu(event_data->VolDevHandle);
5211 state = le32_to_cpu(event_data->NewValue); 5552 state = le32_to_cpu(event_data->NewValue);
5212 dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: handle(0x%04x), " 5553 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: handle(0x%04x), "
5213 "old(0x%08x), new(0x%08x)\n", ioc->name, __func__, handle, 5554 "old(0x%08x), new(0x%08x)\n", ioc->name, __func__, handle,
5214 le32_to_cpu(event_data->PreviousValue), state)); 5555 le32_to_cpu(event_data->PreviousValue), state));
5215 5556
5216 spin_lock_irqsave(&ioc->raid_device_lock, flags);
5217 raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
5218 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
5219
5220 switch (state) { 5557 switch (state) {
5221 case MPI2_RAID_VOL_STATE_MISSING: 5558 case MPI2_RAID_VOL_STATE_MISSING:
5222 case MPI2_RAID_VOL_STATE_FAILED: 5559 case MPI2_RAID_VOL_STATE_FAILED:
5223 if (!raid_device) 5560 _scsih_sas_volume_delete(ioc, handle);
5224 break;
5225 if (raid_device->starget) {
5226 sas_target_priv_data = raid_device->starget->hostdata;
5227 sas_target_priv_data->deleted = 1;
5228 scsi_remove_target(&raid_device->starget->dev);
5229 }
5230 _scsih_raid_device_remove(ioc, raid_device);
5231 break; 5561 break;
5232 5562
5233 case MPI2_RAID_VOL_STATE_ONLINE: 5563 case MPI2_RAID_VOL_STATE_ONLINE:
5234 case MPI2_RAID_VOL_STATE_DEGRADED: 5564 case MPI2_RAID_VOL_STATE_DEGRADED:
5235 case MPI2_RAID_VOL_STATE_OPTIMAL: 5565 case MPI2_RAID_VOL_STATE_OPTIMAL:
5566
5567 spin_lock_irqsave(&ioc->raid_device_lock, flags);
5568 raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
5569 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
5570
5236 if (raid_device) 5571 if (raid_device)
5237 break; 5572 break;
5238 5573
@@ -5297,23 +5632,25 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc,
5297 handle = le16_to_cpu(event_data->PhysDiskDevHandle); 5632 handle = le16_to_cpu(event_data->PhysDiskDevHandle);
5298 state = le32_to_cpu(event_data->NewValue); 5633 state = le32_to_cpu(event_data->NewValue);
5299 5634
5300 dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: handle(0x%04x), " 5635 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: handle(0x%04x), "
5301 "old(0x%08x), new(0x%08x)\n", ioc->name, __func__, handle, 5636 "old(0x%08x), new(0x%08x)\n", ioc->name, __func__, handle,
5302 le32_to_cpu(event_data->PreviousValue), state)); 5637 le32_to_cpu(event_data->PreviousValue), state));
5303 5638
5304 spin_lock_irqsave(&ioc->sas_device_lock, flags);
5305 sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
5306 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5307
5308 switch (state) { 5639 switch (state) {
5309 case MPI2_RAID_PD_STATE_ONLINE: 5640 case MPI2_RAID_PD_STATE_ONLINE:
5310 case MPI2_RAID_PD_STATE_DEGRADED: 5641 case MPI2_RAID_PD_STATE_DEGRADED:
5311 case MPI2_RAID_PD_STATE_REBUILDING: 5642 case MPI2_RAID_PD_STATE_REBUILDING:
5312 case MPI2_RAID_PD_STATE_OPTIMAL: 5643 case MPI2_RAID_PD_STATE_OPTIMAL:
5313 if (sas_device) { 5644 case MPI2_RAID_PD_STATE_HOT_SPARE:
5314 sas_device->hidden_raid_component = 1; 5645
5646 set_bit(handle, ioc->pd_handles);
5647
5648 spin_lock_irqsave(&ioc->sas_device_lock, flags);
5649 sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
5650 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5651
5652 if (sas_device)
5315 return; 5653 return;
5316 }
5317 5654
5318 if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, 5655 if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
5319 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, 5656 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
@@ -5343,7 +5680,6 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc,
5343 case MPI2_RAID_PD_STATE_OFFLINE: 5680 case MPI2_RAID_PD_STATE_OFFLINE:
5344 case MPI2_RAID_PD_STATE_NOT_CONFIGURED: 5681 case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
5345 case MPI2_RAID_PD_STATE_NOT_COMPATIBLE: 5682 case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
5346 case MPI2_RAID_PD_STATE_HOT_SPARE:
5347 default: 5683 default:
5348 break; 5684 break;
5349 } 5685 }
@@ -5471,7 +5807,7 @@ _scsih_task_set_full(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work
5471 sas_address = sas_device->sas_address; 5807 sas_address = sas_device->sas_address;
5472 5808
5473 /* if hidden raid component, then change to volume characteristics */ 5809 /* if hidden raid component, then change to volume characteristics */
5474 if (sas_device->hidden_raid_component && sas_device->volume_handle) { 5810 if (test_bit(handle, ioc->pd_handles) && sas_device->volume_handle) {
5475 spin_lock_irqsave(&ioc->raid_device_lock, flags); 5811 spin_lock_irqsave(&ioc->raid_device_lock, flags);
5476 raid_device = _scsih_raid_device_find_by_handle( 5812 raid_device = _scsih_raid_device_find_by_handle(
5477 ioc, sas_device->volume_handle); 5813 ioc, sas_device->volume_handle);
@@ -5485,7 +5821,7 @@ _scsih_task_set_full(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work
5485 } 5821 }
5486 5822
5487 if (ioc->logging_level & MPT_DEBUG_TASK_SET_FULL) 5823 if (ioc->logging_level & MPT_DEBUG_TASK_SET_FULL)
5488 starget_printk(KERN_DEBUG, sas_device->starget, "task set " 5824 starget_printk(KERN_INFO, sas_device->starget, "task set "
5489 "full: handle(0x%04x), sas_addr(0x%016llx), depth(%d)\n", 5825 "full: handle(0x%04x), sas_addr(0x%016llx), depth(%d)\n",
5490 handle, (unsigned long long)sas_address, current_depth); 5826 handle, (unsigned long long)sas_address, current_depth);
5491 5827
@@ -5696,9 +6032,12 @@ static void
5696_scsih_search_responding_raid_devices(struct MPT2SAS_ADAPTER *ioc) 6032_scsih_search_responding_raid_devices(struct MPT2SAS_ADAPTER *ioc)
5697{ 6033{
5698 Mpi2RaidVolPage1_t volume_pg1; 6034 Mpi2RaidVolPage1_t volume_pg1;
6035 Mpi2RaidVolPage0_t volume_pg0;
6036 Mpi2RaidPhysDiskPage0_t pd_pg0;
5699 Mpi2ConfigReply_t mpi_reply; 6037 Mpi2ConfigReply_t mpi_reply;
5700 u16 ioc_status; 6038 u16 ioc_status;
5701 u16 handle; 6039 u16 handle;
6040 u8 phys_disk_num;
5702 6041
5703 printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__); 6042 printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__);
5704 6043
@@ -5713,8 +6052,32 @@ _scsih_search_responding_raid_devices(struct MPT2SAS_ADAPTER *ioc)
5713 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) 6052 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
5714 break; 6053 break;
5715 handle = le16_to_cpu(volume_pg1.DevHandle); 6054 handle = le16_to_cpu(volume_pg1.DevHandle);
5716 _scsih_mark_responding_raid_device(ioc, 6055
5717 le64_to_cpu(volume_pg1.WWID), handle); 6056 if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
6057 &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
6058 sizeof(Mpi2RaidVolPage0_t)))
6059 continue;
6060
6061 if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
6062 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
6063 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
6064 _scsih_mark_responding_raid_device(ioc,
6065 le64_to_cpu(volume_pg1.WWID), handle);
6066 }
6067
6068 /* refresh the pd_handles */
6069 phys_disk_num = 0xFF;
6070 memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
6071 while (!(mpt2sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
6072 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
6073 phys_disk_num))) {
6074 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6075 MPI2_IOCSTATUS_MASK;
6076 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
6077 break;
6078 phys_disk_num = pd_pg0.PhysDiskNum;
6079 handle = le16_to_cpu(pd_pg0.DevHandle);
6080 set_bit(handle, ioc->pd_handles);
5718 } 6081 }
5719} 6082}
5720 6083
@@ -5876,11 +6239,11 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
5876{ 6239{
5877 switch (reset_phase) { 6240 switch (reset_phase) {
5878 case MPT2_IOC_PRE_RESET: 6241 case MPT2_IOC_PRE_RESET:
5879 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " 6242 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
5880 "MPT2_IOC_PRE_RESET\n", ioc->name, __func__)); 6243 "MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
5881 break; 6244 break;
5882 case MPT2_IOC_AFTER_RESET: 6245 case MPT2_IOC_AFTER_RESET:
5883 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " 6246 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
5884 "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__)); 6247 "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
5885 if (ioc->scsih_cmds.status & MPT2_CMD_PENDING) { 6248 if (ioc->scsih_cmds.status & MPT2_CMD_PENDING) {
5886 ioc->scsih_cmds.status |= MPT2_CMD_RESET; 6249 ioc->scsih_cmds.status |= MPT2_CMD_RESET;
@@ -5897,7 +6260,7 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
5897 _scsih_queue_rescan(ioc); 6260 _scsih_queue_rescan(ioc);
5898 break; 6261 break;
5899 case MPT2_IOC_DONE_RESET: 6262 case MPT2_IOC_DONE_RESET:
5900 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " 6263 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
5901 "MPT2_IOC_DONE_RESET\n", ioc->name, __func__)); 6264 "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
5902 _scsih_sas_host_refresh(ioc); 6265 _scsih_sas_host_refresh(ioc);
5903 _scsih_prep_device_scan(ioc); 6266 _scsih_prep_device_scan(ioc);
@@ -5925,7 +6288,8 @@ _firmware_event_work(struct work_struct *work)
5925 struct MPT2SAS_ADAPTER *ioc = fw_event->ioc; 6288 struct MPT2SAS_ADAPTER *ioc = fw_event->ioc;
5926 6289
5927 /* the queue is being flushed so ignore this event */ 6290 /* the queue is being flushed so ignore this event */
5928 if (ioc->remove_host || fw_event->cancel_pending_work) { 6291 if (ioc->remove_host || fw_event->cancel_pending_work ||
6292 ioc->pci_error_recovery) {
5929 _scsih_fw_event_free(ioc, fw_event); 6293 _scsih_fw_event_free(ioc, fw_event);
5930 return; 6294 return;
5931 } 6295 }
@@ -6007,7 +6371,7 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
6007 u16 sz; 6371 u16 sz;
6008 6372
6009 /* events turned off due to host reset or driver unloading */ 6373 /* events turned off due to host reset or driver unloading */
6010 if (ioc->remove_host) 6374 if (ioc->remove_host || ioc->pci_error_recovery)
6011 return 1; 6375 return 1;
6012 6376
6013 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); 6377 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
@@ -6034,14 +6398,21 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
6034 (Mpi2EventDataSasTopologyChangeList_t *) 6398 (Mpi2EventDataSasTopologyChangeList_t *)
6035 mpi_reply->EventData); 6399 mpi_reply->EventData);
6036 break; 6400 break;
6037 6401 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
6402 _scsih_check_ir_config_unhide_events(ioc,
6403 (Mpi2EventDataIrConfigChangeList_t *)
6404 mpi_reply->EventData);
6405 break;
6406 case MPI2_EVENT_IR_VOLUME:
6407 _scsih_check_volume_delete_events(ioc,
6408 (Mpi2EventDataIrVolume_t *)
6409 mpi_reply->EventData);
6410 break;
6038 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: 6411 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
6039 case MPI2_EVENT_IR_OPERATION_STATUS: 6412 case MPI2_EVENT_IR_OPERATION_STATUS:
6040 case MPI2_EVENT_SAS_DISCOVERY: 6413 case MPI2_EVENT_SAS_DISCOVERY:
6041 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: 6414 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
6042 case MPI2_EVENT_IR_VOLUME:
6043 case MPI2_EVENT_IR_PHYSICAL_DISK: 6415 case MPI2_EVENT_IR_PHYSICAL_DISK:
6044 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
6045 case MPI2_EVENT_TASK_SET_FULL: 6416 case MPI2_EVENT_TASK_SET_FULL:
6046 break; 6417 break;
6047 6418
@@ -6548,9 +6919,11 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
6548 ioc->scsih_cb_idx = scsih_cb_idx; 6919 ioc->scsih_cb_idx = scsih_cb_idx;
6549 ioc->config_cb_idx = config_cb_idx; 6920 ioc->config_cb_idx = config_cb_idx;
6550 ioc->tm_tr_cb_idx = tm_tr_cb_idx; 6921 ioc->tm_tr_cb_idx = tm_tr_cb_idx;
6922 ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
6551 ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx; 6923 ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
6552 ioc->logging_level = logging_level; 6924 ioc->logging_level = logging_level;
6553 /* misc semaphores and spin locks */ 6925 /* misc semaphores and spin locks */
6926 mutex_init(&ioc->reset_in_progress_mutex);
6554 spin_lock_init(&ioc->ioc_reset_in_progress_lock); 6927 spin_lock_init(&ioc->ioc_reset_in_progress_lock);
6555 spin_lock_init(&ioc->scsi_lookup_lock); 6928 spin_lock_init(&ioc->scsi_lookup_lock);
6556 spin_lock_init(&ioc->sas_device_lock); 6929 spin_lock_init(&ioc->sas_device_lock);
@@ -6565,9 +6938,10 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
6565 INIT_LIST_HEAD(&ioc->raid_device_list); 6938 INIT_LIST_HEAD(&ioc->raid_device_list);
6566 INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list); 6939 INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
6567 INIT_LIST_HEAD(&ioc->delayed_tr_list); 6940 INIT_LIST_HEAD(&ioc->delayed_tr_list);
6941 INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
6568 6942
6569 /* init shost parameters */ 6943 /* init shost parameters */
6570 shost->max_cmd_len = 16; 6944 shost->max_cmd_len = 32;
6571 shost->max_lun = max_lun; 6945 shost->max_lun = max_lun;
6572 shost->transportt = mpt2sas_transport_template; 6946 shost->transportt = mpt2sas_transport_template;
6573 shost->unique_id = ioc->id; 6947 shost->unique_id = ioc->id;
@@ -6580,7 +6954,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
6580 } 6954 }
6581 6955
6582 scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION 6956 scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
6583 | SHOST_DIF_TYPE3_PROTECTION); 6957 | SHOST_DIF_TYPE2_PROTECTION | SHOST_DIF_TYPE3_PROTECTION);
6584 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); 6958 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
6585 6959
6586 /* event thread */ 6960 /* event thread */
@@ -6700,12 +7074,17 @@ _scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
6700 case pci_channel_io_normal: 7074 case pci_channel_io_normal:
6701 return PCI_ERS_RESULT_CAN_RECOVER; 7075 return PCI_ERS_RESULT_CAN_RECOVER;
6702 case pci_channel_io_frozen: 7076 case pci_channel_io_frozen:
7077 /* Fatal error, prepare for slot reset */
7078 ioc->pci_error_recovery = 1;
6703 scsi_block_requests(ioc->shost); 7079 scsi_block_requests(ioc->shost);
6704 mpt2sas_base_stop_watchdog(ioc); 7080 mpt2sas_base_stop_watchdog(ioc);
6705 mpt2sas_base_free_resources(ioc); 7081 mpt2sas_base_free_resources(ioc);
6706 return PCI_ERS_RESULT_NEED_RESET; 7082 return PCI_ERS_RESULT_NEED_RESET;
6707 case pci_channel_io_perm_failure: 7083 case pci_channel_io_perm_failure:
6708 _scsih_remove(pdev); 7084 /* Permanent error, prepare for device removal */
7085 ioc->pci_error_recovery = 1;
7086 mpt2sas_base_stop_watchdog(ioc);
7087 _scsih_flush_running_cmds(ioc);
6709 return PCI_ERS_RESULT_DISCONNECT; 7088 return PCI_ERS_RESULT_DISCONNECT;
6710 } 7089 }
6711 return PCI_ERS_RESULT_NEED_RESET; 7090 return PCI_ERS_RESULT_NEED_RESET;
@@ -6729,7 +7108,9 @@ _scsih_pci_slot_reset(struct pci_dev *pdev)
6729 printk(MPT2SAS_INFO_FMT "PCI error: slot reset callback!!\n", 7108 printk(MPT2SAS_INFO_FMT "PCI error: slot reset callback!!\n",
6730 ioc->name); 7109 ioc->name);
6731 7110
7111 ioc->pci_error_recovery = 0;
6732 ioc->pdev = pdev; 7112 ioc->pdev = pdev;
7113 pci_restore_state(pdev);
6733 rc = mpt2sas_base_map_resources(ioc); 7114 rc = mpt2sas_base_map_resources(ioc);
6734 if (rc) 7115 if (rc)
6735 return PCI_ERS_RESULT_DISCONNECT; 7116 return PCI_ERS_RESULT_DISCONNECT;
@@ -6867,6 +7248,10 @@ _scsih_init(void)
6867 7248
6868 tm_tr_cb_idx = mpt2sas_base_register_callback_handler( 7249 tm_tr_cb_idx = mpt2sas_base_register_callback_handler(
6869 _scsih_tm_tr_complete); 7250 _scsih_tm_tr_complete);
7251
7252 tm_tr_volume_cb_idx = mpt2sas_base_register_callback_handler(
7253 _scsih_tm_volume_tr_complete);
7254
6870 tm_sas_control_cb_idx = mpt2sas_base_register_callback_handler( 7255 tm_sas_control_cb_idx = mpt2sas_base_register_callback_handler(
6871 _scsih_sas_control_complete); 7256 _scsih_sas_control_complete);
6872 7257
@@ -6906,6 +7291,7 @@ _scsih_exit(void)
6906 mpt2sas_base_release_callback_handler(ctl_cb_idx); 7291 mpt2sas_base_release_callback_handler(ctl_cb_idx);
6907 7292
6908 mpt2sas_base_release_callback_handler(tm_tr_cb_idx); 7293 mpt2sas_base_release_callback_handler(tm_tr_cb_idx);
7294 mpt2sas_base_release_callback_handler(tm_tr_volume_cb_idx);
6909 mpt2sas_base_release_callback_handler(tm_sas_control_cb_idx); 7295 mpt2sas_base_release_callback_handler(tm_sas_control_cb_idx);
6910 7296
6911 /* raid transport support */ 7297 /* raid transport support */
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 2727c3b65104..b55c6dc07470 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -140,7 +140,7 @@ _transport_set_identify(struct MPT2SAS_ADAPTER *ioc, u16 handle,
140 u32 device_info; 140 u32 device_info;
141 u32 ioc_status; 141 u32 ioc_status;
142 142
143 if (ioc->shost_recovery) { 143 if (ioc->shost_recovery || ioc->pci_error_recovery) {
144 printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n", 144 printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
145 __func__, ioc->name); 145 __func__, ioc->name);
146 return -EFAULT; 146 return -EFAULT;
@@ -302,7 +302,7 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
302 u64 *sas_address_le; 302 u64 *sas_address_le;
303 u16 wait_state_count; 303 u16 wait_state_count;
304 304
305 if (ioc->shost_recovery) { 305 if (ioc->shost_recovery || ioc->pci_error_recovery) {
306 printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n", 306 printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
307 __func__, ioc->name); 307 __func__, ioc->name);
308 return -EFAULT; 308 return -EFAULT;
@@ -397,7 +397,7 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
397 sizeof(struct rep_manu_reply), data_out_dma + 397 sizeof(struct rep_manu_reply), data_out_dma +
398 sizeof(struct rep_manu_request)); 398 sizeof(struct rep_manu_request));
399 399
400 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT "report_manufacture - " 400 dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "report_manufacture - "
401 "send to sas_addr(0x%016llx)\n", ioc->name, 401 "send to sas_addr(0x%016llx)\n", ioc->name,
402 (unsigned long long)sas_address)); 402 (unsigned long long)sas_address));
403 mpt2sas_base_put_smid_default(ioc, smid); 403 mpt2sas_base_put_smid_default(ioc, smid);
@@ -415,7 +415,7 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
415 goto issue_host_reset; 415 goto issue_host_reset;
416 } 416 }
417 417
418 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT "report_manufacture - " 418 dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "report_manufacture - "
419 "complete\n", ioc->name)); 419 "complete\n", ioc->name));
420 420
421 if (ioc->transport_cmds.status & MPT2_CMD_REPLY_VALID) { 421 if (ioc->transport_cmds.status & MPT2_CMD_REPLY_VALID) {
@@ -423,7 +423,7 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
423 423
424 mpi_reply = ioc->transport_cmds.reply; 424 mpi_reply = ioc->transport_cmds.reply;
425 425
426 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT 426 dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT
427 "report_manufacture - reply data transfer size(%d)\n", 427 "report_manufacture - reply data transfer size(%d)\n",
428 ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength))); 428 ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
429 429
@@ -449,7 +449,7 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
449 manufacture_reply->component_revision_id; 449 manufacture_reply->component_revision_id;
450 } 450 }
451 } else 451 } else
452 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT 452 dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT
453 "report_manufacture - no reply\n", ioc->name)); 453 "report_manufacture - no reply\n", ioc->name));
454 454
455 issue_host_reset: 455 issue_host_reset:
@@ -894,7 +894,7 @@ mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc,
894 struct _sas_node *sas_node; 894 struct _sas_node *sas_node;
895 struct _sas_phy *mpt2sas_phy; 895 struct _sas_phy *mpt2sas_phy;
896 896
897 if (ioc->shost_recovery) 897 if (ioc->shost_recovery || ioc->pci_error_recovery)
898 return; 898 return;
899 899
900 spin_lock_irqsave(&ioc->sas_node_lock, flags); 900 spin_lock_irqsave(&ioc->sas_node_lock, flags);
@@ -940,22 +940,230 @@ rphy_to_ioc(struct sas_rphy *rphy)
940 return shost_priv(shost); 940 return shost_priv(shost);
941} 941}
942 942
943static struct _sas_phy * 943
944_transport_find_local_phy(struct MPT2SAS_ADAPTER *ioc, struct sas_phy *phy) 944/* report phy error log structure */
945struct phy_error_log_request{
946 u8 smp_frame_type; /* 0x40 */
947 u8 function; /* 0x11 */
948 u8 allocated_response_length;
949 u8 request_length; /* 02 */
950 u8 reserved_1[5];
951 u8 phy_identifier;
952 u8 reserved_2[2];
953};
954
955/* report phy error log reply structure */
956struct phy_error_log_reply{
957 u8 smp_frame_type; /* 0x41 */
958 u8 function; /* 0x11 */
959 u8 function_result;
960 u8 response_length;
961 u16 expander_change_count;
962 u8 reserved_1[3];
963 u8 phy_identifier;
964 u8 reserved_2[2];
965 u32 invalid_dword;
966 u32 running_disparity_error;
967 u32 loss_of_dword_sync;
968 u32 phy_reset_problem;
969};
970
971/**
972 * _transport_get_expander_phy_error_log - return expander counters
973 * @ioc: per adapter object
974 * @phy: The sas phy object
975 *
976 * Returns 0 for success, non-zero for failure.
977 *
978 */
979static int
980_transport_get_expander_phy_error_log(struct MPT2SAS_ADAPTER *ioc,
981 struct sas_phy *phy)
945{ 982{
946 int i; 983 Mpi2SmpPassthroughRequest_t *mpi_request;
984 Mpi2SmpPassthroughReply_t *mpi_reply;
985 struct phy_error_log_request *phy_error_log_request;
986 struct phy_error_log_reply *phy_error_log_reply;
987 int rc;
988 u16 smid;
989 u32 ioc_state;
990 unsigned long timeleft;
991 void *psge;
992 u32 sgl_flags;
993 u8 issue_reset = 0;
994 void *data_out = NULL;
995 dma_addr_t data_out_dma;
996 u32 sz;
997 u64 *sas_address_le;
998 u16 wait_state_count;
947 999
948 for (i = 0; i < ioc->sas_hba.num_phys; i++) 1000 if (ioc->shost_recovery || ioc->pci_error_recovery) {
949 if (ioc->sas_hba.phy[i].phy == phy) 1001 printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
950 return(&ioc->sas_hba.phy[i]); 1002 __func__, ioc->name);
951 return NULL; 1003 return -EFAULT;
1004 }
1005
1006 mutex_lock(&ioc->transport_cmds.mutex);
1007
1008 if (ioc->transport_cmds.status != MPT2_CMD_NOT_USED) {
1009 printk(MPT2SAS_ERR_FMT "%s: transport_cmds in use\n",
1010 ioc->name, __func__);
1011 rc = -EAGAIN;
1012 goto out;
1013 }
1014 ioc->transport_cmds.status = MPT2_CMD_PENDING;
1015
1016 wait_state_count = 0;
1017 ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
1018 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
1019 if (wait_state_count++ == 10) {
1020 printk(MPT2SAS_ERR_FMT
1021 "%s: failed due to ioc not operational\n",
1022 ioc->name, __func__);
1023 rc = -EFAULT;
1024 goto out;
1025 }
1026 ssleep(1);
1027 ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
1028 printk(MPT2SAS_INFO_FMT "%s: waiting for "
1029 "operational state(count=%d)\n", ioc->name,
1030 __func__, wait_state_count);
1031 }
1032 if (wait_state_count)
1033 printk(MPT2SAS_INFO_FMT "%s: ioc is operational\n",
1034 ioc->name, __func__);
1035
1036 smid = mpt2sas_base_get_smid(ioc, ioc->transport_cb_idx);
1037 if (!smid) {
1038 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
1039 ioc->name, __func__);
1040 rc = -EAGAIN;
1041 goto out;
1042 }
1043
1044 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
1045 ioc->transport_cmds.smid = smid;
1046
1047 sz = sizeof(struct phy_error_log_request) +
1048 sizeof(struct phy_error_log_reply);
1049 data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma);
1050 if (!data_out) {
1051 printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
1052 __LINE__, __func__);
1053 rc = -ENOMEM;
1054 mpt2sas_base_free_smid(ioc, smid);
1055 goto out;
1056 }
1057
1058 rc = -EINVAL;
1059 memset(data_out, 0, sz);
1060 phy_error_log_request = data_out;
1061 phy_error_log_request->smp_frame_type = 0x40;
1062 phy_error_log_request->function = 0x11;
1063 phy_error_log_request->request_length = 2;
1064 phy_error_log_request->allocated_response_length = 0;
1065 phy_error_log_request->phy_identifier = phy->number;
1066
1067 memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
1068 mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
1069 mpi_request->PhysicalPort = 0xFF;
1070 mpi_request->VF_ID = 0; /* TODO */
1071 mpi_request->VP_ID = 0;
1072 sas_address_le = (u64 *)&mpi_request->SASAddress;
1073 *sas_address_le = cpu_to_le64(phy->identify.sas_address);
1074 mpi_request->RequestDataLength =
1075 cpu_to_le16(sizeof(struct phy_error_log_request));
1076 psge = &mpi_request->SGL;
1077
1078 /* WRITE sgel first */
1079 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1080 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
1081 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1082 ioc->base_add_sg_single(psge, sgl_flags |
1083 sizeof(struct phy_error_log_request), data_out_dma);
1084
1085 /* incr sgel */
1086 psge += ioc->sge_size;
1087
1088 /* READ sgel last */
1089 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1090 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1091 MPI2_SGE_FLAGS_END_OF_LIST);
1092 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1093 ioc->base_add_sg_single(psge, sgl_flags |
1094 sizeof(struct phy_error_log_reply), data_out_dma +
1095 sizeof(struct phy_error_log_request));
1096
1097 dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "phy_error_log - "
1098 "send to sas_addr(0x%016llx), phy(%d)\n", ioc->name,
1099 (unsigned long long)phy->identify.sas_address, phy->number));
1100 mpt2sas_base_put_smid_default(ioc, smid);
1101 init_completion(&ioc->transport_cmds.done);
1102 timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
1103 10*HZ);
1104
1105 if (!(ioc->transport_cmds.status & MPT2_CMD_COMPLETE)) {
1106 printk(MPT2SAS_ERR_FMT "%s: timeout\n",
1107 ioc->name, __func__);
1108 _debug_dump_mf(mpi_request,
1109 sizeof(Mpi2SmpPassthroughRequest_t)/4);
1110 if (!(ioc->transport_cmds.status & MPT2_CMD_RESET))
1111 issue_reset = 1;
1112 goto issue_host_reset;
1113 }
1114
1115 dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "phy_error_log - "
1116 "complete\n", ioc->name));
1117
1118 if (ioc->transport_cmds.status & MPT2_CMD_REPLY_VALID) {
1119
1120 mpi_reply = ioc->transport_cmds.reply;
1121
1122 dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT
1123 "phy_error_log - reply data transfer size(%d)\n",
1124 ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
1125
1126 if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
1127 sizeof(struct phy_error_log_reply))
1128 goto out;
1129
1130 phy_error_log_reply = data_out +
1131 sizeof(struct phy_error_log_request);
1132
1133 dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT
1134 "phy_error_log - function_result(%d)\n",
1135 ioc->name, phy_error_log_reply->function_result));
1136
1137 phy->invalid_dword_count =
1138 be32_to_cpu(phy_error_log_reply->invalid_dword);
1139 phy->running_disparity_error_count =
1140 be32_to_cpu(phy_error_log_reply->running_disparity_error);
1141 phy->loss_of_dword_sync_count =
1142 be32_to_cpu(phy_error_log_reply->loss_of_dword_sync);
1143 phy->phy_reset_problem_count =
1144 be32_to_cpu(phy_error_log_reply->phy_reset_problem);
1145 rc = 0;
1146 } else
1147 dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT
1148 "phy_error_log - no reply\n", ioc->name));
1149
1150 issue_host_reset:
1151 if (issue_reset)
1152 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
1153 FORCE_BIG_HAMMER);
1154 out:
1155 ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
1156 if (data_out)
1157 pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma);
1158
1159 mutex_unlock(&ioc->transport_cmds.mutex);
1160 return rc;
952} 1161}
953 1162
954/** 1163/**
955 * _transport_get_linkerrors - 1164 * _transport_get_linkerrors - return phy counters for both hba and expanders
956 * @phy: The sas phy object 1165 * @phy: The sas phy object
957 * 1166 *
958 * Only support sas_host direct attached phys.
959 * Returns 0 for success, non-zero for failure. 1167 * Returns 0 for success, non-zero for failure.
960 * 1168 *
961 */ 1169 */
@@ -963,17 +1171,24 @@ static int
963_transport_get_linkerrors(struct sas_phy *phy) 1171_transport_get_linkerrors(struct sas_phy *phy)
964{ 1172{
965 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy); 1173 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
966 struct _sas_phy *mpt2sas_phy; 1174 unsigned long flags;
967 Mpi2ConfigReply_t mpi_reply; 1175 Mpi2ConfigReply_t mpi_reply;
968 Mpi2SasPhyPage1_t phy_pg1; 1176 Mpi2SasPhyPage1_t phy_pg1;
969 1177
970 mpt2sas_phy = _transport_find_local_phy(ioc, phy); 1178 spin_lock_irqsave(&ioc->sas_node_lock, flags);
971 1179 if (_transport_sas_node_find_by_sas_address(ioc,
972 if (!mpt2sas_phy) /* this phy not on sas_host */ 1180 phy->identify.sas_address) == NULL) {
1181 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
973 return -EINVAL; 1182 return -EINVAL;
1183 }
1184 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
974 1185
1186 if (phy->identify.sas_address != ioc->sas_hba.sas_address)
1187 return _transport_get_expander_phy_error_log(ioc, phy);
1188
1189 /* get hba phy error logs */
975 if ((mpt2sas_config_get_phy_pg1(ioc, &mpi_reply, &phy_pg1, 1190 if ((mpt2sas_config_get_phy_pg1(ioc, &mpi_reply, &phy_pg1,
976 mpt2sas_phy->phy_id))) { 1191 phy->number))) {
977 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", 1192 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
978 ioc->name, __FILE__, __LINE__, __func__); 1193 ioc->name, __FILE__, __LINE__, __func__);
979 return -ENXIO; 1194 return -ENXIO;
@@ -982,8 +1197,7 @@ _transport_get_linkerrors(struct sas_phy *phy)
982 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) 1197 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo)
983 printk(MPT2SAS_INFO_FMT "phy(%d), ioc_status" 1198 printk(MPT2SAS_INFO_FMT "phy(%d), ioc_status"
984 "(0x%04x), loginfo(0x%08x)\n", ioc->name, 1199 "(0x%04x), loginfo(0x%08x)\n", ioc->name,
985 mpt2sas_phy->phy_id, 1200 phy->number, le16_to_cpu(mpi_reply.IOCStatus),
986 le16_to_cpu(mpi_reply.IOCStatus),
987 le32_to_cpu(mpi_reply.IOCLogInfo)); 1201 le32_to_cpu(mpi_reply.IOCLogInfo));
988 1202
989 phy->invalid_dword_count = le32_to_cpu(phy_pg1.InvalidDwordCount); 1203 phy->invalid_dword_count = le32_to_cpu(phy_pg1.InvalidDwordCount);
@@ -1007,18 +1221,18 @@ static int
1007_transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) 1221_transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
1008{ 1222{
1009 struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy); 1223 struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
1010 struct _sas_node *sas_expander; 1224 struct _sas_device *sas_device;
1011 unsigned long flags; 1225 unsigned long flags;
1012 1226
1013 spin_lock_irqsave(&ioc->sas_node_lock, flags); 1227 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1014 sas_expander = mpt2sas_scsih_expander_find_by_sas_address(ioc, 1228 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
1015 rphy->identify.sas_address); 1229 rphy->identify.sas_address);
1016 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 1230 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1017 1231
1018 if (!sas_expander) 1232 if (!sas_device)
1019 return -ENXIO; 1233 return -ENXIO;
1020 1234
1021 *identifier = sas_expander->enclosure_logical_id; 1235 *identifier = sas_device->enclosure_logical_id;
1022 return 0; 1236 return 0;
1023} 1237}
1024 1238
@@ -1046,32 +1260,260 @@ _transport_get_bay_identifier(struct sas_rphy *rphy)
1046 return sas_device->slot; 1260 return sas_device->slot;
1047} 1261}
1048 1262
1263/* phy control request structure */
1264struct phy_control_request{
1265 u8 smp_frame_type; /* 0x40 */
1266 u8 function; /* 0x91 */
1267 u8 allocated_response_length;
1268 u8 request_length; /* 0x09 */
1269 u16 expander_change_count;
1270 u8 reserved_1[3];
1271 u8 phy_identifier;
1272 u8 phy_operation;
1273 u8 reserved_2[13];
1274 u64 attached_device_name;
1275 u8 programmed_min_physical_link_rate;
1276 u8 programmed_max_physical_link_rate;
1277 u8 reserved_3[6];
1278};
1279
1280/* phy control reply structure */
1281struct phy_control_reply{
1282 u8 smp_frame_type; /* 0x41 */
1283 u8 function; /* 0x11 */
1284 u8 function_result;
1285 u8 response_length;
1286};
1287
1288#define SMP_PHY_CONTROL_LINK_RESET (0x01)
1289#define SMP_PHY_CONTROL_HARD_RESET (0x02)
1290#define SMP_PHY_CONTROL_DISABLE (0x03)
1291
1292/**
1293 * _transport_expander_phy_control - expander phy control
1294 * @ioc: per adapter object
1295 * @phy: The sas phy object
1296 *
1297 * Returns 0 for success, non-zero for failure.
1298 *
1299 */
1300static int
1301_transport_expander_phy_control(struct MPT2SAS_ADAPTER *ioc,
1302 struct sas_phy *phy, u8 phy_operation)
1303{
1304 Mpi2SmpPassthroughRequest_t *mpi_request;
1305 Mpi2SmpPassthroughReply_t *mpi_reply;
1306 struct phy_control_request *phy_control_request;
1307 struct phy_control_reply *phy_control_reply;
1308 int rc;
1309 u16 smid;
1310 u32 ioc_state;
1311 unsigned long timeleft;
1312 void *psge;
1313 u32 sgl_flags;
1314 u8 issue_reset = 0;
1315 void *data_out = NULL;
1316 dma_addr_t data_out_dma;
1317 u32 sz;
1318 u64 *sas_address_le;
1319 u16 wait_state_count;
1320
1321 if (ioc->shost_recovery) {
1322 printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
1323 __func__, ioc->name);
1324 return -EFAULT;
1325 }
1326
1327 mutex_lock(&ioc->transport_cmds.mutex);
1328
1329 if (ioc->transport_cmds.status != MPT2_CMD_NOT_USED) {
1330 printk(MPT2SAS_ERR_FMT "%s: transport_cmds in use\n",
1331 ioc->name, __func__);
1332 rc = -EAGAIN;
1333 goto out;
1334 }
1335 ioc->transport_cmds.status = MPT2_CMD_PENDING;
1336
1337 wait_state_count = 0;
1338 ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
1339 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
1340 if (wait_state_count++ == 10) {
1341 printk(MPT2SAS_ERR_FMT
1342 "%s: failed due to ioc not operational\n",
1343 ioc->name, __func__);
1344 rc = -EFAULT;
1345 goto out;
1346 }
1347 ssleep(1);
1348 ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
1349 printk(MPT2SAS_INFO_FMT "%s: waiting for "
1350 "operational state(count=%d)\n", ioc->name,
1351 __func__, wait_state_count);
1352 }
1353 if (wait_state_count)
1354 printk(MPT2SAS_INFO_FMT "%s: ioc is operational\n",
1355 ioc->name, __func__);
1356
1357 smid = mpt2sas_base_get_smid(ioc, ioc->transport_cb_idx);
1358 if (!smid) {
1359 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
1360 ioc->name, __func__);
1361 rc = -EAGAIN;
1362 goto out;
1363 }
1364
1365 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
1366 ioc->transport_cmds.smid = smid;
1367
1368 sz = sizeof(struct phy_control_request) +
1369 sizeof(struct phy_control_reply);
1370 data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma);
1371 if (!data_out) {
1372 printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
1373 __LINE__, __func__);
1374 rc = -ENOMEM;
1375 mpt2sas_base_free_smid(ioc, smid);
1376 goto out;
1377 }
1378
1379 rc = -EINVAL;
1380 memset(data_out, 0, sz);
1381 phy_control_request = data_out;
1382 phy_control_request->smp_frame_type = 0x40;
1383 phy_control_request->function = 0x91;
1384 phy_control_request->request_length = 9;
1385 phy_control_request->allocated_response_length = 0;
1386 phy_control_request->phy_identifier = phy->number;
1387 phy_control_request->phy_operation = phy_operation;
1388 phy_control_request->programmed_min_physical_link_rate =
1389 phy->minimum_linkrate << 4;
1390 phy_control_request->programmed_max_physical_link_rate =
1391 phy->maximum_linkrate << 4;
1392
1393 memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
1394 mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
1395 mpi_request->PhysicalPort = 0xFF;
1396 mpi_request->VF_ID = 0; /* TODO */
1397 mpi_request->VP_ID = 0;
1398 sas_address_le = (u64 *)&mpi_request->SASAddress;
1399 *sas_address_le = cpu_to_le64(phy->identify.sas_address);
1400 mpi_request->RequestDataLength =
1401 cpu_to_le16(sizeof(struct phy_error_log_request));
1402 psge = &mpi_request->SGL;
1403
1404 /* WRITE sgel first */
1405 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1406 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
1407 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1408 ioc->base_add_sg_single(psge, sgl_flags |
1409 sizeof(struct phy_control_request), data_out_dma);
1410
1411 /* incr sgel */
1412 psge += ioc->sge_size;
1413
1414 /* READ sgel last */
1415 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1416 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1417 MPI2_SGE_FLAGS_END_OF_LIST);
1418 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1419 ioc->base_add_sg_single(psge, sgl_flags |
1420 sizeof(struct phy_control_reply), data_out_dma +
1421 sizeof(struct phy_control_request));
1422
1423 dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "phy_control - "
1424 "send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n", ioc->name,
1425 (unsigned long long)phy->identify.sas_address, phy->number,
1426 phy_operation));
1427 mpt2sas_base_put_smid_default(ioc, smid);
1428 init_completion(&ioc->transport_cmds.done);
1429 timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
1430 10*HZ);
1431
1432 if (!(ioc->transport_cmds.status & MPT2_CMD_COMPLETE)) {
1433 printk(MPT2SAS_ERR_FMT "%s: timeout\n",
1434 ioc->name, __func__);
1435 _debug_dump_mf(mpi_request,
1436 sizeof(Mpi2SmpPassthroughRequest_t)/4);
1437 if (!(ioc->transport_cmds.status & MPT2_CMD_RESET))
1438 issue_reset = 1;
1439 goto issue_host_reset;
1440 }
1441
1442 dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "phy_control - "
1443 "complete\n", ioc->name));
1444
1445 if (ioc->transport_cmds.status & MPT2_CMD_REPLY_VALID) {
1446
1447 mpi_reply = ioc->transport_cmds.reply;
1448
1449 dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT
1450 "phy_control - reply data transfer size(%d)\n",
1451 ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
1452
1453 if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
1454 sizeof(struct phy_control_reply))
1455 goto out;
1456
1457 phy_control_reply = data_out +
1458 sizeof(struct phy_control_request);
1459
1460 dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT
1461 "phy_control - function_result(%d)\n",
1462 ioc->name, phy_control_reply->function_result));
1463
1464 rc = 0;
1465 } else
1466 dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT
1467 "phy_control - no reply\n", ioc->name));
1468
1469 issue_host_reset:
1470 if (issue_reset)
1471 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
1472 FORCE_BIG_HAMMER);
1473 out:
1474 ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
1475 if (data_out)
1476 pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma);
1477
1478 mutex_unlock(&ioc->transport_cmds.mutex);
1479 return rc;
1480}
1481
1049/** 1482/**
1050 * _transport_phy_reset - 1483 * _transport_phy_reset -
1051 * @phy: The sas phy object 1484 * @phy: The sas phy object
1052 * @hard_reset: 1485 * @hard_reset:
1053 * 1486 *
1054 * Only support sas_host direct attached phys.
1055 * Returns 0 for success, non-zero for failure. 1487 * Returns 0 for success, non-zero for failure.
1056 */ 1488 */
1057static int 1489static int
1058_transport_phy_reset(struct sas_phy *phy, int hard_reset) 1490_transport_phy_reset(struct sas_phy *phy, int hard_reset)
1059{ 1491{
1060 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy); 1492 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
1061 struct _sas_phy *mpt2sas_phy;
1062 Mpi2SasIoUnitControlReply_t mpi_reply; 1493 Mpi2SasIoUnitControlReply_t mpi_reply;
1063 Mpi2SasIoUnitControlRequest_t mpi_request; 1494 Mpi2SasIoUnitControlRequest_t mpi_request;
1495 unsigned long flags;
1064 1496
1065 mpt2sas_phy = _transport_find_local_phy(ioc, phy); 1497 spin_lock_irqsave(&ioc->sas_node_lock, flags);
1066 1498 if (_transport_sas_node_find_by_sas_address(ioc,
1067 if (!mpt2sas_phy) /* this phy not on sas_host */ 1499 phy->identify.sas_address) == NULL) {
1500 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1068 return -EINVAL; 1501 return -EINVAL;
1502 }
1503 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1504
1505 /* handle expander phys */
1506 if (phy->identify.sas_address != ioc->sas_hba.sas_address)
1507 return _transport_expander_phy_control(ioc, phy,
1508 (hard_reset == 1) ? SMP_PHY_CONTROL_HARD_RESET :
1509 SMP_PHY_CONTROL_LINK_RESET);
1069 1510
1511 /* handle hba phys */
1070 memset(&mpi_request, 0, sizeof(Mpi2SasIoUnitControlReply_t)); 1512 memset(&mpi_request, 0, sizeof(Mpi2SasIoUnitControlReply_t));
1071 mpi_request.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; 1513 mpi_request.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
1072 mpi_request.Operation = hard_reset ? 1514 mpi_request.Operation = hard_reset ?
1073 MPI2_SAS_OP_PHY_HARD_RESET : MPI2_SAS_OP_PHY_LINK_RESET; 1515 MPI2_SAS_OP_PHY_HARD_RESET : MPI2_SAS_OP_PHY_LINK_RESET;
1074 mpi_request.PhyNum = mpt2sas_phy->phy_id; 1516 mpi_request.PhyNum = phy->number;
1075 1517
1076 if ((mpt2sas_base_sas_iounit_control(ioc, &mpi_reply, &mpi_request))) { 1518 if ((mpt2sas_base_sas_iounit_control(ioc, &mpi_reply, &mpi_request))) {
1077 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", 1519 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
@@ -1082,8 +1524,7 @@ _transport_phy_reset(struct sas_phy *phy, int hard_reset)
1082 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) 1524 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo)
1083 printk(MPT2SAS_INFO_FMT "phy(%d), ioc_status" 1525 printk(MPT2SAS_INFO_FMT "phy(%d), ioc_status"
1084 "(0x%04x), loginfo(0x%08x)\n", ioc->name, 1526 "(0x%04x), loginfo(0x%08x)\n", ioc->name,
1085 mpt2sas_phy->phy_id, 1527 phy->number, le16_to_cpu(mpi_reply.IOCStatus),
1086 le16_to_cpu(mpi_reply.IOCStatus),
1087 le32_to_cpu(mpi_reply.IOCLogInfo)); 1528 le32_to_cpu(mpi_reply.IOCLogInfo));
1088 1529
1089 return 0; 1530 return 0;
@@ -1101,17 +1542,28 @@ static int
1101_transport_phy_enable(struct sas_phy *phy, int enable) 1542_transport_phy_enable(struct sas_phy *phy, int enable)
1102{ 1543{
1103 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy); 1544 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
1104 struct _sas_phy *mpt2sas_phy;
1105 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; 1545 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
1106 Mpi2ConfigReply_t mpi_reply; 1546 Mpi2ConfigReply_t mpi_reply;
1107 u16 ioc_status; 1547 u16 ioc_status;
1108 u16 sz; 1548 u16 sz;
1109 int rc = 0; 1549 int rc = 0;
1550 unsigned long flags;
1110 1551
1111 mpt2sas_phy = _transport_find_local_phy(ioc, phy); 1552 spin_lock_irqsave(&ioc->sas_node_lock, flags);
1112 1553 if (_transport_sas_node_find_by_sas_address(ioc,
1113 if (!mpt2sas_phy) /* this phy not on sas_host */ 1554 phy->identify.sas_address) == NULL) {
1555 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1114 return -EINVAL; 1556 return -EINVAL;
1557 }
1558 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1559
1560 /* handle expander phys */
1561 if (phy->identify.sas_address != ioc->sas_hba.sas_address)
1562 return _transport_expander_phy_control(ioc, phy,
1563 (enable == 1) ? SMP_PHY_CONTROL_LINK_RESET :
1564 SMP_PHY_CONTROL_DISABLE);
1565
1566 /* handle hba phys */
1115 1567
1116 /* sas_iounit page 1 */ 1568 /* sas_iounit page 1 */
1117 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys * 1569 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
@@ -1140,14 +1592,18 @@ _transport_phy_enable(struct sas_phy *phy, int enable)
1140 } 1592 }
1141 1593
1142 if (enable) 1594 if (enable)
1143 sas_iounit_pg1->PhyData[mpt2sas_phy->phy_id].PhyFlags 1595 sas_iounit_pg1->PhyData[phy->number].PhyFlags
1144 &= ~MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE; 1596 &= ~MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
1145 else 1597 else
1146 sas_iounit_pg1->PhyData[mpt2sas_phy->phy_id].PhyFlags 1598 sas_iounit_pg1->PhyData[phy->number].PhyFlags
1147 |= MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE; 1599 |= MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
1148 1600
1149 mpt2sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, sz); 1601 mpt2sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, sz);
1150 1602
1603 /* link reset */
1604 if (enable)
1605 _transport_phy_reset(phy, 0);
1606
1151 out: 1607 out:
1152 kfree(sas_iounit_pg1); 1608 kfree(sas_iounit_pg1);
1153 return rc; 1609 return rc;
@@ -1165,7 +1621,6 @@ static int
1165_transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates) 1621_transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
1166{ 1622{
1167 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy); 1623 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
1168 struct _sas_phy *mpt2sas_phy;
1169 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; 1624 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
1170 Mpi2SasPhyPage0_t phy_pg0; 1625 Mpi2SasPhyPage0_t phy_pg0;
1171 Mpi2ConfigReply_t mpi_reply; 1626 Mpi2ConfigReply_t mpi_reply;
@@ -1173,11 +1628,15 @@ _transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
1173 u16 sz; 1628 u16 sz;
1174 int i; 1629 int i;
1175 int rc = 0; 1630 int rc = 0;
1631 unsigned long flags;
1176 1632
1177 mpt2sas_phy = _transport_find_local_phy(ioc, phy); 1633 spin_lock_irqsave(&ioc->sas_node_lock, flags);
1178 1634 if (_transport_sas_node_find_by_sas_address(ioc,
1179 if (!mpt2sas_phy) /* this phy not on sas_host */ 1635 phy->identify.sas_address) == NULL) {
1636 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1180 return -EINVAL; 1637 return -EINVAL;
1638 }
1639 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1181 1640
1182 if (!rates->minimum_linkrate) 1641 if (!rates->minimum_linkrate)
1183 rates->minimum_linkrate = phy->minimum_linkrate; 1642 rates->minimum_linkrate = phy->minimum_linkrate;
@@ -1189,6 +1648,16 @@ _transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
1189 else if (rates->maximum_linkrate > phy->maximum_linkrate_hw) 1648 else if (rates->maximum_linkrate > phy->maximum_linkrate_hw)
1190 rates->maximum_linkrate = phy->maximum_linkrate_hw; 1649 rates->maximum_linkrate = phy->maximum_linkrate_hw;
1191 1650
1651 /* handle expander phys */
1652 if (phy->identify.sas_address != ioc->sas_hba.sas_address) {
1653 phy->minimum_linkrate = rates->minimum_linkrate;
1654 phy->maximum_linkrate = rates->maximum_linkrate;
1655 return _transport_expander_phy_control(ioc, phy,
1656 SMP_PHY_CONTROL_LINK_RESET);
1657 }
1658
1659 /* handle hba phys */
1660
1192 /* sas_iounit page 1 */ 1661 /* sas_iounit page 1 */
1193 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys * 1662 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
1194 sizeof(Mpi2SasIOUnit1PhyData_t)); 1663 sizeof(Mpi2SasIOUnit1PhyData_t));
@@ -1216,7 +1685,7 @@ _transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
1216 } 1685 }
1217 1686
1218 for (i = 0; i < ioc->sas_hba.num_phys; i++) { 1687 for (i = 0; i < ioc->sas_hba.num_phys; i++) {
1219 if (mpt2sas_phy->phy_id != i) { 1688 if (phy->number != i) {
1220 sas_iounit_pg1->PhyData[i].MaxMinLinkRate = 1689 sas_iounit_pg1->PhyData[i].MaxMinLinkRate =
1221 (ioc->sas_hba.phy[i].phy->minimum_linkrate + 1690 (ioc->sas_hba.phy[i].phy->minimum_linkrate +
1222 (ioc->sas_hba.phy[i].phy->maximum_linkrate << 4)); 1691 (ioc->sas_hba.phy[i].phy->maximum_linkrate << 4));
@@ -1240,7 +1709,7 @@ _transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
1240 1709
1241 /* read phy page 0, then update the rates in the sas transport phy */ 1710 /* read phy page 0, then update the rates in the sas transport phy */
1242 if (!mpt2sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0, 1711 if (!mpt2sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
1243 mpt2sas_phy->phy_id)) { 1712 phy->number)) {
1244 phy->minimum_linkrate = _transport_convert_phy_link_rate( 1713 phy->minimum_linkrate = _transport_convert_phy_link_rate(
1245 phy_pg0.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK); 1714 phy_pg0.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK);
1246 phy->maximum_linkrate = _transport_convert_phy_link_rate( 1715 phy->maximum_linkrate = _transport_convert_phy_link_rate(
@@ -1392,7 +1861,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1392 ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(rsp) + 4), 1861 ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(rsp) + 4),
1393 dma_addr_in); 1862 dma_addr_in);
1394 1863
1395 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s - " 1864 dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "%s - "
1396 "sending smp request\n", ioc->name, __func__)); 1865 "sending smp request\n", ioc->name, __func__));
1397 1866
1398 mpt2sas_base_put_smid_default(ioc, smid); 1867 mpt2sas_base_put_smid_default(ioc, smid);
@@ -1410,14 +1879,14 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1410 goto issue_host_reset; 1879 goto issue_host_reset;
1411 } 1880 }
1412 1881
1413 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s - " 1882 dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "%s - "
1414 "complete\n", ioc->name, __func__)); 1883 "complete\n", ioc->name, __func__));
1415 1884
1416 if (ioc->transport_cmds.status & MPT2_CMD_REPLY_VALID) { 1885 if (ioc->transport_cmds.status & MPT2_CMD_REPLY_VALID) {
1417 1886
1418 mpi_reply = ioc->transport_cmds.reply; 1887 mpi_reply = ioc->transport_cmds.reply;
1419 1888
1420 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT 1889 dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT
1421 "%s - reply data transfer size(%d)\n", 1890 "%s - reply data transfer size(%d)\n",
1422 ioc->name, __func__, 1891 ioc->name, __func__,
1423 le16_to_cpu(mpi_reply->ResponseDataLength))); 1892 le16_to_cpu(mpi_reply->ResponseDataLength)));
@@ -1428,7 +1897,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1428 rsp->resid_len -= 1897 rsp->resid_len -=
1429 le16_to_cpu(mpi_reply->ResponseDataLength); 1898 le16_to_cpu(mpi_reply->ResponseDataLength);
1430 } else { 1899 } else {
1431 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT 1900 dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT
1432 "%s - no reply\n", ioc->name, __func__)); 1901 "%s - no reply\n", ioc->name, __func__));
1433 rc = -ENXIO; 1902 rc = -ENXIO;
1434 } 1903 }
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index f5e321791903..adedaa916ecb 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -1483,7 +1483,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev,
1483 } 1483 }
1484 1484
1485 if (task->task_status.resp == SAS_TASK_COMPLETE && 1485 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1486 task->task_status.stat == SAM_GOOD) { 1486 task->task_status.stat == SAM_STAT_GOOD) {
1487 res = TMF_RESP_FUNC_COMPLETE; 1487 res = TMF_RESP_FUNC_COMPLETE;
1488 break; 1488 break;
1489 } 1489 }
@@ -1640,7 +1640,7 @@ int mvs_abort_task(struct sas_task *task)
1640 struct mvs_tmf_task tmf_task; 1640 struct mvs_tmf_task tmf_task;
1641 struct domain_device *dev = task->dev; 1641 struct domain_device *dev = task->dev;
1642 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; 1642 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
1643 struct mvs_info *mvi = mvi_dev->mvi_info; 1643 struct mvs_info *mvi;
1644 int rc = TMF_RESP_FUNC_FAILED; 1644 int rc = TMF_RESP_FUNC_FAILED;
1645 unsigned long flags; 1645 unsigned long flags;
1646 u32 tag; 1646 u32 tag;
@@ -1650,6 +1650,8 @@ int mvs_abort_task(struct sas_task *task)
1650 rc = TMF_RESP_FUNC_FAILED; 1650 rc = TMF_RESP_FUNC_FAILED;
1651 } 1651 }
1652 1652
1653 mvi = mvi_dev->mvi_info;
1654
1653 spin_lock_irqsave(&task->task_state_lock, flags); 1655 spin_lock_irqsave(&task->task_state_lock, flags);
1654 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 1656 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1655 spin_unlock_irqrestore(&task->task_state_lock, flags); 1657 spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -1756,7 +1758,7 @@ static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
1756 struct mvs_device *mvi_dev = task->dev->lldd_dev; 1758 struct mvs_device *mvi_dev = task->dev->lldd_dev;
1757 struct task_status_struct *tstat = &task->task_status; 1759 struct task_status_struct *tstat = &task->task_status;
1758 struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf; 1760 struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
1759 int stat = SAM_GOOD; 1761 int stat = SAM_STAT_GOOD;
1760 1762
1761 1763
1762 resp->frame_len = sizeof(struct dev_to_host_fis); 1764 resp->frame_len = sizeof(struct dev_to_host_fis);
@@ -1788,13 +1790,13 @@ static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
1788 1790
1789 MVS_CHIP_DISP->command_active(mvi, slot_idx); 1791 MVS_CHIP_DISP->command_active(mvi, slot_idx);
1790 1792
1791 stat = SAM_CHECK_COND; 1793 stat = SAM_STAT_CHECK_CONDITION;
1792 switch (task->task_proto) { 1794 switch (task->task_proto) {
1793 case SAS_PROTOCOL_SSP: 1795 case SAS_PROTOCOL_SSP:
1794 stat = SAS_ABORTED_TASK; 1796 stat = SAS_ABORTED_TASK;
1795 break; 1797 break;
1796 case SAS_PROTOCOL_SMP: 1798 case SAS_PROTOCOL_SMP:
1797 stat = SAM_CHECK_COND; 1799 stat = SAM_STAT_CHECK_CONDITION;
1798 break; 1800 break;
1799 1801
1800 case SAS_PROTOCOL_SATA: 1802 case SAS_PROTOCOL_SATA:
@@ -1879,7 +1881,7 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1879 case SAS_PROTOCOL_SSP: 1881 case SAS_PROTOCOL_SSP:
1880 /* hw says status == 0, datapres == 0 */ 1882 /* hw says status == 0, datapres == 0 */
1881 if (rx_desc & RXQ_GOOD) { 1883 if (rx_desc & RXQ_GOOD) {
1882 tstat->stat = SAM_GOOD; 1884 tstat->stat = SAM_STAT_GOOD;
1883 tstat->resp = SAS_TASK_COMPLETE; 1885 tstat->resp = SAS_TASK_COMPLETE;
1884 } 1886 }
1885 /* response frame present */ 1887 /* response frame present */
@@ -1888,12 +1890,12 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1888 sizeof(struct mvs_err_info); 1890 sizeof(struct mvs_err_info);
1889 sas_ssp_task_response(mvi->dev, task, iu); 1891 sas_ssp_task_response(mvi->dev, task, iu);
1890 } else 1892 } else
1891 tstat->stat = SAM_CHECK_COND; 1893 tstat->stat = SAM_STAT_CHECK_CONDITION;
1892 break; 1894 break;
1893 1895
1894 case SAS_PROTOCOL_SMP: { 1896 case SAS_PROTOCOL_SMP: {
1895 struct scatterlist *sg_resp = &task->smp_task.smp_resp; 1897 struct scatterlist *sg_resp = &task->smp_task.smp_resp;
1896 tstat->stat = SAM_GOOD; 1898 tstat->stat = SAM_STAT_GOOD;
1897 to = kmap_atomic(sg_page(sg_resp), KM_IRQ0); 1899 to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
1898 memcpy(to + sg_resp->offset, 1900 memcpy(to + sg_resp->offset,
1899 slot->response + sizeof(struct mvs_err_info), 1901 slot->response + sizeof(struct mvs_err_info),
@@ -1910,7 +1912,7 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1910 } 1912 }
1911 1913
1912 default: 1914 default:
1913 tstat->stat = SAM_CHECK_COND; 1915 tstat->stat = SAM_STAT_CHECK_CONDITION;
1914 break; 1916 break;
1915 } 1917 }
1916 if (!slot->port->port_attached) { 1918 if (!slot->port->port_attached) {
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 5ff8261c5d67..356ad268de6d 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -1480,7 +1480,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
1480 ",param = %d \n", param)); 1480 ",param = %d \n", param));
1481 if (param == 0) { 1481 if (param == 0) {
1482 ts->resp = SAS_TASK_COMPLETE; 1482 ts->resp = SAS_TASK_COMPLETE;
1483 ts->stat = SAM_GOOD; 1483 ts->stat = SAM_STAT_GOOD;
1484 } else { 1484 } else {
1485 ts->resp = SAS_TASK_COMPLETE; 1485 ts->resp = SAS_TASK_COMPLETE;
1486 ts->stat = SAS_PROTO_RESPONSE; 1486 ts->stat = SAS_PROTO_RESPONSE;
@@ -1909,7 +1909,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
1909 PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n")); 1909 PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
1910 if (param == 0) { 1910 if (param == 0) {
1911 ts->resp = SAS_TASK_COMPLETE; 1911 ts->resp = SAS_TASK_COMPLETE;
1912 ts->stat = SAM_GOOD; 1912 ts->stat = SAM_STAT_GOOD;
1913 } else { 1913 } else {
1914 u8 len; 1914 u8 len;
1915 ts->resp = SAS_TASK_COMPLETE; 1915 ts->resp = SAS_TASK_COMPLETE;
@@ -2450,7 +2450,7 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2450 case IO_SUCCESS: 2450 case IO_SUCCESS:
2451 PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n")); 2451 PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
2452 ts->resp = SAS_TASK_COMPLETE; 2452 ts->resp = SAS_TASK_COMPLETE;
2453 ts->stat = SAM_GOOD; 2453 ts->stat = SAM_STAT_GOOD;
2454 if (pm8001_dev) 2454 if (pm8001_dev)
2455 pm8001_dev->running_req--; 2455 pm8001_dev->running_req--;
2456 break; 2456 break;
@@ -2479,19 +2479,19 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2479 PM8001_IO_DBG(pm8001_ha, 2479 PM8001_IO_DBG(pm8001_ha,
2480 pm8001_printk("IO_ERROR_HW_TIMEOUT\n")); 2480 pm8001_printk("IO_ERROR_HW_TIMEOUT\n"));
2481 ts->resp = SAS_TASK_COMPLETE; 2481 ts->resp = SAS_TASK_COMPLETE;
2482 ts->stat = SAM_BUSY; 2482 ts->stat = SAM_STAT_BUSY;
2483 break; 2483 break;
2484 case IO_XFER_ERROR_BREAK: 2484 case IO_XFER_ERROR_BREAK:
2485 PM8001_IO_DBG(pm8001_ha, 2485 PM8001_IO_DBG(pm8001_ha,
2486 pm8001_printk("IO_XFER_ERROR_BREAK\n")); 2486 pm8001_printk("IO_XFER_ERROR_BREAK\n"));
2487 ts->resp = SAS_TASK_COMPLETE; 2487 ts->resp = SAS_TASK_COMPLETE;
2488 ts->stat = SAM_BUSY; 2488 ts->stat = SAM_STAT_BUSY;
2489 break; 2489 break;
2490 case IO_XFER_ERROR_PHY_NOT_READY: 2490 case IO_XFER_ERROR_PHY_NOT_READY:
2491 PM8001_IO_DBG(pm8001_ha, 2491 PM8001_IO_DBG(pm8001_ha,
2492 pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); 2492 pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
2493 ts->resp = SAS_TASK_COMPLETE; 2493 ts->resp = SAS_TASK_COMPLETE;
2494 ts->stat = SAM_BUSY; 2494 ts->stat = SAM_STAT_BUSY;
2495 break; 2495 break;
2496 case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: 2496 case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
2497 PM8001_IO_DBG(pm8001_ha, 2497 PM8001_IO_DBG(pm8001_ha,
@@ -3260,7 +3260,7 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3260 case IO_SUCCESS: 3260 case IO_SUCCESS:
3261 PM8001_EH_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n")); 3261 PM8001_EH_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
3262 ts->resp = SAS_TASK_COMPLETE; 3262 ts->resp = SAS_TASK_COMPLETE;
3263 ts->stat = SAM_GOOD; 3263 ts->stat = SAM_STAT_GOOD;
3264 break; 3264 break;
3265 case IO_NOT_VALID: 3265 case IO_NOT_VALID:
3266 PM8001_EH_DBG(pm8001_ha, pm8001_printk("IO_NOT_VALID\n")); 3266 PM8001_EH_DBG(pm8001_ha, pm8001_printk("IO_NOT_VALID\n"));
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index cd02ceaf67ff..6ae059ebb4bb 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -763,7 +763,7 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
763 } 763 }
764 764
765 if (task->task_status.resp == SAS_TASK_COMPLETE && 765 if (task->task_status.resp == SAS_TASK_COMPLETE &&
766 task->task_status.stat == SAM_GOOD) { 766 task->task_status.stat == SAM_STAT_GOOD) {
767 res = TMF_RESP_FUNC_COMPLETE; 767 res = TMF_RESP_FUNC_COMPLETE;
768 break; 768 break;
769 } 769 }
@@ -853,7 +853,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
853 } 853 }
854 854
855 if (task->task_status.resp == SAS_TASK_COMPLETE && 855 if (task->task_status.resp == SAS_TASK_COMPLETE &&
856 task->task_status.stat == SAM_GOOD) { 856 task->task_status.stat == SAM_STAT_GOOD) {
857 res = TMF_RESP_FUNC_COMPLETE; 857 res = TMF_RESP_FUNC_COMPLETE;
858 break; 858 break;
859 859
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index c44e4ab4e938..ecc45c8b4e6b 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -113,6 +113,7 @@ static struct pmcraid_chip_details pmcraid_chip_cfg[] = {
113 .global_intr_mask = 0x00034, 113 .global_intr_mask = 0x00034,
114 .ioa_host_intr = 0x0009C, 114 .ioa_host_intr = 0x0009C,
115 .ioa_host_intr_clr = 0x000A0, 115 .ioa_host_intr_clr = 0x000A0,
116 .ioa_host_msix_intr = 0x7FC40,
116 .ioa_host_mask = 0x7FC28, 117 .ioa_host_mask = 0x7FC28,
117 .ioa_host_mask_clr = 0x7FC28, 118 .ioa_host_mask_clr = 0x7FC28,
118 .host_ioa_intr = 0x00020, 119 .host_ioa_intr = 0x00020,
@@ -154,8 +155,12 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
154 u8 target, bus, lun; 155 u8 target, bus, lun;
155 unsigned long lock_flags; 156 unsigned long lock_flags;
156 int rc = -ENXIO; 157 int rc = -ENXIO;
158 u16 fw_version;
159
157 pinstance = shost_priv(scsi_dev->host); 160 pinstance = shost_priv(scsi_dev->host);
158 161
162 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
163
159 /* Driver exposes VSET and GSCSI resources only; all other device types 164 /* Driver exposes VSET and GSCSI resources only; all other device types
160 * are not exposed. Resource list is synchronized using resource lock 165 * are not exposed. Resource list is synchronized using resource lock
161 * so any traversal or modifications to the list should be done inside 166 * so any traversal or modifications to the list should be done inside
@@ -166,7 +171,11 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
166 171
167 /* do not expose VSETs with order-ids > MAX_VSET_TARGETS */ 172 /* do not expose VSETs with order-ids > MAX_VSET_TARGETS */
168 if (RES_IS_VSET(temp->cfg_entry)) { 173 if (RES_IS_VSET(temp->cfg_entry)) {
169 target = temp->cfg_entry.unique_flags1; 174 if (fw_version <= PMCRAID_FW_VERSION_1)
175 target = temp->cfg_entry.unique_flags1;
176 else
177 target = temp->cfg_entry.array_id & 0xFF;
178
170 if (target > PMCRAID_MAX_VSET_TARGETS) 179 if (target > PMCRAID_MAX_VSET_TARGETS)
171 continue; 180 continue;
172 bus = PMCRAID_VSET_BUS_ID; 181 bus = PMCRAID_VSET_BUS_ID;
@@ -283,7 +292,7 @@ static void pmcraid_slave_destroy(struct scsi_device *scsi_dev)
283 * @reason: calling context 292 * @reason: calling context
284 * 293 *
285 * Return value 294 * Return value
286 * actual depth set 295 * actual depth set
287 */ 296 */
288static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth, 297static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth,
289 int reason) 298 int reason)
@@ -305,7 +314,7 @@ static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth,
305 * @tag: type of tags to use 314 * @tag: type of tags to use
306 * 315 *
307 * Return value: 316 * Return value:
308 * actual queue type set 317 * actual queue type set
309 */ 318 */
310static int pmcraid_change_queue_type(struct scsi_device *scsi_dev, int tag) 319static int pmcraid_change_queue_type(struct scsi_device *scsi_dev, int tag)
311{ 320{
@@ -357,6 +366,7 @@ void pmcraid_init_cmdblk(struct pmcraid_cmd *cmd, int index)
357 * processed by IOA 366 * processed by IOA
358 */ 367 */
359 memset(&cmd->ioa_cb->ioarcb.cdb, 0, PMCRAID_MAX_CDB_LEN); 368 memset(&cmd->ioa_cb->ioarcb.cdb, 0, PMCRAID_MAX_CDB_LEN);
369 ioarcb->hrrq_id = 0;
360 ioarcb->request_flags0 = 0; 370 ioarcb->request_flags0 = 0;
361 ioarcb->request_flags1 = 0; 371 ioarcb->request_flags1 = 0;
362 ioarcb->cmd_timeout = 0; 372 ioarcb->cmd_timeout = 0;
@@ -368,13 +378,15 @@ void pmcraid_init_cmdblk(struct pmcraid_cmd *cmd, int index)
368 ioarcb->add_cmd_param_offset = 0; 378 ioarcb->add_cmd_param_offset = 0;
369 cmd->ioa_cb->ioasa.ioasc = 0; 379 cmd->ioa_cb->ioasa.ioasc = 0;
370 cmd->ioa_cb->ioasa.residual_data_length = 0; 380 cmd->ioa_cb->ioasa.residual_data_length = 0;
371 cmd->u.time_left = 0; 381 cmd->time_left = 0;
372 } 382 }
373 383
374 cmd->cmd_done = NULL; 384 cmd->cmd_done = NULL;
375 cmd->scsi_cmd = NULL; 385 cmd->scsi_cmd = NULL;
376 cmd->release = 0; 386 cmd->release = 0;
377 cmd->completion_req = 0; 387 cmd->completion_req = 0;
388 cmd->sense_buffer = 0;
389 cmd->sense_buffer_dma = 0;
378 cmd->dma_handle = 0; 390 cmd->dma_handle = 0;
379 init_timer(&cmd->timer); 391 init_timer(&cmd->timer);
380} 392}
@@ -449,7 +461,9 @@ void pmcraid_return_cmd(struct pmcraid_cmd *cmd)
449 */ 461 */
450static u32 pmcraid_read_interrupts(struct pmcraid_instance *pinstance) 462static u32 pmcraid_read_interrupts(struct pmcraid_instance *pinstance)
451{ 463{
452 return ioread32(pinstance->int_regs.ioa_host_interrupt_reg); 464 return (pinstance->interrupt_mode) ?
465 ioread32(pinstance->int_regs.ioa_host_msix_interrupt_reg) :
466 ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
453} 467}
454 468
455/** 469/**
@@ -469,10 +483,15 @@ static void pmcraid_disable_interrupts(
469 u32 gmask = ioread32(pinstance->int_regs.global_interrupt_mask_reg); 483 u32 gmask = ioread32(pinstance->int_regs.global_interrupt_mask_reg);
470 u32 nmask = gmask | GLOBAL_INTERRUPT_MASK; 484 u32 nmask = gmask | GLOBAL_INTERRUPT_MASK;
471 485
472 iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg);
473 iowrite32(intrs, pinstance->int_regs.ioa_host_interrupt_clr_reg); 486 iowrite32(intrs, pinstance->int_regs.ioa_host_interrupt_clr_reg);
474 iowrite32(intrs, pinstance->int_regs.ioa_host_interrupt_mask_reg); 487 iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg);
475 ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg); 488 ioread32(pinstance->int_regs.global_interrupt_mask_reg);
489
490 if (!pinstance->interrupt_mode) {
491 iowrite32(intrs,
492 pinstance->int_regs.ioa_host_interrupt_mask_reg);
493 ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
494 }
476} 495}
477 496
478/** 497/**
@@ -493,8 +512,12 @@ static void pmcraid_enable_interrupts(
493 u32 nmask = gmask & (~GLOBAL_INTERRUPT_MASK); 512 u32 nmask = gmask & (~GLOBAL_INTERRUPT_MASK);
494 513
495 iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg); 514 iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg);
496 iowrite32(~intrs, pinstance->int_regs.ioa_host_interrupt_mask_reg); 515
497 ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg); 516 if (!pinstance->interrupt_mode) {
517 iowrite32(~intrs,
518 pinstance->int_regs.ioa_host_interrupt_mask_reg);
519 ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
520 }
498 521
499 pmcraid_info("enabled interrupts global mask = %x intr_mask = %x\n", 522 pmcraid_info("enabled interrupts global mask = %x intr_mask = %x\n",
500 ioread32(pinstance->int_regs.global_interrupt_mask_reg), 523 ioread32(pinstance->int_regs.global_interrupt_mask_reg),
@@ -502,6 +525,39 @@ static void pmcraid_enable_interrupts(
502} 525}
503 526
504/** 527/**
528 * pmcraid_clr_trans_op - clear trans to op interrupt
529 *
530 * @pinstance: pointer to per adapter instance structure
531 *
532 * Return Value
533 * None
534 */
535static void pmcraid_clr_trans_op(
536 struct pmcraid_instance *pinstance
537)
538{
539 unsigned long lock_flags;
540
541 if (!pinstance->interrupt_mode) {
542 iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
543 pinstance->int_regs.ioa_host_interrupt_mask_reg);
544 ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
545 iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
546 pinstance->int_regs.ioa_host_interrupt_clr_reg);
547 ioread32(pinstance->int_regs.ioa_host_interrupt_clr_reg);
548 }
549
550 if (pinstance->reset_cmd != NULL) {
551 del_timer(&pinstance->reset_cmd->timer);
552 spin_lock_irqsave(
553 pinstance->host->host_lock, lock_flags);
554 pinstance->reset_cmd->cmd_done(pinstance->reset_cmd);
555 spin_unlock_irqrestore(
556 pinstance->host->host_lock, lock_flags);
557 }
558}
559
560/**
505 * pmcraid_reset_type - Determine the required reset type 561 * pmcraid_reset_type - Determine the required reset type
506 * @pinstance: pointer to adapter instance structure 562 * @pinstance: pointer to adapter instance structure
507 * 563 *
@@ -536,7 +592,7 @@ static void pmcraid_reset_type(struct pmcraid_instance *pinstance)
536 * pmcraid_bist_done - completion function for PCI BIST 592 * pmcraid_bist_done - completion function for PCI BIST
537 * @cmd: pointer to reset command 593 * @cmd: pointer to reset command
538 * Return Value 594 * Return Value
539 * none 595 * none
540 */ 596 */
541 597
542static void pmcraid_ioa_reset(struct pmcraid_cmd *); 598static void pmcraid_ioa_reset(struct pmcraid_cmd *);
@@ -552,16 +608,16 @@ static void pmcraid_bist_done(struct pmcraid_cmd *cmd)
552 608
553 /* If PCI config space can't be accessed wait for another two secs */ 609 /* If PCI config space can't be accessed wait for another two secs */
554 if ((rc != PCIBIOS_SUCCESSFUL || (!(pci_reg & PCI_COMMAND_MEMORY))) && 610 if ((rc != PCIBIOS_SUCCESSFUL || (!(pci_reg & PCI_COMMAND_MEMORY))) &&
555 cmd->u.time_left > 0) { 611 cmd->time_left > 0) {
556 pmcraid_info("BIST not complete, waiting another 2 secs\n"); 612 pmcraid_info("BIST not complete, waiting another 2 secs\n");
557 cmd->timer.expires = jiffies + cmd->u.time_left; 613 cmd->timer.expires = jiffies + cmd->time_left;
558 cmd->u.time_left = 0; 614 cmd->time_left = 0;
559 cmd->timer.data = (unsigned long)cmd; 615 cmd->timer.data = (unsigned long)cmd;
560 cmd->timer.function = 616 cmd->timer.function =
561 (void (*)(unsigned long))pmcraid_bist_done; 617 (void (*)(unsigned long))pmcraid_bist_done;
562 add_timer(&cmd->timer); 618 add_timer(&cmd->timer);
563 } else { 619 } else {
564 cmd->u.time_left = 0; 620 cmd->time_left = 0;
565 pmcraid_info("BIST is complete, proceeding with reset\n"); 621 pmcraid_info("BIST is complete, proceeding with reset\n");
566 spin_lock_irqsave(pinstance->host->host_lock, lock_flags); 622 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
567 pmcraid_ioa_reset(cmd); 623 pmcraid_ioa_reset(cmd);
@@ -585,10 +641,10 @@ static void pmcraid_start_bist(struct pmcraid_cmd *cmd)
585 pinstance->int_regs.host_ioa_interrupt_reg); 641 pinstance->int_regs.host_ioa_interrupt_reg);
586 doorbells = ioread32(pinstance->int_regs.host_ioa_interrupt_reg); 642 doorbells = ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
587 intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg); 643 intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
588 pmcraid_info("doorbells after start bist: %x intrs: %x \n", 644 pmcraid_info("doorbells after start bist: %x intrs: %x\n",
589 doorbells, intrs); 645 doorbells, intrs);
590 646
591 cmd->u.time_left = msecs_to_jiffies(PMCRAID_BIST_TIMEOUT); 647 cmd->time_left = msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
592 cmd->timer.data = (unsigned long)cmd; 648 cmd->timer.data = (unsigned long)cmd;
593 cmd->timer.expires = jiffies + msecs_to_jiffies(PMCRAID_BIST_TIMEOUT); 649 cmd->timer.expires = jiffies + msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
594 cmd->timer.function = (void (*)(unsigned long))pmcraid_bist_done; 650 cmd->timer.function = (void (*)(unsigned long))pmcraid_bist_done;
@@ -612,7 +668,7 @@ static void pmcraid_reset_alert_done(struct pmcraid_cmd *cmd)
612 * some more time to wait, restart the timer 668 * some more time to wait, restart the timer
613 */ 669 */
614 if (((status & INTRS_CRITICAL_OP_IN_PROGRESS) == 0) || 670 if (((status & INTRS_CRITICAL_OP_IN_PROGRESS) == 0) ||
615 cmd->u.time_left <= 0) { 671 cmd->time_left <= 0) {
616 pmcraid_info("critical op is reset proceeding with reset\n"); 672 pmcraid_info("critical op is reset proceeding with reset\n");
617 spin_lock_irqsave(pinstance->host->host_lock, lock_flags); 673 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
618 pmcraid_ioa_reset(cmd); 674 pmcraid_ioa_reset(cmd);
@@ -620,7 +676,7 @@ static void pmcraid_reset_alert_done(struct pmcraid_cmd *cmd)
620 } else { 676 } else {
621 pmcraid_info("critical op is not yet reset waiting again\n"); 677 pmcraid_info("critical op is not yet reset waiting again\n");
622 /* restart timer if some more time is available to wait */ 678 /* restart timer if some more time is available to wait */
623 cmd->u.time_left -= PMCRAID_CHECK_FOR_RESET_TIMEOUT; 679 cmd->time_left -= PMCRAID_CHECK_FOR_RESET_TIMEOUT;
624 cmd->timer.data = (unsigned long)cmd; 680 cmd->timer.data = (unsigned long)cmd;
625 cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT; 681 cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
626 cmd->timer.function = 682 cmd->timer.function =
@@ -638,6 +694,7 @@ static void pmcraid_reset_alert_done(struct pmcraid_cmd *cmd)
638 * successfully written to IOA. Returns non-zero in case pci_config_space 694 * successfully written to IOA. Returns non-zero in case pci_config_space
639 * is not accessible 695 * is not accessible
640 */ 696 */
697static void pmcraid_notify_ioastate(struct pmcraid_instance *, u32);
641static void pmcraid_reset_alert(struct pmcraid_cmd *cmd) 698static void pmcraid_reset_alert(struct pmcraid_cmd *cmd)
642{ 699{
643 struct pmcraid_instance *pinstance = cmd->drv_inst; 700 struct pmcraid_instance *pinstance = cmd->drv_inst;
@@ -658,7 +715,7 @@ static void pmcraid_reset_alert(struct pmcraid_cmd *cmd)
658 * OPERATION bit is reset. A timer is started to wait for this 715 * OPERATION bit is reset. A timer is started to wait for this
659 * bit to be reset. 716 * bit to be reset.
660 */ 717 */
661 cmd->u.time_left = PMCRAID_RESET_TIMEOUT; 718 cmd->time_left = PMCRAID_RESET_TIMEOUT;
662 cmd->timer.data = (unsigned long)cmd; 719 cmd->timer.data = (unsigned long)cmd;
663 cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT; 720 cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
664 cmd->timer.function = 721 cmd->timer.function =
@@ -693,7 +750,8 @@ static void pmcraid_timeout_handler(struct pmcraid_cmd *cmd)
693 unsigned long lock_flags; 750 unsigned long lock_flags;
694 751
695 dev_info(&pinstance->pdev->dev, 752 dev_info(&pinstance->pdev->dev,
696 "Adapter being reset due to command timeout.\n"); 753 "Adapter being reset due to cmd(CDB[0] = %x) timeout\n",
754 cmd->ioa_cb->ioarcb.cdb[0]);
697 755
698 /* Command timeouts result in hard reset sequence. The command that got 756 /* Command timeouts result in hard reset sequence. The command that got
699 * timed out may be the one used as part of reset sequence. In this 757 * timed out may be the one used as part of reset sequence. In this
@@ -736,9 +794,14 @@ static void pmcraid_timeout_handler(struct pmcraid_cmd *cmd)
736 */ 794 */
737 if (cmd == pinstance->reset_cmd) 795 if (cmd == pinstance->reset_cmd)
738 cmd->cmd_done = pmcraid_ioa_reset; 796 cmd->cmd_done = pmcraid_ioa_reset;
739
740 } 797 }
741 798
799 /* Notify apps of important IOA bringup/bringdown sequences */
800 if (pinstance->scn.ioa_state != PMC_DEVICE_EVENT_RESET_START &&
801 pinstance->scn.ioa_state != PMC_DEVICE_EVENT_SHUTDOWN_START)
802 pmcraid_notify_ioastate(pinstance,
803 PMC_DEVICE_EVENT_RESET_START);
804
742 pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT; 805 pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
743 scsi_block_requests(pinstance->host); 806 scsi_block_requests(pinstance->host);
744 pmcraid_reset_alert(cmd); 807 pmcraid_reset_alert(cmd);
@@ -866,7 +929,7 @@ static void _pmcraid_fire_command(struct pmcraid_cmd *cmd)
866 /* Add this command block to pending cmd pool. We do this prior to 929 /* Add this command block to pending cmd pool. We do this prior to
867 * writting IOARCB to ioarrin because IOA might complete the command 930 * writting IOARCB to ioarrin because IOA might complete the command
868 * by the time we are about to add it to the list. Response handler 931 * by the time we are about to add it to the list. Response handler
869 * (isr/tasklet) looks for cmb block in the pending pending list. 932 * (isr/tasklet) looks for cmd block in the pending pending list.
870 */ 933 */
871 spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags); 934 spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags);
872 list_add_tail(&cmd->free_list, &pinstance->pending_cmd_pool); 935 list_add_tail(&cmd->free_list, &pinstance->pending_cmd_pool);
@@ -916,6 +979,23 @@ static void pmcraid_send_cmd(
916} 979}
917 980
918/** 981/**
982 * pmcraid_ioa_shutdown_done - completion function for IOA shutdown command
983 * @cmd: pointer to the command block used for sending IOA shutdown command
984 *
985 * Return value
986 * None
987 */
988static void pmcraid_ioa_shutdown_done(struct pmcraid_cmd *cmd)
989{
990 struct pmcraid_instance *pinstance = cmd->drv_inst;
991 unsigned long lock_flags;
992
993 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
994 pmcraid_ioa_reset(cmd);
995 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
996}
997
998/**
919 * pmcraid_ioa_shutdown - sends SHUTDOWN command to ioa 999 * pmcraid_ioa_shutdown - sends SHUTDOWN command to ioa
920 * 1000 *
921 * @cmd: pointer to the command block used as part of reset sequence 1001 * @cmd: pointer to the command block used as part of reset sequence
@@ -943,30 +1023,112 @@ static void pmcraid_ioa_shutdown(struct pmcraid_cmd *cmd)
943 pmcraid_info("firing normal shutdown command (%d) to IOA\n", 1023 pmcraid_info("firing normal shutdown command (%d) to IOA\n",
944 le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle)); 1024 le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle));
945 1025
946 pmcraid_send_cmd(cmd, pmcraid_ioa_reset, 1026 pmcraid_notify_ioastate(cmd->drv_inst, PMC_DEVICE_EVENT_SHUTDOWN_START);
1027
1028 pmcraid_send_cmd(cmd, pmcraid_ioa_shutdown_done,
947 PMCRAID_SHUTDOWN_TIMEOUT, 1029 PMCRAID_SHUTDOWN_TIMEOUT,
948 pmcraid_timeout_handler); 1030 pmcraid_timeout_handler);
949} 1031}
950 1032
951/** 1033/**
952 * pmcraid_identify_hrrq - registers host rrq buffers with IOA 1034 * pmcraid_get_fwversion_done - completion function for get_fwversion
953 * @cmd: pointer to command block to be used for identify hrrq 1035 *
1036 * @cmd: pointer to command block used to send INQUIRY command
954 * 1037 *
955 * Return Value 1038 * Return Value
956 * 0 in case of success, otherwise non-zero failure code 1039 * none
957 */ 1040 */
958
959static void pmcraid_querycfg(struct pmcraid_cmd *); 1041static void pmcraid_querycfg(struct pmcraid_cmd *);
960 1042
1043static void pmcraid_get_fwversion_done(struct pmcraid_cmd *cmd)
1044{
1045 struct pmcraid_instance *pinstance = cmd->drv_inst;
1046 u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
1047 unsigned long lock_flags;
1048
1049 /* configuration table entry size depends on firmware version. If fw
1050 * version is not known, it is not possible to interpret IOA config
1051 * table
1052 */
1053 if (ioasc) {
1054 pmcraid_err("IOA Inquiry failed with %x\n", ioasc);
1055 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
1056 pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
1057 pmcraid_reset_alert(cmd);
1058 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
1059 } else {
1060 pmcraid_querycfg(cmd);
1061 }
1062}
1063
1064/**
1065 * pmcraid_get_fwversion - reads firmware version information
1066 *
1067 * @cmd: pointer to command block used to send INQUIRY command
1068 *
1069 * Return Value
1070 * none
1071 */
1072static void pmcraid_get_fwversion(struct pmcraid_cmd *cmd)
1073{
1074 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
1075 struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
1076 struct pmcraid_instance *pinstance = cmd->drv_inst;
1077 u16 data_size = sizeof(struct pmcraid_inquiry_data);
1078
1079 pmcraid_reinit_cmdblk(cmd);
1080 ioarcb->request_type = REQ_TYPE_SCSI;
1081 ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
1082 ioarcb->cdb[0] = INQUIRY;
1083 ioarcb->cdb[1] = 1;
1084 ioarcb->cdb[2] = 0xD0;
1085 ioarcb->cdb[3] = (data_size >> 8) & 0xFF;
1086 ioarcb->cdb[4] = data_size & 0xFF;
1087
1088 /* Since entire inquiry data it can be part of IOARCB itself
1089 */
1090 ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
1091 offsetof(struct pmcraid_ioarcb,
1092 add_data.u.ioadl[0]));
1093 ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
1094 ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
1095
1096 ioarcb->request_flags0 |= NO_LINK_DESCS;
1097 ioarcb->data_transfer_length = cpu_to_le32(data_size);
1098 ioadl = &(ioarcb->add_data.u.ioadl[0]);
1099 ioadl->flags = IOADL_FLAGS_LAST_DESC;
1100 ioadl->address = cpu_to_le64(pinstance->inq_data_baddr);
1101 ioadl->data_len = cpu_to_le32(data_size);
1102
1103 pmcraid_send_cmd(cmd, pmcraid_get_fwversion_done,
1104 PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
1105}
1106
1107/**
1108 * pmcraid_identify_hrrq - registers host rrq buffers with IOA
1109 * @cmd: pointer to command block to be used for identify hrrq
1110 *
1111 * Return Value
1112 * none
1113 */
961static void pmcraid_identify_hrrq(struct pmcraid_cmd *cmd) 1114static void pmcraid_identify_hrrq(struct pmcraid_cmd *cmd)
962{ 1115{
963 struct pmcraid_instance *pinstance = cmd->drv_inst; 1116 struct pmcraid_instance *pinstance = cmd->drv_inst;
964 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; 1117 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
965 int index = 0; 1118 int index = cmd->hrrq_index;
966 __be64 hrrq_addr = cpu_to_be64(pinstance->hrrq_start_bus_addr[index]); 1119 __be64 hrrq_addr = cpu_to_be64(pinstance->hrrq_start_bus_addr[index]);
967 u32 hrrq_size = cpu_to_be32(sizeof(u32) * PMCRAID_MAX_CMD); 1120 u32 hrrq_size = cpu_to_be32(sizeof(u32) * PMCRAID_MAX_CMD);
1121 void (*done_function)(struct pmcraid_cmd *);
968 1122
969 pmcraid_reinit_cmdblk(cmd); 1123 pmcraid_reinit_cmdblk(cmd);
1124 cmd->hrrq_index = index + 1;
1125
1126 if (cmd->hrrq_index < pinstance->num_hrrq) {
1127 done_function = pmcraid_identify_hrrq;
1128 } else {
1129 cmd->hrrq_index = 0;
1130 done_function = pmcraid_get_fwversion;
1131 }
970 1132
971 /* Initialize ioarcb */ 1133 /* Initialize ioarcb */
972 ioarcb->request_type = REQ_TYPE_IOACMD; 1134 ioarcb->request_type = REQ_TYPE_IOACMD;
@@ -980,8 +1142,8 @@ static void pmcraid_identify_hrrq(struct pmcraid_cmd *cmd)
980 /* IOA expects 64-bit pci address to be written in B.E format 1142 /* IOA expects 64-bit pci address to be written in B.E format
981 * (i.e cdb[2]=MSByte..cdb[9]=LSB. 1143 * (i.e cdb[2]=MSByte..cdb[9]=LSB.
982 */ 1144 */
983 pmcraid_info("HRRQ_IDENTIFY with hrrq:ioarcb => %llx:%llx\n", 1145 pmcraid_info("HRRQ_IDENTIFY with hrrq:ioarcb:index => %llx:%llx:%x\n",
984 hrrq_addr, ioarcb->ioarcb_bus_addr); 1146 hrrq_addr, ioarcb->ioarcb_bus_addr, index);
985 1147
986 memcpy(&(ioarcb->cdb[2]), &hrrq_addr, sizeof(hrrq_addr)); 1148 memcpy(&(ioarcb->cdb[2]), &hrrq_addr, sizeof(hrrq_addr));
987 memcpy(&(ioarcb->cdb[10]), &hrrq_size, sizeof(hrrq_size)); 1149 memcpy(&(ioarcb->cdb[10]), &hrrq_size, sizeof(hrrq_size));
@@ -990,7 +1152,7 @@ static void pmcraid_identify_hrrq(struct pmcraid_cmd *cmd)
990 * Note that this gets called even during reset from SCSI mid-layer 1152 * Note that this gets called even during reset from SCSI mid-layer
991 * or tasklet 1153 * or tasklet
992 */ 1154 */
993 pmcraid_send_cmd(cmd, pmcraid_querycfg, 1155 pmcraid_send_cmd(cmd, done_function,
994 PMCRAID_INTERNAL_TIMEOUT, 1156 PMCRAID_INTERNAL_TIMEOUT,
995 pmcraid_timeout_handler); 1157 pmcraid_timeout_handler);
996} 1158}
@@ -1047,7 +1209,7 @@ static struct pmcraid_cmd *pmcraid_init_hcam
1047 } 1209 }
1048 1210
1049 if (type == PMCRAID_HCAM_CODE_CONFIG_CHANGE) { 1211 if (type == PMCRAID_HCAM_CODE_CONFIG_CHANGE) {
1050 rcb_size = sizeof(struct pmcraid_hcam_ccn); 1212 rcb_size = sizeof(struct pmcraid_hcam_ccn_ext);
1051 cmd_done = pmcraid_process_ccn; 1213 cmd_done = pmcraid_process_ccn;
1052 dma = pinstance->ccn.baddr + PMCRAID_AEN_HDR_SIZE; 1214 dma = pinstance->ccn.baddr + PMCRAID_AEN_HDR_SIZE;
1053 hcam = &pinstance->ccn; 1215 hcam = &pinstance->ccn;
@@ -1094,7 +1256,7 @@ static struct pmcraid_cmd *pmcraid_init_hcam
1094 * This function will send a Host Controlled Async command to IOA. 1256 * This function will send a Host Controlled Async command to IOA.
1095 * 1257 *
1096 * Return value: 1258 * Return value:
1097 * none 1259 * none
1098 */ 1260 */
1099static void pmcraid_send_hcam(struct pmcraid_instance *pinstance, u8 type) 1261static void pmcraid_send_hcam(struct pmcraid_instance *pinstance, u8 type)
1100{ 1262{
@@ -1202,18 +1364,25 @@ static void pmcraid_cancel_ldn(struct pmcraid_cmd *cmd)
1202/** 1364/**
1203 * pmcraid_expose_resource - check if the resource can be exposed to OS 1365 * pmcraid_expose_resource - check if the resource can be exposed to OS
1204 * 1366 *
1367 * @fw_version: firmware version code
1205 * @cfgte: pointer to configuration table entry of the resource 1368 * @cfgte: pointer to configuration table entry of the resource
1206 * 1369 *
1207 * Return value: 1370 * Return value:
1208 * true if resource can be added to midlayer, false(0) otherwise 1371 * true if resource can be added to midlayer, false(0) otherwise
1209 */ 1372 */
1210static int pmcraid_expose_resource(struct pmcraid_config_table_entry *cfgte) 1373static int pmcraid_expose_resource(u16 fw_version,
1374 struct pmcraid_config_table_entry *cfgte)
1211{ 1375{
1212 int retval = 0; 1376 int retval = 0;
1213 1377
1214 if (cfgte->resource_type == RES_TYPE_VSET) 1378 if (cfgte->resource_type == RES_TYPE_VSET) {
1215 retval = ((cfgte->unique_flags1 & 0x80) == 0); 1379 if (fw_version <= PMCRAID_FW_VERSION_1)
1216 else if (cfgte->resource_type == RES_TYPE_GSCSI) 1380 retval = ((cfgte->unique_flags1 & 0x80) == 0);
1381 else
1382 retval = ((cfgte->unique_flags0 & 0x80) == 0 &&
1383 (cfgte->unique_flags1 & 0x80) == 0);
1384
1385 } else if (cfgte->resource_type == RES_TYPE_GSCSI)
1217 retval = (RES_BUS(cfgte->resource_address) != 1386 retval = (RES_BUS(cfgte->resource_address) !=
1218 PMCRAID_VIRTUAL_ENCL_BUS_ID); 1387 PMCRAID_VIRTUAL_ENCL_BUS_ID);
1219 return retval; 1388 return retval;
@@ -1246,8 +1415,8 @@ static struct genl_family pmcraid_event_family = {
1246 * pmcraid_netlink_init - registers pmcraid_event_family 1415 * pmcraid_netlink_init - registers pmcraid_event_family
1247 * 1416 *
1248 * Return value: 1417 * Return value:
1249 * 0 if the pmcraid_event_family is successfully registered 1418 * 0 if the pmcraid_event_family is successfully registered
1250 * with netlink generic, non-zero otherwise 1419 * with netlink generic, non-zero otherwise
1251 */ 1420 */
1252static int pmcraid_netlink_init(void) 1421static int pmcraid_netlink_init(void)
1253{ 1422{
@@ -1268,7 +1437,7 @@ static int pmcraid_netlink_init(void)
1268 * pmcraid_netlink_release - unregisters pmcraid_event_family 1437 * pmcraid_netlink_release - unregisters pmcraid_event_family
1269 * 1438 *
1270 * Return value: 1439 * Return value:
1271 * none 1440 * none
1272 */ 1441 */
1273static void pmcraid_netlink_release(void) 1442static void pmcraid_netlink_release(void)
1274{ 1443{
@@ -1283,31 +1452,30 @@ static void pmcraid_netlink_release(void)
1283 * Return value: 1452 * Return value:
1284 * 0 if success, error value in case of any failure. 1453 * 0 if success, error value in case of any failure.
1285 */ 1454 */
1286static int pmcraid_notify_aen(struct pmcraid_instance *pinstance, u8 type) 1455static int pmcraid_notify_aen(
1456 struct pmcraid_instance *pinstance,
1457 struct pmcraid_aen_msg *aen_msg,
1458 u32 data_size
1459)
1287{ 1460{
1288 struct sk_buff *skb; 1461 struct sk_buff *skb;
1289 struct pmcraid_aen_msg *aen_msg;
1290 void *msg_header; 1462 void *msg_header;
1291 int data_size, total_size; 1463 u32 total_size, nla_genl_hdr_total_size;
1292 int result; 1464 int result;
1293 1465
1294
1295 if (type == PMCRAID_HCAM_CODE_LOG_DATA) {
1296 aen_msg = pinstance->ldn.msg;
1297 data_size = pinstance->ldn.hcam->data_len;
1298 } else {
1299 aen_msg = pinstance->ccn.msg;
1300 data_size = pinstance->ccn.hcam->data_len;
1301 }
1302
1303 data_size += sizeof(struct pmcraid_hcam_hdr);
1304 aen_msg->hostno = (pinstance->host->unique_id << 16 | 1466 aen_msg->hostno = (pinstance->host->unique_id << 16 |
1305 MINOR(pinstance->cdev.dev)); 1467 MINOR(pinstance->cdev.dev));
1306 aen_msg->length = data_size; 1468 aen_msg->length = data_size;
1469
1307 data_size += sizeof(*aen_msg); 1470 data_size += sizeof(*aen_msg);
1308 1471
1309 total_size = nla_total_size(data_size); 1472 total_size = nla_total_size(data_size);
1310 skb = genlmsg_new(total_size, GFP_ATOMIC); 1473 /* Add GENL_HDR to total_size */
1474 nla_genl_hdr_total_size =
1475 (total_size + (GENL_HDRLEN +
1476 ((struct genl_family *)&pmcraid_event_family)->hdrsize)
1477 + NLMSG_HDRLEN);
1478 skb = genlmsg_new(nla_genl_hdr_total_size, GFP_ATOMIC);
1311 1479
1312 1480
1313 if (!skb) { 1481 if (!skb) {
@@ -1329,7 +1497,7 @@ static int pmcraid_notify_aen(struct pmcraid_instance *pinstance, u8 type)
1329 result = nla_put(skb, PMCRAID_AEN_ATTR_EVENT, data_size, aen_msg); 1497 result = nla_put(skb, PMCRAID_AEN_ATTR_EVENT, data_size, aen_msg);
1330 1498
1331 if (result) { 1499 if (result) {
1332 pmcraid_err("failed to copy AEN attribute data \n"); 1500 pmcraid_err("failed to copy AEN attribute data\n");
1333 nlmsg_free(skb); 1501 nlmsg_free(skb);
1334 return -EINVAL; 1502 return -EINVAL;
1335 } 1503 }
@@ -1350,13 +1518,57 @@ static int pmcraid_notify_aen(struct pmcraid_instance *pinstance, u8 type)
1350 * value. 1518 * value.
1351 */ 1519 */
1352 if (result) 1520 if (result)
1353 pmcraid_info("failed to send %s event message %x!\n", 1521 pmcraid_info("error (%x) sending aen event message\n", result);
1354 type == PMCRAID_HCAM_CODE_LOG_DATA ? "LDN" : "CCN",
1355 result);
1356 return result; 1522 return result;
1357} 1523}
1358 1524
1359/** 1525/**
1526 * pmcraid_notify_ccn - notifies about CCN event msg to user space
1527 * @pinstance: pointer adapter instance structure
1528 *
1529 * Return value:
1530 * 0 if success, error value in case of any failure
1531 */
1532static int pmcraid_notify_ccn(struct pmcraid_instance *pinstance)
1533{
1534 return pmcraid_notify_aen(pinstance,
1535 pinstance->ccn.msg,
1536 pinstance->ccn.hcam->data_len +
1537 sizeof(struct pmcraid_hcam_hdr));
1538}
1539
1540/**
1541 * pmcraid_notify_ldn - notifies about CCN event msg to user space
1542 * @pinstance: pointer adapter instance structure
1543 *
1544 * Return value:
1545 * 0 if success, error value in case of any failure
1546 */
1547static int pmcraid_notify_ldn(struct pmcraid_instance *pinstance)
1548{
1549 return pmcraid_notify_aen(pinstance,
1550 pinstance->ldn.msg,
1551 pinstance->ldn.hcam->data_len +
1552 sizeof(struct pmcraid_hcam_hdr));
1553}
1554
1555/**
1556 * pmcraid_notify_ioastate - sends IOA state event msg to user space
1557 * @pinstance: pointer adapter instance structure
1558 * @evt: controller state event to be sent
1559 *
1560 * Return value:
1561 * 0 if success, error value in case of any failure
1562 */
1563static void pmcraid_notify_ioastate(struct pmcraid_instance *pinstance, u32 evt)
1564{
1565 pinstance->scn.ioa_state = evt;
1566 pmcraid_notify_aen(pinstance,
1567 &pinstance->scn.msg,
1568 sizeof(u32));
1569}
1570
1571/**
1360 * pmcraid_handle_config_change - Handle a config change from the adapter 1572 * pmcraid_handle_config_change - Handle a config change from the adapter
1361 * @pinstance: pointer to per adapter instance structure 1573 * @pinstance: pointer to per adapter instance structure
1362 * 1574 *
@@ -1375,10 +1587,12 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
1375 unsigned long host_lock_flags; 1587 unsigned long host_lock_flags;
1376 u32 new_entry = 1; 1588 u32 new_entry = 1;
1377 u32 hidden_entry = 0; 1589 u32 hidden_entry = 0;
1590 u16 fw_version;
1378 int rc; 1591 int rc;
1379 1592
1380 ccn_hcam = (struct pmcraid_hcam_ccn *)pinstance->ccn.hcam; 1593 ccn_hcam = (struct pmcraid_hcam_ccn *)pinstance->ccn.hcam;
1381 cfg_entry = &ccn_hcam->cfg_entry; 1594 cfg_entry = &ccn_hcam->cfg_entry;
1595 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
1382 1596
1383 pmcraid_info 1597 pmcraid_info
1384 ("CCN(%x): %x type: %x lost: %x flags: %x res: %x:%x:%x:%x\n", 1598 ("CCN(%x): %x type: %x lost: %x flags: %x res: %x:%x:%x:%x\n",
@@ -1391,7 +1605,10 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
1391 RES_IS_VSET(*cfg_entry) ? PMCRAID_VSET_BUS_ID : 1605 RES_IS_VSET(*cfg_entry) ? PMCRAID_VSET_BUS_ID :
1392 (RES_IS_GSCSI(*cfg_entry) ? PMCRAID_PHYS_BUS_ID : 1606 (RES_IS_GSCSI(*cfg_entry) ? PMCRAID_PHYS_BUS_ID :
1393 RES_BUS(cfg_entry->resource_address)), 1607 RES_BUS(cfg_entry->resource_address)),
1394 RES_IS_VSET(*cfg_entry) ? cfg_entry->unique_flags1 : 1608 RES_IS_VSET(*cfg_entry) ?
1609 (fw_version <= PMCRAID_FW_VERSION_1 ?
1610 cfg_entry->unique_flags1 :
1611 cfg_entry->array_id & 0xFF) :
1395 RES_TARGET(cfg_entry->resource_address), 1612 RES_TARGET(cfg_entry->resource_address),
1396 RES_LUN(cfg_entry->resource_address)); 1613 RES_LUN(cfg_entry->resource_address));
1397 1614
@@ -1415,11 +1632,16 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
1415 */ 1632 */
1416 if (pinstance->ccn.hcam->notification_type == 1633 if (pinstance->ccn.hcam->notification_type ==
1417 NOTIFICATION_TYPE_ENTRY_CHANGED && 1634 NOTIFICATION_TYPE_ENTRY_CHANGED &&
1418 cfg_entry->resource_type == RES_TYPE_VSET && 1635 cfg_entry->resource_type == RES_TYPE_VSET) {
1419 cfg_entry->unique_flags1 & 0x80) { 1636
1420 hidden_entry = 1; 1637 if (fw_version <= PMCRAID_FW_VERSION_1)
1421 } else if (!pmcraid_expose_resource(cfg_entry)) 1638 hidden_entry = (cfg_entry->unique_flags1 & 0x80) != 0;
1639 else
1640 hidden_entry = (cfg_entry->unique_flags1 & 0x80) != 0;
1641
1642 } else if (!pmcraid_expose_resource(fw_version, cfg_entry)) {
1422 goto out_notify_apps; 1643 goto out_notify_apps;
1644 }
1423 1645
1424 spin_lock_irqsave(&pinstance->resource_lock, lock_flags); 1646 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
1425 list_for_each_entry(res, &pinstance->used_res_q, queue) { 1647 list_for_each_entry(res, &pinstance->used_res_q, queue) {
@@ -1466,13 +1688,15 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
1466 list_add_tail(&res->queue, &pinstance->used_res_q); 1688 list_add_tail(&res->queue, &pinstance->used_res_q);
1467 } 1689 }
1468 1690
1469 memcpy(&res->cfg_entry, cfg_entry, 1691 memcpy(&res->cfg_entry, cfg_entry, pinstance->config_table_entry_size);
1470 sizeof(struct pmcraid_config_table_entry));
1471 1692
1472 if (pinstance->ccn.hcam->notification_type == 1693 if (pinstance->ccn.hcam->notification_type ==
1473 NOTIFICATION_TYPE_ENTRY_DELETED || hidden_entry) { 1694 NOTIFICATION_TYPE_ENTRY_DELETED || hidden_entry) {
1474 if (res->scsi_dev) { 1695 if (res->scsi_dev) {
1475 res->cfg_entry.unique_flags1 &= 0x7F; 1696 if (fw_version <= PMCRAID_FW_VERSION_1)
1697 res->cfg_entry.unique_flags1 &= 0x7F;
1698 else
1699 res->cfg_entry.array_id &= 0xFF;
1476 res->change_detected = RES_CHANGE_DEL; 1700 res->change_detected = RES_CHANGE_DEL;
1477 res->cfg_entry.resource_handle = 1701 res->cfg_entry.resource_handle =
1478 PMCRAID_INVALID_RES_HANDLE; 1702 PMCRAID_INVALID_RES_HANDLE;
@@ -1491,7 +1715,7 @@ out_notify_apps:
1491 1715
1492 /* Notify configuration changes to registered applications.*/ 1716 /* Notify configuration changes to registered applications.*/
1493 if (!pmcraid_disable_aen) 1717 if (!pmcraid_disable_aen)
1494 pmcraid_notify_aen(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE); 1718 pmcraid_notify_ccn(pinstance);
1495 1719
1496 cmd = pmcraid_init_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE); 1720 cmd = pmcraid_init_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
1497 if (cmd) 1721 if (cmd)
@@ -1528,7 +1752,7 @@ void pmcraid_ioasc_logger(u32 ioasc, struct pmcraid_cmd *cmd)
1528 return; 1752 return;
1529 1753
1530 /* log the error string */ 1754 /* log the error string */
1531 pmcraid_err("cmd [%d] for resource %x failed with %x(%s)\n", 1755 pmcraid_err("cmd [%x] for resource %x failed with %x(%s)\n",
1532 cmd->ioa_cb->ioarcb.cdb[0], 1756 cmd->ioa_cb->ioarcb.cdb[0],
1533 cmd->ioa_cb->ioarcb.resource_handle, 1757 cmd->ioa_cb->ioarcb.resource_handle,
1534 le32_to_cpu(ioasc), error_info->error_string); 1758 le32_to_cpu(ioasc), error_info->error_string);
@@ -1663,7 +1887,7 @@ static void pmcraid_process_ldn(struct pmcraid_cmd *cmd)
1663 } 1887 }
1664 /* send netlink message for HCAM notification if enabled */ 1888 /* send netlink message for HCAM notification if enabled */
1665 if (!pmcraid_disable_aen) 1889 if (!pmcraid_disable_aen)
1666 pmcraid_notify_aen(pinstance, PMCRAID_HCAM_CODE_LOG_DATA); 1890 pmcraid_notify_ldn(pinstance);
1667 1891
1668 cmd = pmcraid_init_hcam(pinstance, PMCRAID_HCAM_CODE_LOG_DATA); 1892 cmd = pmcraid_init_hcam(pinstance, PMCRAID_HCAM_CODE_LOG_DATA);
1669 if (cmd) 1893 if (cmd)
@@ -1701,10 +1925,13 @@ static void pmcraid_unregister_hcams(struct pmcraid_cmd *cmd)
1701 atomic_set(&pinstance->ldn.ignore, 1); 1925 atomic_set(&pinstance->ldn.ignore, 1);
1702 1926
1703 /* If adapter reset was forced as part of runtime reset sequence, 1927 /* If adapter reset was forced as part of runtime reset sequence,
1704 * start the reset sequence. 1928 * start the reset sequence. Reset will be triggered even in case
1929 * IOA unit_check.
1705 */ 1930 */
1706 if (pinstance->force_ioa_reset && !pinstance->ioa_bringdown) { 1931 if ((pinstance->force_ioa_reset && !pinstance->ioa_bringdown) ||
1932 pinstance->ioa_unit_check) {
1707 pinstance->force_ioa_reset = 0; 1933 pinstance->force_ioa_reset = 0;
1934 pinstance->ioa_unit_check = 0;
1708 pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT; 1935 pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
1709 pmcraid_reset_alert(cmd); 1936 pmcraid_reset_alert(cmd);
1710 return; 1937 return;
@@ -1735,10 +1962,13 @@ static int pmcraid_reset_enable_ioa(struct pmcraid_instance *pinstance)
1735 pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS); 1962 pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS);
1736 1963
1737 if (intrs & INTRS_TRANSITION_TO_OPERATIONAL) { 1964 if (intrs & INTRS_TRANSITION_TO_OPERATIONAL) {
1738 iowrite32(INTRS_TRANSITION_TO_OPERATIONAL, 1965 if (!pinstance->interrupt_mode) {
1739 pinstance->int_regs.ioa_host_interrupt_mask_reg); 1966 iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
1740 iowrite32(INTRS_TRANSITION_TO_OPERATIONAL, 1967 pinstance->int_regs.
1741 pinstance->int_regs.ioa_host_interrupt_clr_reg); 1968 ioa_host_interrupt_mask_reg);
1969 iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
1970 pinstance->int_regs.ioa_host_interrupt_clr_reg);
1971 }
1742 return 1; 1972 return 1;
1743 } else { 1973 } else {
1744 return 0; 1974 return 0;
@@ -1777,8 +2007,19 @@ static void pmcraid_soft_reset(struct pmcraid_cmd *cmd)
1777 doorbell = DOORBELL_RUNTIME_RESET | 2007 doorbell = DOORBELL_RUNTIME_RESET |
1778 DOORBELL_ENABLE_DESTRUCTIVE_DIAGS; 2008 DOORBELL_ENABLE_DESTRUCTIVE_DIAGS;
1779 2009
2010 /* Since we do RESET_ALERT and Start BIST we have to again write
2011 * MSIX Doorbell to indicate the interrupt mode
2012 */
2013 if (pinstance->interrupt_mode) {
2014 iowrite32(DOORBELL_INTR_MODE_MSIX,
2015 pinstance->int_regs.host_ioa_interrupt_reg);
2016 ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
2017 }
2018
1780 iowrite32(doorbell, pinstance->int_regs.host_ioa_interrupt_reg); 2019 iowrite32(doorbell, pinstance->int_regs.host_ioa_interrupt_reg);
2020 ioread32(pinstance->int_regs.host_ioa_interrupt_reg),
1781 int_reg = ioread32(pinstance->int_regs.ioa_host_interrupt_reg); 2021 int_reg = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
2022
1782 pmcraid_info("Waiting for IOA to become operational %x:%x\n", 2023 pmcraid_info("Waiting for IOA to become operational %x:%x\n",
1783 ioread32(pinstance->int_regs.host_ioa_interrupt_reg), 2024 ioread32(pinstance->int_regs.host_ioa_interrupt_reg),
1784 int_reg); 2025 int_reg);
@@ -1854,7 +2095,8 @@ static void pmcraid_fail_outstanding_cmds(struct pmcraid_instance *pinstance)
1854 } else if (cmd->cmd_done == pmcraid_internal_done || 2095 } else if (cmd->cmd_done == pmcraid_internal_done ||
1855 cmd->cmd_done == pmcraid_erp_done) { 2096 cmd->cmd_done == pmcraid_erp_done) {
1856 cmd->cmd_done(cmd); 2097 cmd->cmd_done(cmd);
1857 } else if (cmd->cmd_done != pmcraid_ioa_reset) { 2098 } else if (cmd->cmd_done != pmcraid_ioa_reset &&
2099 cmd->cmd_done != pmcraid_ioa_shutdown_done) {
1858 pmcraid_return_cmd(cmd); 2100 pmcraid_return_cmd(cmd);
1859 } 2101 }
1860 2102
@@ -1964,6 +2206,13 @@ static void pmcraid_ioa_reset(struct pmcraid_cmd *cmd)
1964 pinstance->ioa_reset_attempts = 0; 2206 pinstance->ioa_reset_attempts = 0;
1965 pmcraid_err("IOA didn't respond marking it as dead\n"); 2207 pmcraid_err("IOA didn't respond marking it as dead\n");
1966 pinstance->ioa_state = IOA_STATE_DEAD; 2208 pinstance->ioa_state = IOA_STATE_DEAD;
2209
2210 if (pinstance->ioa_bringdown)
2211 pmcraid_notify_ioastate(pinstance,
2212 PMC_DEVICE_EVENT_SHUTDOWN_FAILED);
2213 else
2214 pmcraid_notify_ioastate(pinstance,
2215 PMC_DEVICE_EVENT_RESET_FAILED);
1967 reset_complete = 1; 2216 reset_complete = 1;
1968 break; 2217 break;
1969 } 2218 }
@@ -1971,7 +2220,6 @@ static void pmcraid_ioa_reset(struct pmcraid_cmd *cmd)
1971 /* Once either bist or pci reset is done, restore PCI config 2220 /* Once either bist or pci reset is done, restore PCI config
1972 * space. If this fails, proceed with hard reset again 2221 * space. If this fails, proceed with hard reset again
1973 */ 2222 */
1974
1975 if (pci_restore_state(pinstance->pdev)) { 2223 if (pci_restore_state(pinstance->pdev)) {
1976 pmcraid_info("config-space error resetting again\n"); 2224 pmcraid_info("config-space error resetting again\n");
1977 pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT; 2225 pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
@@ -2002,6 +2250,8 @@ static void pmcraid_ioa_reset(struct pmcraid_cmd *cmd)
2002 pinstance->ioa_shutdown_type = SHUTDOWN_NONE; 2250 pinstance->ioa_shutdown_type = SHUTDOWN_NONE;
2003 pinstance->ioa_bringdown = 0; 2251 pinstance->ioa_bringdown = 0;
2004 pinstance->ioa_state = IOA_STATE_UNKNOWN; 2252 pinstance->ioa_state = IOA_STATE_UNKNOWN;
2253 pmcraid_notify_ioastate(pinstance,
2254 PMC_DEVICE_EVENT_SHUTDOWN_SUCCESS);
2005 reset_complete = 1; 2255 reset_complete = 1;
2006 } else { 2256 } else {
2007 /* bring-up IOA, so proceed with soft reset 2257 /* bring-up IOA, so proceed with soft reset
@@ -2051,6 +2301,8 @@ static void pmcraid_ioa_reset(struct pmcraid_cmd *cmd)
2051 */ 2301 */
2052 if (pinstance->ioa_shutdown_type == SHUTDOWN_NONE && 2302 if (pinstance->ioa_shutdown_type == SHUTDOWN_NONE &&
2053 pinstance->force_ioa_reset == 0) { 2303 pinstance->force_ioa_reset == 0) {
2304 pmcraid_notify_ioastate(pinstance,
2305 PMC_DEVICE_EVENT_RESET_SUCCESS);
2054 reset_complete = 1; 2306 reset_complete = 1;
2055 } else { 2307 } else {
2056 if (pinstance->ioa_shutdown_type != SHUTDOWN_NONE) 2308 if (pinstance->ioa_shutdown_type != SHUTDOWN_NONE)
@@ -2116,6 +2368,8 @@ static void pmcraid_initiate_reset(struct pmcraid_instance *pinstance)
2116 pinstance->ioa_shutdown_type = SHUTDOWN_NONE; 2368 pinstance->ioa_shutdown_type = SHUTDOWN_NONE;
2117 pinstance->reset_cmd = cmd; 2369 pinstance->reset_cmd = cmd;
2118 pinstance->force_ioa_reset = 1; 2370 pinstance->force_ioa_reset = 1;
2371 pmcraid_notify_ioastate(pinstance,
2372 PMC_DEVICE_EVENT_RESET_START);
2119 pmcraid_ioa_reset(cmd); 2373 pmcraid_ioa_reset(cmd);
2120 } 2374 }
2121} 2375}
@@ -2191,7 +2445,7 @@ static int pmcraid_reset_reload(
2191 wait_event(pinstance->reset_wait_q, 2445 wait_event(pinstance->reset_wait_q,
2192 !pinstance->ioa_reset_in_progress); 2446 !pinstance->ioa_reset_in_progress);
2193 2447
2194 pmcraid_info("reset_reload: reset is complete !! \n"); 2448 pmcraid_info("reset_reload: reset is complete !!\n");
2195 scsi_unblock_requests(pinstance->host); 2449 scsi_unblock_requests(pinstance->host);
2196 if (pinstance->ioa_state == target_state) 2450 if (pinstance->ioa_state == target_state)
2197 reset = 0; 2451 reset = 0;
@@ -2225,6 +2479,8 @@ static int pmcraid_reset_bringdown(struct pmcraid_instance *pinstance)
2225 */ 2479 */
2226static int pmcraid_reset_bringup(struct pmcraid_instance *pinstance) 2480static int pmcraid_reset_bringup(struct pmcraid_instance *pinstance)
2227{ 2481{
2482 pmcraid_notify_ioastate(pinstance, PMC_DEVICE_EVENT_RESET_START);
2483
2228 return pmcraid_reset_reload(pinstance, 2484 return pmcraid_reset_reload(pinstance,
2229 SHUTDOWN_NONE, 2485 SHUTDOWN_NONE,
2230 IOA_STATE_OPERATIONAL); 2486 IOA_STATE_OPERATIONAL);
@@ -2704,7 +2960,7 @@ static struct pmcraid_cmd *pmcraid_abort_cmd(struct pmcraid_cmd *cmd)
2704 2960
2705 pmcraid_info("command (%d) CDB[0] = %x for %x\n", 2961 pmcraid_info("command (%d) CDB[0] = %x for %x\n",
2706 le32_to_cpu(cancel_cmd->ioa_cb->ioarcb.response_handle) >> 2, 2962 le32_to_cpu(cancel_cmd->ioa_cb->ioarcb.response_handle) >> 2,
2707 cmd->ioa_cb->ioarcb.cdb[0], 2963 cancel_cmd->ioa_cb->ioarcb.cdb[0],
2708 le32_to_cpu(cancel_cmd->ioa_cb->ioarcb.resource_handle)); 2964 le32_to_cpu(cancel_cmd->ioa_cb->ioarcb.resource_handle));
2709 2965
2710 pmcraid_send_cmd(cancel_cmd, 2966 pmcraid_send_cmd(cancel_cmd,
@@ -2729,8 +2985,8 @@ static int pmcraid_abort_complete(struct pmcraid_cmd *cancel_cmd)
2729 u32 ioasc; 2985 u32 ioasc;
2730 2986
2731 wait_for_completion(&cancel_cmd->wait_for_completion); 2987 wait_for_completion(&cancel_cmd->wait_for_completion);
2732 res = cancel_cmd->u.res; 2988 res = cancel_cmd->res;
2733 cancel_cmd->u.res = NULL; 2989 cancel_cmd->res = NULL;
2734 ioasc = le32_to_cpu(cancel_cmd->ioa_cb->ioasa.ioasc); 2990 ioasc = le32_to_cpu(cancel_cmd->ioa_cb->ioasa.ioasc);
2735 2991
2736 /* If the abort task is not timed out we will get a Good completion 2992 /* If the abort task is not timed out we will get a Good completion
@@ -2823,7 +3079,7 @@ static int pmcraid_eh_abort_handler(struct scsi_cmnd *scsi_cmd)
2823 host_lock_flags); 3079 host_lock_flags);
2824 3080
2825 if (cancel_cmd) { 3081 if (cancel_cmd) {
2826 cancel_cmd->u.res = cmd->scsi_cmd->device->hostdata; 3082 cancel_cmd->res = cmd->scsi_cmd->device->hostdata;
2827 rc = pmcraid_abort_complete(cancel_cmd); 3083 rc = pmcraid_abort_complete(cancel_cmd);
2828 } 3084 }
2829 3085
@@ -2842,7 +3098,7 @@ static int pmcraid_eh_abort_handler(struct scsi_cmnd *scsi_cmd)
2842 * takes care by locking/unlocking host_lock. 3098 * takes care by locking/unlocking host_lock.
2843 * 3099 *
2844 * Return value 3100 * Return value
2845 * SUCCESS or FAILED 3101 * SUCCESS or FAILED
2846 */ 3102 */
2847static int pmcraid_eh_device_reset_handler(struct scsi_cmnd *scmd) 3103static int pmcraid_eh_device_reset_handler(struct scsi_cmnd *scmd)
2848{ 3104{
@@ -2879,7 +3135,7 @@ static int pmcraid_eh_target_reset_handler(struct scsi_cmnd *scmd)
2879 * Initiates adapter reset to bring it up to operational state 3135 * Initiates adapter reset to bring it up to operational state
2880 * 3136 *
2881 * Return value 3137 * Return value
2882 * SUCCESS or FAILED 3138 * SUCCESS or FAILED
2883 */ 3139 */
2884static int pmcraid_eh_host_reset_handler(struct scsi_cmnd *scmd) 3140static int pmcraid_eh_host_reset_handler(struct scsi_cmnd *scmd)
2885{ 3141{
@@ -2991,7 +3247,7 @@ pmcraid_init_ioadls(struct pmcraid_cmd *cmd, int sgcount)
2991 * to firmware. This builds ioadl descriptors and sets up ioarcb fields. 3247 * to firmware. This builds ioadl descriptors and sets up ioarcb fields.
2992 * 3248 *
2993 * Return value: 3249 * Return value:
2994 * 0 on success or -1 on failure 3250 * 0 on success or -1 on failure
2995 */ 3251 */
2996static int pmcraid_build_ioadl( 3252static int pmcraid_build_ioadl(
2997 struct pmcraid_instance *pinstance, 3253 struct pmcraid_instance *pinstance,
@@ -3049,7 +3305,7 @@ static int pmcraid_build_ioadl(
3049 * Free a DMA'able memory previously allocated with pmcraid_alloc_sglist 3305 * Free a DMA'able memory previously allocated with pmcraid_alloc_sglist
3050 * 3306 *
3051 * Return value: 3307 * Return value:
3052 * none 3308 * none
3053 */ 3309 */
3054static void pmcraid_free_sglist(struct pmcraid_sglist *sglist) 3310static void pmcraid_free_sglist(struct pmcraid_sglist *sglist)
3055{ 3311{
@@ -3070,7 +3326,7 @@ static void pmcraid_free_sglist(struct pmcraid_sglist *sglist)
3070 * list. 3326 * list.
3071 * 3327 *
3072 * Return value 3328 * Return value
3073 * pointer to sglist / NULL on failure 3329 * pointer to sglist / NULL on failure
3074 */ 3330 */
3075static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen) 3331static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen)
3076{ 3332{
@@ -3224,11 +3480,12 @@ static int pmcraid_queuecommand(
3224 struct pmcraid_resource_entry *res; 3480 struct pmcraid_resource_entry *res;
3225 struct pmcraid_ioarcb *ioarcb; 3481 struct pmcraid_ioarcb *ioarcb;
3226 struct pmcraid_cmd *cmd; 3482 struct pmcraid_cmd *cmd;
3483 u32 fw_version;
3227 int rc = 0; 3484 int rc = 0;
3228 3485
3229 pinstance = 3486 pinstance =
3230 (struct pmcraid_instance *)scsi_cmd->device->host->hostdata; 3487 (struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
3231 3488 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
3232 scsi_cmd->scsi_done = done; 3489 scsi_cmd->scsi_done = done;
3233 res = scsi_cmd->device->hostdata; 3490 res = scsi_cmd->device->hostdata;
3234 scsi_cmd->result = (DID_OK << 16); 3491 scsi_cmd->result = (DID_OK << 16);
@@ -3247,6 +3504,15 @@ static int pmcraid_queuecommand(
3247 if (pinstance->ioa_reset_in_progress) 3504 if (pinstance->ioa_reset_in_progress)
3248 return SCSI_MLQUEUE_HOST_BUSY; 3505 return SCSI_MLQUEUE_HOST_BUSY;
3249 3506
3507 /* Firmware doesn't support SYNCHRONIZE_CACHE command (0x35), complete
3508 * the command here itself with success return
3509 */
3510 if (scsi_cmd->cmnd[0] == SYNCHRONIZE_CACHE) {
3511 pmcraid_info("SYNC_CACHE(0x35), completing in driver itself\n");
3512 scsi_cmd->scsi_done(scsi_cmd);
3513 return 0;
3514 }
3515
3250 /* initialize the command and IOARCB to be sent to IOA */ 3516 /* initialize the command and IOARCB to be sent to IOA */
3251 cmd = pmcraid_get_free_cmd(pinstance); 3517 cmd = pmcraid_get_free_cmd(pinstance);
3252 3518
@@ -3261,6 +3527,13 @@ static int pmcraid_queuecommand(
3261 ioarcb->resource_handle = res->cfg_entry.resource_handle; 3527 ioarcb->resource_handle = res->cfg_entry.resource_handle;
3262 ioarcb->request_type = REQ_TYPE_SCSI; 3528 ioarcb->request_type = REQ_TYPE_SCSI;
3263 3529
3530 /* set hrrq number where the IOA should respond to. Note that all cmds
3531 * generated internally uses hrrq_id 0, exception to this is the cmd
3532 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
3533 * hrrq_id assigned here in queuecommand
3534 */
3535 ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
3536 pinstance->num_hrrq;
3264 cmd->cmd_done = pmcraid_io_done; 3537 cmd->cmd_done = pmcraid_io_done;
3265 3538
3266 if (RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry)) { 3539 if (RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry)) {
@@ -3287,7 +3560,9 @@ static int pmcraid_queuecommand(
3287 RES_IS_VSET(res->cfg_entry) ? PMCRAID_VSET_BUS_ID : 3560 RES_IS_VSET(res->cfg_entry) ? PMCRAID_VSET_BUS_ID :
3288 PMCRAID_PHYS_BUS_ID, 3561 PMCRAID_PHYS_BUS_ID,
3289 RES_IS_VSET(res->cfg_entry) ? 3562 RES_IS_VSET(res->cfg_entry) ?
3290 res->cfg_entry.unique_flags1 : 3563 (fw_version <= PMCRAID_FW_VERSION_1 ?
3564 res->cfg_entry.unique_flags1 :
3565 res->cfg_entry.array_id & 0xFF) :
3291 RES_TARGET(res->cfg_entry.resource_address), 3566 RES_TARGET(res->cfg_entry.resource_address),
3292 RES_LUN(res->cfg_entry.resource_address)); 3567 RES_LUN(res->cfg_entry.resource_address));
3293 3568
@@ -3324,8 +3599,7 @@ static int pmcraid_chr_open(struct inode *inode, struct file *filep)
3324 */ 3599 */
3325static int pmcraid_chr_release(struct inode *inode, struct file *filep) 3600static int pmcraid_chr_release(struct inode *inode, struct file *filep)
3326{ 3601{
3327 struct pmcraid_instance *pinstance = 3602 struct pmcraid_instance *pinstance = filep->private_data;
3328 ((struct pmcraid_instance *)filep->private_data);
3329 3603
3330 filep->private_data = NULL; 3604 filep->private_data = NULL;
3331 fasync_helper(-1, filep, 0, &pinstance->aen_queue); 3605 fasync_helper(-1, filep, 0, &pinstance->aen_queue);
@@ -3344,7 +3618,7 @@ static int pmcraid_chr_fasync(int fd, struct file *filep, int mode)
3344 struct pmcraid_instance *pinstance; 3618 struct pmcraid_instance *pinstance;
3345 int rc; 3619 int rc;
3346 3620
3347 pinstance = (struct pmcraid_instance *)filep->private_data; 3621 pinstance = filep->private_data;
3348 mutex_lock(&pinstance->aen_queue_lock); 3622 mutex_lock(&pinstance->aen_queue_lock);
3349 rc = fasync_helper(fd, filep, mode, &pinstance->aen_queue); 3623 rc = fasync_helper(fd, filep, mode, &pinstance->aen_queue);
3350 mutex_unlock(&pinstance->aen_queue_lock); 3624 mutex_unlock(&pinstance->aen_queue_lock);
@@ -3465,6 +3739,7 @@ static long pmcraid_ioctl_passthrough(
3465 unsigned long request_buffer; 3739 unsigned long request_buffer;
3466 unsigned long request_offset; 3740 unsigned long request_offset;
3467 unsigned long lock_flags; 3741 unsigned long lock_flags;
3742 u32 ioasc;
3468 int request_size; 3743 int request_size;
3469 int buffer_size; 3744 int buffer_size;
3470 u8 access, direction; 3745 u8 access, direction;
@@ -3566,6 +3841,14 @@ static long pmcraid_ioctl_passthrough(
3566 buffer->ioarcb.add_cmd_param_length); 3841 buffer->ioarcb.add_cmd_param_length);
3567 } 3842 }
3568 3843
3844 /* set hrrq number where the IOA should respond to. Note that all cmds
3845 * generated internally uses hrrq_id 0, exception to this is the cmd
3846 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
3847 * hrrq_id assigned here in queuecommand
3848 */
3849 ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
3850 pinstance->num_hrrq;
3851
3569 if (request_size) { 3852 if (request_size) {
3570 rc = pmcraid_build_passthrough_ioadls(cmd, 3853 rc = pmcraid_build_passthrough_ioadls(cmd,
3571 request_size, 3854 request_size,
@@ -3606,6 +3889,14 @@ static long pmcraid_ioctl_passthrough(
3606 _pmcraid_fire_command(cmd); 3889 _pmcraid_fire_command(cmd);
3607 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); 3890 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
3608 3891
3892 /* NOTE ! Remove the below line once abort_task is implemented
3893 * in firmware. This line disables ioctl command timeout handling logic
3894 * similar to IO command timeout handling, making ioctl commands to wait
3895 * until the command completion regardless of timeout value specified in
3896 * ioarcb
3897 */
3898 buffer->ioarcb.cmd_timeout = 0;
3899
3609 /* If command timeout is specified put caller to wait till that time, 3900 /* If command timeout is specified put caller to wait till that time,
3610 * otherwise it would be blocking wait. If command gets timed out, it 3901 * otherwise it would be blocking wait. If command gets timed out, it
3611 * will be aborted. 3902 * will be aborted.
@@ -3620,25 +3911,47 @@ static long pmcraid_ioctl_passthrough(
3620 le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle >> 2), 3911 le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle >> 2),
3621 cmd->ioa_cb->ioarcb.cdb[0]); 3912 cmd->ioa_cb->ioarcb.cdb[0]);
3622 3913
3623 rc = -ETIMEDOUT;
3624 spin_lock_irqsave(pinstance->host->host_lock, lock_flags); 3914 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
3625 cancel_cmd = pmcraid_abort_cmd(cmd); 3915 cancel_cmd = pmcraid_abort_cmd(cmd);
3626 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); 3916 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
3627 3917
3628 if (cancel_cmd) { 3918 if (cancel_cmd) {
3629 wait_for_completion(&cancel_cmd->wait_for_completion); 3919 wait_for_completion(&cancel_cmd->wait_for_completion);
3920 ioasc = cancel_cmd->ioa_cb->ioasa.ioasc;
3630 pmcraid_return_cmd(cancel_cmd); 3921 pmcraid_return_cmd(cancel_cmd);
3922
3923 /* if abort task couldn't find the command i.e it got
3924 * completed prior to aborting, return good completion.
3925 * if command got aborted succesfully or there was IOA
3926 * reset due to abort task itself getting timedout then
3927 * return -ETIMEDOUT
3928 */
3929 if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
3930 PMCRAID_IOASC_SENSE_KEY(ioasc) == 0x00) {
3931 if (ioasc != PMCRAID_IOASC_GC_IOARCB_NOTFOUND)
3932 rc = -ETIMEDOUT;
3933 goto out_handle_response;
3934 }
3631 } 3935 }
3632 3936
3633 goto out_free_sglist; 3937 /* no command block for abort task or abort task failed to abort
3938 * the IOARCB, then wait for 150 more seconds and initiate reset
3939 * sequence after timeout
3940 */
3941 if (!wait_for_completion_timeout(
3942 &cmd->wait_for_completion,
3943 msecs_to_jiffies(150 * 1000))) {
3944 pmcraid_reset_bringup(cmd->drv_inst);
3945 rc = -ETIMEDOUT;
3946 }
3634 } 3947 }
3635 3948
3949out_handle_response:
3636 /* If the command failed for any reason, copy entire IOASA buffer and 3950 /* If the command failed for any reason, copy entire IOASA buffer and
3637 * return IOCTL success. If copying IOASA to user-buffer fails, return 3951 * return IOCTL success. If copying IOASA to user-buffer fails, return
3638 * EFAULT 3952 * EFAULT
3639 */ 3953 */
3640 if (le32_to_cpu(cmd->ioa_cb->ioasa.ioasc)) { 3954 if (PMCRAID_IOASC_SENSE_KEY(le32_to_cpu(cmd->ioa_cb->ioasa.ioasc))) {
3641
3642 void *ioasa = 3955 void *ioasa =
3643 (void *)(arg + 3956 (void *)(arg +
3644 offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa)); 3957 offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa));
@@ -3651,6 +3964,7 @@ static long pmcraid_ioctl_passthrough(
3651 rc = -EFAULT; 3964 rc = -EFAULT;
3652 } 3965 }
3653 } 3966 }
3967
3654 /* If the data transfer was from device, copy the data onto user 3968 /* If the data transfer was from device, copy the data onto user
3655 * buffers 3969 * buffers
3656 */ 3970 */
@@ -3699,7 +4013,7 @@ static long pmcraid_ioctl_driver(
3699 int rc = -ENOSYS; 4013 int rc = -ENOSYS;
3700 4014
3701 if (!access_ok(VERIFY_READ, user_buffer, _IOC_SIZE(cmd))) { 4015 if (!access_ok(VERIFY_READ, user_buffer, _IOC_SIZE(cmd))) {
3702 pmcraid_err("ioctl_driver: access fault in request buffer \n"); 4016 pmcraid_err("ioctl_driver: access fault in request buffer\n");
3703 return -EFAULT; 4017 return -EFAULT;
3704 } 4018 }
3705 4019
@@ -3795,7 +4109,7 @@ static long pmcraid_chr_ioctl(
3795 return retval; 4109 return retval;
3796 } 4110 }
3797 4111
3798 pinstance = (struct pmcraid_instance *)filep->private_data; 4112 pinstance = filep->private_data;
3799 4113
3800 if (!pinstance) { 4114 if (!pinstance) {
3801 pmcraid_info("adapter instance is not found\n"); 4115 pmcraid_info("adapter instance is not found\n");
@@ -4011,36 +4325,77 @@ static struct scsi_host_template pmcraid_host_template = {
4011 .proc_name = PMCRAID_DRIVER_NAME 4325 .proc_name = PMCRAID_DRIVER_NAME
4012}; 4326};
4013 4327
4014/** 4328/*
4015 * pmcraid_isr_common - Common interrupt handler routine 4329 * pmcraid_isr_msix - implements MSI-X interrupt handling routine
4016 * 4330 * @irq: interrupt vector number
4017 * @pinstance: pointer to adapter instance 4331 * @dev_id: pointer hrrq_vector
4018 * @intrs: active interrupts (contents of ioa_host_interrupt register)
4019 * @hrrq_id: Host RRQ index
4020 * 4332 *
4021 * Return Value 4333 * Return Value
4022 * none 4334 * IRQ_HANDLED if interrupt is handled or IRQ_NONE if ignored
4023 */ 4335 */
4024static void pmcraid_isr_common( 4336
4025 struct pmcraid_instance *pinstance, 4337static irqreturn_t pmcraid_isr_msix(int irq, void *dev_id)
4026 u32 intrs,
4027 int hrrq_id
4028)
4029{ 4338{
4030 u32 intrs_clear = 4339 struct pmcraid_isr_param *hrrq_vector;
4031 (intrs & INTRS_CRITICAL_OP_IN_PROGRESS) ? intrs 4340 struct pmcraid_instance *pinstance;
4032 : INTRS_HRRQ_VALID; 4341 unsigned long lock_flags;
4033 iowrite32(intrs_clear, 4342 u32 intrs_val;
4034 pinstance->int_regs.ioa_host_interrupt_clr_reg); 4343 int hrrq_id;
4035 intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg); 4344
4345 hrrq_vector = (struct pmcraid_isr_param *)dev_id;
4346 hrrq_id = hrrq_vector->hrrq_id;
4347 pinstance = hrrq_vector->drv_inst;
4348
4349 if (!hrrq_id) {
4350 /* Read the interrupt */
4351 intrs_val = pmcraid_read_interrupts(pinstance);
4352 if (intrs_val &&
4353 ((ioread32(pinstance->int_regs.host_ioa_interrupt_reg)
4354 & DOORBELL_INTR_MSIX_CLR) == 0)) {
4355 /* Any error interrupts including unit_check,
4356 * initiate IOA reset.In case of unit check indicate
4357 * to reset_sequence that IOA unit checked and prepare
4358 * for a dump during reset sequence
4359 */
4360 if (intrs_val & PMCRAID_ERROR_INTERRUPTS) {
4361 if (intrs_val & INTRS_IOA_UNIT_CHECK)
4362 pinstance->ioa_unit_check = 1;
4363
4364 pmcraid_err("ISR: error interrupts: %x \
4365 initiating reset\n", intrs_val);
4366 spin_lock_irqsave(pinstance->host->host_lock,
4367 lock_flags);
4368 pmcraid_initiate_reset(pinstance);
4369 spin_unlock_irqrestore(
4370 pinstance->host->host_lock,
4371 lock_flags);
4372 }
4373 /* If interrupt was as part of the ioa initialization,
4374 * clear it. Delete the timer and wakeup the
4375 * reset engine to proceed with reset sequence
4376 */
4377 if (intrs_val & INTRS_TRANSITION_TO_OPERATIONAL)
4378 pmcraid_clr_trans_op(pinstance);
4379
4380 /* Clear the interrupt register by writing
4381 * to host to ioa doorbell. Once done
4382 * FW will clear the interrupt.
4383 */
4384 iowrite32(DOORBELL_INTR_MSIX_CLR,
4385 pinstance->int_regs.host_ioa_interrupt_reg);
4386 ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
4387
4036 4388
4037 /* hrrq valid bit was set, schedule tasklet to handle the response */ 4389 }
4038 if (intrs_clear == INTRS_HRRQ_VALID) 4390 }
4039 tasklet_schedule(&(pinstance->isr_tasklet[hrrq_id])); 4391
4392 tasklet_schedule(&(pinstance->isr_tasklet[hrrq_id]));
4393
4394 return IRQ_HANDLED;
4040} 4395}
4041 4396
4042/** 4397/**
4043 * pmcraid_isr - implements interrupt handling routine 4398 * pmcraid_isr - implements legacy interrupt handling routine
4044 * 4399 *
4045 * @irq: interrupt vector number 4400 * @irq: interrupt vector number
4046 * @dev_id: pointer hrrq_vector 4401 * @dev_id: pointer hrrq_vector
@@ -4052,8 +4407,9 @@ static irqreturn_t pmcraid_isr(int irq, void *dev_id)
4052{ 4407{
4053 struct pmcraid_isr_param *hrrq_vector; 4408 struct pmcraid_isr_param *hrrq_vector;
4054 struct pmcraid_instance *pinstance; 4409 struct pmcraid_instance *pinstance;
4055 unsigned long lock_flags;
4056 u32 intrs; 4410 u32 intrs;
4411 unsigned long lock_flags;
4412 int hrrq_id = 0;
4057 4413
4058 /* In case of legacy interrupt mode where interrupts are shared across 4414 /* In case of legacy interrupt mode where interrupts are shared across
4059 * isrs, it may be possible that the current interrupt is not from IOA 4415 * isrs, it may be possible that the current interrupt is not from IOA
@@ -4062,21 +4418,13 @@ static irqreturn_t pmcraid_isr(int irq, void *dev_id)
4062 printk(KERN_INFO "%s(): NULL host pointer\n", __func__); 4418 printk(KERN_INFO "%s(): NULL host pointer\n", __func__);
4063 return IRQ_NONE; 4419 return IRQ_NONE;
4064 } 4420 }
4065
4066 hrrq_vector = (struct pmcraid_isr_param *)dev_id; 4421 hrrq_vector = (struct pmcraid_isr_param *)dev_id;
4067 pinstance = hrrq_vector->drv_inst; 4422 pinstance = hrrq_vector->drv_inst;
4068 4423
4069 /* Acquire the lock (currently host_lock) while processing interrupts.
4070 * This interval is small as most of the response processing is done by
4071 * tasklet without the lock.
4072 */
4073 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
4074 intrs = pmcraid_read_interrupts(pinstance); 4424 intrs = pmcraid_read_interrupts(pinstance);
4075 4425
4076 if (unlikely((intrs & PMCRAID_PCI_INTERRUPTS) == 0)) { 4426 if (unlikely((intrs & PMCRAID_PCI_INTERRUPTS) == 0))
4077 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
4078 return IRQ_NONE; 4427 return IRQ_NONE;
4079 }
4080 4428
4081 /* Any error interrupts including unit_check, initiate IOA reset. 4429 /* Any error interrupts including unit_check, initiate IOA reset.
4082 * In case of unit check indicate to reset_sequence that IOA unit 4430 * In case of unit check indicate to reset_sequence that IOA unit
@@ -4091,13 +4439,28 @@ static irqreturn_t pmcraid_isr(int irq, void *dev_id)
4091 pinstance->int_regs.ioa_host_interrupt_clr_reg); 4439 pinstance->int_regs.ioa_host_interrupt_clr_reg);
4092 pmcraid_err("ISR: error interrupts: %x initiating reset\n", 4440 pmcraid_err("ISR: error interrupts: %x initiating reset\n",
4093 intrs); 4441 intrs);
4094 intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg); 4442 intrs = ioread32(
4443 pinstance->int_regs.ioa_host_interrupt_clr_reg);
4444 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
4095 pmcraid_initiate_reset(pinstance); 4445 pmcraid_initiate_reset(pinstance);
4446 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
4096 } else { 4447 } else {
4097 pmcraid_isr_common(pinstance, intrs, hrrq_vector->hrrq_id); 4448 /* If interrupt was as part of the ioa initialization,
4098 } 4449 * clear. Delete the timer and wakeup the
4450 * reset engine to proceed with reset sequence
4451 */
4452 if (intrs & INTRS_TRANSITION_TO_OPERATIONAL) {
4453 pmcraid_clr_trans_op(pinstance);
4454 } else {
4455 iowrite32(intrs,
4456 pinstance->int_regs.ioa_host_interrupt_clr_reg);
4457 ioread32(
4458 pinstance->int_regs.ioa_host_interrupt_clr_reg);
4099 4459
4100 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); 4460 tasklet_schedule(
4461 &(pinstance->isr_tasklet[hrrq_id]));
4462 }
4463 }
4101 4464
4102 return IRQ_HANDLED; 4465 return IRQ_HANDLED;
4103} 4466}
@@ -4120,6 +4483,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
4120 struct scsi_device *sdev; 4483 struct scsi_device *sdev;
4121 unsigned long lock_flags; 4484 unsigned long lock_flags;
4122 unsigned long host_lock_flags; 4485 unsigned long host_lock_flags;
4486 u16 fw_version;
4123 u8 bus, target, lun; 4487 u8 bus, target, lun;
4124 4488
4125 pinstance = container_of(workp, struct pmcraid_instance, worker_q); 4489 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
@@ -4127,6 +4491,8 @@ static void pmcraid_worker_function(struct work_struct *workp)
4127 if (!atomic_read(&pinstance->expose_resources)) 4491 if (!atomic_read(&pinstance->expose_resources))
4128 return; 4492 return;
4129 4493
4494 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
4495
4130 spin_lock_irqsave(&pinstance->resource_lock, lock_flags); 4496 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
4131 list_for_each_entry_safe(res, temp, &pinstance->used_res_q, queue) { 4497 list_for_each_entry_safe(res, temp, &pinstance->used_res_q, queue) {
4132 4498
@@ -4166,12 +4532,16 @@ static void pmcraid_worker_function(struct work_struct *workp)
4166 4532
4167 if (res->change_detected == RES_CHANGE_ADD) { 4533 if (res->change_detected == RES_CHANGE_ADD) {
4168 4534
4169 if (!pmcraid_expose_resource(&res->cfg_entry)) 4535 if (!pmcraid_expose_resource(fw_version,
4536 &res->cfg_entry))
4170 continue; 4537 continue;
4171 4538
4172 if (RES_IS_VSET(res->cfg_entry)) { 4539 if (RES_IS_VSET(res->cfg_entry)) {
4173 bus = PMCRAID_VSET_BUS_ID; 4540 bus = PMCRAID_VSET_BUS_ID;
4174 target = res->cfg_entry.unique_flags1; 4541 if (fw_version <= PMCRAID_FW_VERSION_1)
4542 target = res->cfg_entry.unique_flags1;
4543 else
4544 target = res->cfg_entry.array_id & 0xFF;
4175 lun = PMCRAID_VSET_LUN_ID; 4545 lun = PMCRAID_VSET_LUN_ID;
4176 } else { 4546 } else {
4177 bus = PMCRAID_PHYS_BUS_ID; 4547 bus = PMCRAID_PHYS_BUS_ID;
@@ -4201,7 +4571,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
4201 * Return Value 4571 * Return Value
4202 * None 4572 * None
4203 */ 4573 */
4204void pmcraid_tasklet_function(unsigned long instance) 4574static void pmcraid_tasklet_function(unsigned long instance)
4205{ 4575{
4206 struct pmcraid_isr_param *hrrq_vector; 4576 struct pmcraid_isr_param *hrrq_vector;
4207 struct pmcraid_instance *pinstance; 4577 struct pmcraid_instance *pinstance;
@@ -4210,35 +4580,12 @@ void pmcraid_tasklet_function(unsigned long instance)
4210 unsigned long host_lock_flags; 4580 unsigned long host_lock_flags;
4211 spinlock_t *lockp; /* hrrq buffer lock */ 4581 spinlock_t *lockp; /* hrrq buffer lock */
4212 int id; 4582 int id;
4213 u32 intrs;
4214 __le32 resp; 4583 __le32 resp;
4215 4584
4216 hrrq_vector = (struct pmcraid_isr_param *)instance; 4585 hrrq_vector = (struct pmcraid_isr_param *)instance;
4217 pinstance = hrrq_vector->drv_inst; 4586 pinstance = hrrq_vector->drv_inst;
4218 id = hrrq_vector->hrrq_id; 4587 id = hrrq_vector->hrrq_id;
4219 lockp = &(pinstance->hrrq_lock[id]); 4588 lockp = &(pinstance->hrrq_lock[id]);
4220 intrs = pmcraid_read_interrupts(pinstance);
4221
4222 /* If interrupts was as part of the ioa initialization, clear and mask
4223 * it. Delete the timer and wakeup the reset engine to proceed with
4224 * reset sequence
4225 */
4226 if (intrs & INTRS_TRANSITION_TO_OPERATIONAL) {
4227 iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
4228 pinstance->int_regs.ioa_host_interrupt_mask_reg);
4229 iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
4230 pinstance->int_regs.ioa_host_interrupt_clr_reg);
4231
4232 if (pinstance->reset_cmd != NULL) {
4233 del_timer(&pinstance->reset_cmd->timer);
4234 spin_lock_irqsave(pinstance->host->host_lock,
4235 host_lock_flags);
4236 pinstance->reset_cmd->cmd_done(pinstance->reset_cmd);
4237 spin_unlock_irqrestore(pinstance->host->host_lock,
4238 host_lock_flags);
4239 }
4240 return;
4241 }
4242 4589
4243 /* loop through each of the commands responded by IOA. Each HRRQ buf is 4590 /* loop through each of the commands responded by IOA. Each HRRQ buf is
4244 * protected by its own lock. Traversals must be done within this lock 4591 * protected by its own lock. Traversals must be done within this lock
@@ -4256,27 +4603,6 @@ void pmcraid_tasklet_function(unsigned long instance)
4256 int cmd_index = resp >> 2; 4603 int cmd_index = resp >> 2;
4257 struct pmcraid_cmd *cmd = NULL; 4604 struct pmcraid_cmd *cmd = NULL;
4258 4605
4259 if (cmd_index < PMCRAID_MAX_CMD) {
4260 cmd = pinstance->cmd_list[cmd_index];
4261 } else {
4262 /* In case of invalid response handle, initiate IOA
4263 * reset sequence.
4264 */
4265 spin_unlock_irqrestore(lockp, hrrq_lock_flags);
4266
4267 pmcraid_err("Invalid response %d initiating reset\n",
4268 cmd_index);
4269
4270 spin_lock_irqsave(pinstance->host->host_lock,
4271 host_lock_flags);
4272 pmcraid_initiate_reset(pinstance);
4273 spin_unlock_irqrestore(pinstance->host->host_lock,
4274 host_lock_flags);
4275
4276 spin_lock_irqsave(lockp, hrrq_lock_flags);
4277 break;
4278 }
4279
4280 if (pinstance->hrrq_curr[id] < pinstance->hrrq_end[id]) { 4606 if (pinstance->hrrq_curr[id] < pinstance->hrrq_end[id]) {
4281 pinstance->hrrq_curr[id]++; 4607 pinstance->hrrq_curr[id]++;
4282 } else { 4608 } else {
@@ -4284,6 +4610,14 @@ void pmcraid_tasklet_function(unsigned long instance)
4284 pinstance->host_toggle_bit[id] ^= 1u; 4610 pinstance->host_toggle_bit[id] ^= 1u;
4285 } 4611 }
4286 4612
4613 if (cmd_index >= PMCRAID_MAX_CMD) {
4614 /* In case of invalid response handle, log message */
4615 pmcraid_err("Invalid response handle %d\n", cmd_index);
4616 resp = le32_to_cpu(*(pinstance->hrrq_curr[id]));
4617 continue;
4618 }
4619
4620 cmd = pinstance->cmd_list[cmd_index];
4287 spin_unlock_irqrestore(lockp, hrrq_lock_flags); 4621 spin_unlock_irqrestore(lockp, hrrq_lock_flags);
4288 4622
4289 spin_lock_irqsave(&pinstance->pending_pool_lock, 4623 spin_lock_irqsave(&pinstance->pending_pool_lock,
@@ -4324,7 +4658,16 @@ void pmcraid_tasklet_function(unsigned long instance)
4324static 4658static
4325void pmcraid_unregister_interrupt_handler(struct pmcraid_instance *pinstance) 4659void pmcraid_unregister_interrupt_handler(struct pmcraid_instance *pinstance)
4326{ 4660{
4327 free_irq(pinstance->pdev->irq, &(pinstance->hrrq_vector[0])); 4661 int i;
4662
4663 for (i = 0; i < pinstance->num_hrrq; i++)
4664 free_irq(pinstance->hrrq_vector[i].vector,
4665 &(pinstance->hrrq_vector[i]));
4666
4667 if (pinstance->interrupt_mode) {
4668 pci_disable_msix(pinstance->pdev);
4669 pinstance->interrupt_mode = 0;
4670 }
4328} 4671}
4329 4672
4330/** 4673/**
@@ -4337,14 +4680,70 @@ void pmcraid_unregister_interrupt_handler(struct pmcraid_instance *pinstance)
4337static int 4680static int
4338pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance) 4681pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance)
4339{ 4682{
4683 int rc;
4340 struct pci_dev *pdev = pinstance->pdev; 4684 struct pci_dev *pdev = pinstance->pdev;
4341 4685
4686 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
4687 int num_hrrq = PMCRAID_NUM_MSIX_VECTORS;
4688 struct msix_entry entries[PMCRAID_NUM_MSIX_VECTORS];
4689 int i;
4690 for (i = 0; i < PMCRAID_NUM_MSIX_VECTORS; i++)
4691 entries[i].entry = i;
4692
4693 rc = pci_enable_msix(pdev, entries, num_hrrq);
4694 if (rc < 0)
4695 goto pmcraid_isr_legacy;
4696
4697 /* Check how many MSIX vectors are allocated and register
4698 * msi-x handlers for each of them giving appropriate buffer
4699 */
4700 if (rc > 0) {
4701 num_hrrq = rc;
4702 if (pci_enable_msix(pdev, entries, num_hrrq))
4703 goto pmcraid_isr_legacy;
4704 }
4705
4706 for (i = 0; i < num_hrrq; i++) {
4707 pinstance->hrrq_vector[i].hrrq_id = i;
4708 pinstance->hrrq_vector[i].drv_inst = pinstance;
4709 pinstance->hrrq_vector[i].vector = entries[i].vector;
4710 rc = request_irq(pinstance->hrrq_vector[i].vector,
4711 pmcraid_isr_msix, 0,
4712 PMCRAID_DRIVER_NAME,
4713 &(pinstance->hrrq_vector[i]));
4714
4715 if (rc) {
4716 int j;
4717 for (j = 0; j < i; j++)
4718 free_irq(entries[j].vector,
4719 &(pinstance->hrrq_vector[j]));
4720 pci_disable_msix(pdev);
4721 goto pmcraid_isr_legacy;
4722 }
4723 }
4724
4725 pinstance->num_hrrq = num_hrrq;
4726 pinstance->interrupt_mode = 1;
4727 iowrite32(DOORBELL_INTR_MODE_MSIX,
4728 pinstance->int_regs.host_ioa_interrupt_reg);
4729 ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
4730 goto pmcraid_isr_out;
4731 }
4732
4733pmcraid_isr_legacy:
4734 /* If MSI-X registration failed fallback to legacy mode, where
4735 * only one hrrq entry will be used
4736 */
4342 pinstance->hrrq_vector[0].hrrq_id = 0; 4737 pinstance->hrrq_vector[0].hrrq_id = 0;
4343 pinstance->hrrq_vector[0].drv_inst = pinstance; 4738 pinstance->hrrq_vector[0].drv_inst = pinstance;
4344 pinstance->hrrq_vector[0].vector = 0; 4739 pinstance->hrrq_vector[0].vector = pdev->irq;
4345 pinstance->num_hrrq = 1; 4740 pinstance->num_hrrq = 1;
4346 return request_irq(pdev->irq, pmcraid_isr, IRQF_SHARED, 4741 rc = 0;
4347 PMCRAID_DRIVER_NAME, &pinstance->hrrq_vector[0]); 4742
4743 rc = request_irq(pdev->irq, pmcraid_isr, IRQF_SHARED,
4744 PMCRAID_DRIVER_NAME, &pinstance->hrrq_vector[0]);
4745pmcraid_isr_out:
4746 return rc;
4348} 4747}
4349 4748
4350/** 4749/**
@@ -4516,12 +4915,11 @@ pmcraid_release_host_rrqs(struct pmcraid_instance *pinstance, int maxindex)
4516static int __devinit 4915static int __devinit
4517pmcraid_allocate_host_rrqs(struct pmcraid_instance *pinstance) 4916pmcraid_allocate_host_rrqs(struct pmcraid_instance *pinstance)
4518{ 4917{
4519 int i; 4918 int i, buffer_size;
4520 int buf_count = PMCRAID_MAX_CMD / pinstance->num_hrrq;
4521 4919
4522 for (i = 0; i < pinstance->num_hrrq; i++) { 4920 buffer_size = HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD;
4523 int buffer_size = HRRQ_ENTRY_SIZE * buf_count;
4524 4921
4922 for (i = 0; i < pinstance->num_hrrq; i++) {
4525 pinstance->hrrq_start[i] = 4923 pinstance->hrrq_start[i] =
4526 pci_alloc_consistent( 4924 pci_alloc_consistent(
4527 pinstance->pdev, 4925 pinstance->pdev,
@@ -4529,7 +4927,8 @@ pmcraid_allocate_host_rrqs(struct pmcraid_instance *pinstance)
4529 &(pinstance->hrrq_start_bus_addr[i])); 4927 &(pinstance->hrrq_start_bus_addr[i]));
4530 4928
4531 if (pinstance->hrrq_start[i] == 0) { 4929 if (pinstance->hrrq_start[i] == 0) {
4532 pmcraid_err("could not allocate host rrq: %d\n", i); 4930 pmcraid_err("pci_alloc failed for hrrq vector : %d\n",
4931 i);
4533 pmcraid_release_host_rrqs(pinstance, i); 4932 pmcraid_release_host_rrqs(pinstance, i);
4534 return -ENOMEM; 4933 return -ENOMEM;
4535 } 4934 }
@@ -4537,7 +4936,7 @@ pmcraid_allocate_host_rrqs(struct pmcraid_instance *pinstance)
4537 memset(pinstance->hrrq_start[i], 0, buffer_size); 4936 memset(pinstance->hrrq_start[i], 0, buffer_size);
4538 pinstance->hrrq_curr[i] = pinstance->hrrq_start[i]; 4937 pinstance->hrrq_curr[i] = pinstance->hrrq_start[i];
4539 pinstance->hrrq_end[i] = 4938 pinstance->hrrq_end[i] =
4540 pinstance->hrrq_start[i] + buf_count - 1; 4939 pinstance->hrrq_start[i] + PMCRAID_MAX_CMD - 1;
4541 pinstance->host_toggle_bit[i] = 1; 4940 pinstance->host_toggle_bit[i] = 1;
4542 spin_lock_init(&pinstance->hrrq_lock[i]); 4941 spin_lock_init(&pinstance->hrrq_lock[i]);
4543 } 4942 }
@@ -4557,7 +4956,7 @@ static void pmcraid_release_hcams(struct pmcraid_instance *pinstance)
4557 if (pinstance->ccn.msg != NULL) { 4956 if (pinstance->ccn.msg != NULL) {
4558 pci_free_consistent(pinstance->pdev, 4957 pci_free_consistent(pinstance->pdev,
4559 PMCRAID_AEN_HDR_SIZE + 4958 PMCRAID_AEN_HDR_SIZE +
4560 sizeof(struct pmcraid_hcam_ccn), 4959 sizeof(struct pmcraid_hcam_ccn_ext),
4561 pinstance->ccn.msg, 4960 pinstance->ccn.msg,
4562 pinstance->ccn.baddr); 4961 pinstance->ccn.baddr);
4563 4962
@@ -4591,7 +4990,7 @@ static int pmcraid_allocate_hcams(struct pmcraid_instance *pinstance)
4591 pinstance->ccn.msg = pci_alloc_consistent( 4990 pinstance->ccn.msg = pci_alloc_consistent(
4592 pinstance->pdev, 4991 pinstance->pdev,
4593 PMCRAID_AEN_HDR_SIZE + 4992 PMCRAID_AEN_HDR_SIZE +
4594 sizeof(struct pmcraid_hcam_ccn), 4993 sizeof(struct pmcraid_hcam_ccn_ext),
4595 &(pinstance->ccn.baddr)); 4994 &(pinstance->ccn.baddr));
4596 4995
4597 pinstance->ldn.msg = pci_alloc_consistent( 4996 pinstance->ldn.msg = pci_alloc_consistent(
@@ -4724,6 +5123,32 @@ static void pmcraid_kill_tasklets(struct pmcraid_instance *pinstance)
4724} 5123}
4725 5124
4726/** 5125/**
5126 * pmcraid_release_buffers - release per-adapter buffers allocated
5127 *
5128 * @pinstance: pointer to adapter soft state
5129 *
5130 * Return Value
5131 * none
5132 */
5133static void pmcraid_release_buffers(struct pmcraid_instance *pinstance)
5134{
5135 pmcraid_release_config_buffers(pinstance);
5136 pmcraid_release_control_blocks(pinstance, PMCRAID_MAX_CMD);
5137 pmcraid_release_cmd_blocks(pinstance, PMCRAID_MAX_CMD);
5138 pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
5139
5140 if (pinstance->inq_data != NULL) {
5141 pci_free_consistent(pinstance->pdev,
5142 sizeof(struct pmcraid_inquiry_data),
5143 pinstance->inq_data,
5144 pinstance->inq_data_baddr);
5145
5146 pinstance->inq_data = NULL;
5147 pinstance->inq_data_baddr = 0;
5148 }
5149}
5150
5151/**
4727 * pmcraid_init_buffers - allocates memory and initializes various structures 5152 * pmcraid_init_buffers - allocates memory and initializes various structures
4728 * @pinstance: pointer to per adapter instance structure 5153 * @pinstance: pointer to per adapter instance structure
4729 * 5154 *
@@ -4753,20 +5178,32 @@ static int __devinit pmcraid_init_buffers(struct pmcraid_instance *pinstance)
4753 } 5178 }
4754 5179
4755 if (pmcraid_allocate_cmd_blocks(pinstance)) { 5180 if (pmcraid_allocate_cmd_blocks(pinstance)) {
4756 pmcraid_err("couldn't allocate memory for cmd blocks \n"); 5181 pmcraid_err("couldn't allocate memory for cmd blocks\n");
4757 pmcraid_release_config_buffers(pinstance); 5182 pmcraid_release_config_buffers(pinstance);
4758 pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq); 5183 pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
4759 return -ENOMEM; 5184 return -ENOMEM;
4760 } 5185 }
4761 5186
4762 if (pmcraid_allocate_control_blocks(pinstance)) { 5187 if (pmcraid_allocate_control_blocks(pinstance)) {
4763 pmcraid_err("couldn't allocate memory control blocks \n"); 5188 pmcraid_err("couldn't allocate memory control blocks\n");
4764 pmcraid_release_config_buffers(pinstance); 5189 pmcraid_release_config_buffers(pinstance);
4765 pmcraid_release_cmd_blocks(pinstance, PMCRAID_MAX_CMD); 5190 pmcraid_release_cmd_blocks(pinstance, PMCRAID_MAX_CMD);
4766 pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq); 5191 pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
4767 return -ENOMEM; 5192 return -ENOMEM;
4768 } 5193 }
4769 5194
5195 /* allocate DMAable memory for page D0 INQUIRY buffer */
5196 pinstance->inq_data = pci_alloc_consistent(
5197 pinstance->pdev,
5198 sizeof(struct pmcraid_inquiry_data),
5199 &pinstance->inq_data_baddr);
5200
5201 if (pinstance->inq_data == NULL) {
5202 pmcraid_err("couldn't allocate DMA memory for INQUIRY\n");
5203 pmcraid_release_buffers(pinstance);
5204 return -ENOMEM;
5205 }
5206
4770 /* Initialize all the command blocks and add them to free pool. No 5207 /* Initialize all the command blocks and add them to free pool. No
4771 * need to lock (free_pool_lock) as this is done in initialization 5208 * need to lock (free_pool_lock) as this is done in initialization
4772 * itself 5209 * itself
@@ -4785,7 +5222,7 @@ static int __devinit pmcraid_init_buffers(struct pmcraid_instance *pinstance)
4785 * pmcraid_reinit_buffers - resets various buffer pointers 5222 * pmcraid_reinit_buffers - resets various buffer pointers
4786 * @pinstance: pointer to adapter instance 5223 * @pinstance: pointer to adapter instance
4787 * Return value 5224 * Return value
4788 * none 5225 * none
4789 */ 5226 */
4790static void pmcraid_reinit_buffers(struct pmcraid_instance *pinstance) 5227static void pmcraid_reinit_buffers(struct pmcraid_instance *pinstance)
4791{ 5228{
@@ -4836,6 +5273,8 @@ static int __devinit pmcraid_init_instance(
4836 mapped_pci_addr + chip_cfg->ioa_host_intr; 5273 mapped_pci_addr + chip_cfg->ioa_host_intr;
4837 pint_regs->ioa_host_interrupt_clr_reg = 5274 pint_regs->ioa_host_interrupt_clr_reg =
4838 mapped_pci_addr + chip_cfg->ioa_host_intr_clr; 5275 mapped_pci_addr + chip_cfg->ioa_host_intr_clr;
5276 pint_regs->ioa_host_msix_interrupt_reg =
5277 mapped_pci_addr + chip_cfg->ioa_host_msix_intr;
4839 pint_regs->host_ioa_interrupt_reg = 5278 pint_regs->host_ioa_interrupt_reg =
4840 mapped_pci_addr + chip_cfg->host_ioa_intr; 5279 mapped_pci_addr + chip_cfg->host_ioa_intr;
4841 pint_regs->host_ioa_interrupt_clr_reg = 5280 pint_regs->host_ioa_interrupt_clr_reg =
@@ -4858,6 +5297,7 @@ static int __devinit pmcraid_init_instance(
4858 init_waitqueue_head(&pinstance->reset_wait_q); 5297 init_waitqueue_head(&pinstance->reset_wait_q);
4859 5298
4860 atomic_set(&pinstance->outstanding_cmds, 0); 5299 atomic_set(&pinstance->outstanding_cmds, 0);
5300 atomic_set(&pinstance->last_message_id, 0);
4861 atomic_set(&pinstance->expose_resources, 0); 5301 atomic_set(&pinstance->expose_resources, 0);
4862 5302
4863 INIT_LIST_HEAD(&pinstance->free_res_q); 5303 INIT_LIST_HEAD(&pinstance->free_res_q);
@@ -4883,23 +5323,6 @@ static int __devinit pmcraid_init_instance(
4883} 5323}
4884 5324
4885/** 5325/**
4886 * pmcraid_release_buffers - release per-adapter buffers allocated
4887 *
4888 * @pinstance: pointer to adapter soft state
4889 *
4890 * Return Value
4891 * none
4892 */
4893static void pmcraid_release_buffers(struct pmcraid_instance *pinstance)
4894{
4895 pmcraid_release_config_buffers(pinstance);
4896 pmcraid_release_control_blocks(pinstance, PMCRAID_MAX_CMD);
4897 pmcraid_release_cmd_blocks(pinstance, PMCRAID_MAX_CMD);
4898 pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
4899
4900}
4901
4902/**
4903 * pmcraid_shutdown - shutdown adapter controller. 5326 * pmcraid_shutdown - shutdown adapter controller.
4904 * @pdev: pci device struct 5327 * @pdev: pci device struct
4905 * 5328 *
@@ -4958,7 +5381,7 @@ static int pmcraid_setup_chrdev(struct pmcraid_instance *pinstance)
4958 pmcraid_release_minor(minor); 5381 pmcraid_release_minor(minor);
4959 else 5382 else
4960 device_create(pmcraid_class, NULL, MKDEV(pmcraid_major, minor), 5383 device_create(pmcraid_class, NULL, MKDEV(pmcraid_major, minor),
4961 NULL, "pmcsas%u", minor); 5384 NULL, "%s%u", PMCRAID_DEVFILE, minor);
4962 return error; 5385 return error;
4963} 5386}
4964 5387
@@ -5050,7 +5473,6 @@ static int pmcraid_resume(struct pci_dev *pdev)
5050 struct pmcraid_instance *pinstance = pci_get_drvdata(pdev); 5473 struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
5051 struct Scsi_Host *host = pinstance->host; 5474 struct Scsi_Host *host = pinstance->host;
5052 int rc; 5475 int rc;
5053 int hrrqs;
5054 5476
5055 pci_set_power_state(pdev, PCI_D0); 5477 pci_set_power_state(pdev, PCI_D0);
5056 pci_enable_wake(pdev, PCI_D0, 0); 5478 pci_enable_wake(pdev, PCI_D0, 0);
@@ -5077,8 +5499,8 @@ static int pmcraid_resume(struct pci_dev *pdev)
5077 goto disable_device; 5499 goto disable_device;
5078 } 5500 }
5079 5501
5502 pmcraid_disable_interrupts(pinstance, ~0);
5080 atomic_set(&pinstance->outstanding_cmds, 0); 5503 atomic_set(&pinstance->outstanding_cmds, 0);
5081 hrrqs = pinstance->num_hrrq;
5082 rc = pmcraid_register_interrupt_handler(pinstance); 5504 rc = pmcraid_register_interrupt_handler(pinstance);
5083 5505
5084 if (rc) { 5506 if (rc) {
@@ -5100,7 +5522,7 @@ static int pmcraid_resume(struct pci_dev *pdev)
5100 * state. 5522 * state.
5101 */ 5523 */
5102 if (pmcraid_reset_bringup(pinstance)) { 5524 if (pmcraid_reset_bringup(pinstance)) {
5103 dev_err(&pdev->dev, "couldn't initialize IOA \n"); 5525 dev_err(&pdev->dev, "couldn't initialize IOA\n");
5104 rc = -ENODEV; 5526 rc = -ENODEV;
5105 goto release_tasklets; 5527 goto release_tasklets;
5106 } 5528 }
@@ -5108,6 +5530,7 @@ static int pmcraid_resume(struct pci_dev *pdev)
5108 return 0; 5530 return 0;
5109 5531
5110release_tasklets: 5532release_tasklets:
5533 pmcraid_disable_interrupts(pinstance, ~0);
5111 pmcraid_kill_tasklets(pinstance); 5534 pmcraid_kill_tasklets(pinstance);
5112 pmcraid_unregister_interrupt_handler(pinstance); 5535 pmcraid_unregister_interrupt_handler(pinstance);
5113 5536
@@ -5129,7 +5552,7 @@ disable_device:
5129 5552
5130/** 5553/**
5131 * pmcraid_complete_ioa_reset - Called by either timer or tasklet during 5554 * pmcraid_complete_ioa_reset - Called by either timer or tasklet during
5132 * completion of the ioa reset 5555 * completion of the ioa reset
5133 * @cmd: pointer to reset command block 5556 * @cmd: pointer to reset command block
5134 */ 5557 */
5135static void pmcraid_complete_ioa_reset(struct pmcraid_cmd *cmd) 5558static void pmcraid_complete_ioa_reset(struct pmcraid_cmd *cmd)
@@ -5204,11 +5627,14 @@ static void pmcraid_init_res_table(struct pmcraid_cmd *cmd)
5204 struct pmcraid_config_table_entry *cfgte; 5627 struct pmcraid_config_table_entry *cfgte;
5205 unsigned long lock_flags; 5628 unsigned long lock_flags;
5206 int found, rc, i; 5629 int found, rc, i;
5630 u16 fw_version;
5207 LIST_HEAD(old_res); 5631 LIST_HEAD(old_res);
5208 5632
5209 if (pinstance->cfg_table->flags & MICROCODE_UPDATE_REQUIRED) 5633 if (pinstance->cfg_table->flags & MICROCODE_UPDATE_REQUIRED)
5210 pmcraid_err("IOA requires microcode download\n"); 5634 pmcraid_err("IOA requires microcode download\n");
5211 5635
5636 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
5637
5212 /* resource list is protected by pinstance->resource_lock. 5638 /* resource list is protected by pinstance->resource_lock.
5213 * init_res_table can be called from probe (user-thread) or runtime 5639 * init_res_table can be called from probe (user-thread) or runtime
5214 * reset (timer/tasklet) 5640 * reset (timer/tasklet)
@@ -5219,9 +5645,14 @@ static void pmcraid_init_res_table(struct pmcraid_cmd *cmd)
5219 list_move_tail(&res->queue, &old_res); 5645 list_move_tail(&res->queue, &old_res);
5220 5646
5221 for (i = 0; i < pinstance->cfg_table->num_entries; i++) { 5647 for (i = 0; i < pinstance->cfg_table->num_entries; i++) {
5222 cfgte = &pinstance->cfg_table->entries[i]; 5648 if (be16_to_cpu(pinstance->inq_data->fw_version) <=
5649 PMCRAID_FW_VERSION_1)
5650 cfgte = &pinstance->cfg_table->entries[i];
5651 else
5652 cfgte = (struct pmcraid_config_table_entry *)
5653 &pinstance->cfg_table->entries_ext[i];
5223 5654
5224 if (!pmcraid_expose_resource(cfgte)) 5655 if (!pmcraid_expose_resource(fw_version, cfgte))
5225 continue; 5656 continue;
5226 5657
5227 found = 0; 5658 found = 0;
@@ -5263,10 +5694,12 @@ static void pmcraid_init_res_table(struct pmcraid_cmd *cmd)
5263 */ 5694 */
5264 if (found) { 5695 if (found) {
5265 memcpy(&res->cfg_entry, cfgte, 5696 memcpy(&res->cfg_entry, cfgte,
5266 sizeof(struct pmcraid_config_table_entry)); 5697 pinstance->config_table_entry_size);
5267 pmcraid_info("New res type:%x, vset:%x, addr:%x:\n", 5698 pmcraid_info("New res type:%x, vset:%x, addr:%x:\n",
5268 res->cfg_entry.resource_type, 5699 res->cfg_entry.resource_type,
5269 res->cfg_entry.unique_flags1, 5700 (fw_version <= PMCRAID_FW_VERSION_1 ?
5701 res->cfg_entry.unique_flags1 :
5702 res->cfg_entry.array_id & 0xFF),
5270 le32_to_cpu(res->cfg_entry.resource_address)); 5703 le32_to_cpu(res->cfg_entry.resource_address));
5271 } 5704 }
5272 } 5705 }
@@ -5306,6 +5739,14 @@ static void pmcraid_querycfg(struct pmcraid_cmd *cmd)
5306 struct pmcraid_instance *pinstance = cmd->drv_inst; 5739 struct pmcraid_instance *pinstance = cmd->drv_inst;
5307 int cfg_table_size = cpu_to_be32(sizeof(struct pmcraid_config_table)); 5740 int cfg_table_size = cpu_to_be32(sizeof(struct pmcraid_config_table));
5308 5741
5742 if (be16_to_cpu(pinstance->inq_data->fw_version) <=
5743 PMCRAID_FW_VERSION_1)
5744 pinstance->config_table_entry_size =
5745 sizeof(struct pmcraid_config_table_entry);
5746 else
5747 pinstance->config_table_entry_size =
5748 sizeof(struct pmcraid_config_table_entry_ext);
5749
5309 ioarcb->request_type = REQ_TYPE_IOACMD; 5750 ioarcb->request_type = REQ_TYPE_IOACMD;
5310 ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE); 5751 ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
5311 5752
@@ -5338,7 +5779,7 @@ static void pmcraid_querycfg(struct pmcraid_cmd *cmd)
5338 5779
5339 5780
5340/** 5781/**
5341 * pmcraid_probe - PCI probe entry pointer for PMC MaxRaid controller driver 5782 * pmcraid_probe - PCI probe entry pointer for PMC MaxRAID controller driver
5342 * @pdev: pointer to pci device structure 5783 * @pdev: pointer to pci device structure
5343 * @dev_id: pointer to device ids structure 5784 * @dev_id: pointer to device ids structure
5344 * 5785 *
@@ -5485,7 +5926,7 @@ static int __devinit pmcraid_probe(
5485 */ 5926 */
5486 pmcraid_info("starting IOA initialization sequence\n"); 5927 pmcraid_info("starting IOA initialization sequence\n");
5487 if (pmcraid_reset_bringup(pinstance)) { 5928 if (pmcraid_reset_bringup(pinstance)) {
5488 dev_err(&pdev->dev, "couldn't initialize IOA \n"); 5929 dev_err(&pdev->dev, "couldn't initialize IOA\n");
5489 rc = 1; 5930 rc = 1;
5490 goto out_release_bufs; 5931 goto out_release_bufs;
5491 } 5932 }
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
index b8ad07c3449e..6cfa0145a1d7 100644
--- a/drivers/scsi/pmcraid.h
+++ b/drivers/scsi/pmcraid.h
@@ -40,10 +40,12 @@
40 * Driver version: version string in major_version.minor_version.patch format 40 * Driver version: version string in major_version.minor_version.patch format
41 * Driver date : date information in "Mon dd yyyy" format 41 * Driver date : date information in "Mon dd yyyy" format
42 */ 42 */
43#define PMCRAID_DRIVER_NAME "PMC MaxRAID" 43#define PMCRAID_DRIVER_NAME "PMC MaxRAID"
44#define PMCRAID_DEVFILE "pmcsas" 44#define PMCRAID_DEVFILE "pmcsas"
45#define PMCRAID_DRIVER_VERSION "1.0.2" 45#define PMCRAID_DRIVER_VERSION "2.0.2"
46#define PMCRAID_DRIVER_DATE __DATE__ 46#define PMCRAID_DRIVER_DATE __DATE__
47
48#define PMCRAID_FW_VERSION_1 0x002
47 49
48/* Maximum number of adapters supported by current version of the driver */ 50/* Maximum number of adapters supported by current version of the driver */
49#define PMCRAID_MAX_ADAPTERS 1024 51#define PMCRAID_MAX_ADAPTERS 1024
@@ -85,17 +87,17 @@
85#define PMCRAID_IOARCB_ALIGNMENT 32 87#define PMCRAID_IOARCB_ALIGNMENT 32
86#define PMCRAID_IOADL_ALIGNMENT 16 88#define PMCRAID_IOADL_ALIGNMENT 16
87#define PMCRAID_IOASA_ALIGNMENT 4 89#define PMCRAID_IOASA_ALIGNMENT 4
88#define PMCRAID_NUM_MSIX_VECTORS 1 90#define PMCRAID_NUM_MSIX_VECTORS 16
89 91
90/* various other limits */ 92/* various other limits */
91#define PMCRAID_VENDOR_ID_LEN 8 93#define PMCRAID_VENDOR_ID_LEN 8
92#define PMCRAID_PRODUCT_ID_LEN 16 94#define PMCRAID_PRODUCT_ID_LEN 16
93#define PMCRAID_SERIAL_NUM_LEN 8 95#define PMCRAID_SERIAL_NUM_LEN 8
94#define PMCRAID_LUN_LEN 8 96#define PMCRAID_LUN_LEN 8
95#define PMCRAID_MAX_CDB_LEN 16 97#define PMCRAID_MAX_CDB_LEN 16
96#define PMCRAID_DEVICE_ID_LEN 8 98#define PMCRAID_DEVICE_ID_LEN 8
97#define PMCRAID_SENSE_DATA_LEN 256 99#define PMCRAID_SENSE_DATA_LEN 256
98#define PMCRAID_ADD_CMD_PARAM_LEN 48 100#define PMCRAID_ADD_CMD_PARAM_LEN 48
99 101
100#define PMCRAID_MAX_BUS_TO_SCAN 1 102#define PMCRAID_MAX_BUS_TO_SCAN 1
101#define PMCRAID_MAX_NUM_TARGETS_PER_BUS 256 103#define PMCRAID_MAX_NUM_TARGETS_PER_BUS 256
@@ -116,17 +118,10 @@
116#define PMCRAID_VSET_MAX_SECTORS 512 118#define PMCRAID_VSET_MAX_SECTORS 512
117#define PMCRAID_MAX_CMD_PER_LUN 254 119#define PMCRAID_MAX_CMD_PER_LUN 254
118 120
119/* Number of configuration table entries (resources) */ 121/* Number of configuration table entries (resources), includes 1 FP,
120#define PMCRAID_MAX_NUM_OF_VSETS 240 122 * 1 Enclosure device
121 123 */
122/* Todo : Check max limit for Phase 1 */ 124#define PMCRAID_MAX_RESOURCES 256
123#define PMCRAID_MAX_NUM_OF_PHY_DEVS 256
124
125/* MAX_NUM_OF_DEVS includes 1 FP, 1 Dummy Enclosure device */
126#define PMCRAID_MAX_NUM_OF_DEVS \
127 (PMCRAID_MAX_NUM_OF_VSETS + PMCRAID_MAX_NUM_OF_PHY_DEVS + 2)
128
129#define PMCRAID_MAX_RESOURCES PMCRAID_MAX_NUM_OF_DEVS
130 125
131/* Adapter Commands used by driver */ 126/* Adapter Commands used by driver */
132#define PMCRAID_QUERY_RESOURCE_STATE 0xC2 127#define PMCRAID_QUERY_RESOURCE_STATE 0xC2
@@ -177,6 +172,7 @@
177#define PMCRAID_IOASC_SENSE_STATUS(ioasc) ((ioasc) & 0x000000ff) 172#define PMCRAID_IOASC_SENSE_STATUS(ioasc) ((ioasc) & 0x000000ff)
178 173
179#define PMCRAID_IOASC_GOOD_COMPLETION 0x00000000 174#define PMCRAID_IOASC_GOOD_COMPLETION 0x00000000
175#define PMCRAID_IOASC_GC_IOARCB_NOTFOUND 0x005A0000
180#define PMCRAID_IOASC_NR_INIT_CMD_REQUIRED 0x02040200 176#define PMCRAID_IOASC_NR_INIT_CMD_REQUIRED 0x02040200
181#define PMCRAID_IOASC_NR_IOA_RESET_REQUIRED 0x02048000 177#define PMCRAID_IOASC_NR_IOA_RESET_REQUIRED 0x02048000
182#define PMCRAID_IOASC_NR_SYNC_REQUIRED 0x023F0000 178#define PMCRAID_IOASC_NR_SYNC_REQUIRED 0x023F0000
@@ -187,12 +183,12 @@
187#define PMCRAID_IOASC_HW_IOA_RESET_REQUIRED 0x04448600 183#define PMCRAID_IOASC_HW_IOA_RESET_REQUIRED 0x04448600
188#define PMCRAID_IOASC_IR_INVALID_RESOURCE_HANDLE 0x05250000 184#define PMCRAID_IOASC_IR_INVALID_RESOURCE_HANDLE 0x05250000
189#define PMCRAID_IOASC_AC_TERMINATED_BY_HOST 0x0B5A0000 185#define PMCRAID_IOASC_AC_TERMINATED_BY_HOST 0x0B5A0000
190#define PMCRAID_IOASC_UA_BUS_WAS_RESET 0x06290000 186#define PMCRAID_IOASC_UA_BUS_WAS_RESET 0x06290000
191#define PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER 0x06298000 187#define PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER 0x06298000
192 188
193/* Driver defined IOASCs */ 189/* Driver defined IOASCs */
194#define PMCRAID_IOASC_IOA_WAS_RESET 0x10000001 190#define PMCRAID_IOASC_IOA_WAS_RESET 0x10000001
195#define PMCRAID_IOASC_PCI_ACCESS_ERROR 0x10000002 191#define PMCRAID_IOASC_PCI_ACCESS_ERROR 0x10000002
196 192
197/* Various timeout values (in milliseconds) used. If any of these are chip 193/* Various timeout values (in milliseconds) used. If any of these are chip
198 * specific, move them to pmcraid_chip_details structure. 194 * specific, move them to pmcraid_chip_details structure.
@@ -336,6 +332,13 @@ struct pmcraid_config_table_entry {
336 __u8 lun[PMCRAID_LUN_LEN]; 332 __u8 lun[PMCRAID_LUN_LEN];
337} __attribute__((packed, aligned(4))); 333} __attribute__((packed, aligned(4)));
338 334
335/* extended configuration table sizes are of 64 bytes in size */
336#define PMCRAID_CFGTE_EXT_SIZE 32
337struct pmcraid_config_table_entry_ext {
338 struct pmcraid_config_table_entry cfgte;
339 __u8 cfgte_ext[PMCRAID_CFGTE_EXT_SIZE];
340};
341
339/* resource types (config_table_entry.resource_type values) */ 342/* resource types (config_table_entry.resource_type values) */
340#define RES_TYPE_AF_DASD 0x00 343#define RES_TYPE_AF_DASD 0x00
341#define RES_TYPE_GSCSI 0x01 344#define RES_TYPE_GSCSI 0x01
@@ -376,7 +379,12 @@ struct pmcraid_config_table {
376 __u8 reserved1; 379 __u8 reserved1;
377 __u8 flags; 380 __u8 flags;
378 __u8 reserved2[11]; 381 __u8 reserved2[11];
379 struct pmcraid_config_table_entry entries[PMCRAID_MAX_RESOURCES]; 382 union {
383 struct pmcraid_config_table_entry
384 entries[PMCRAID_MAX_RESOURCES];
385 struct pmcraid_config_table_entry_ext
386 entries_ext[PMCRAID_MAX_RESOURCES];
387 };
380} __attribute__((packed, aligned(4))); 388} __attribute__((packed, aligned(4)));
381 389
382/* config_table.flags value */ 390/* config_table.flags value */
@@ -385,7 +393,7 @@ struct pmcraid_config_table {
385/* 393/*
386 * HCAM format 394 * HCAM format
387 */ 395 */
388#define PMCRAID_HOSTRCB_LDNSIZE 4056 396#define PMCRAID_HOSTRCB_LDNSIZE 4056
389 397
390/* Error log notification format */ 398/* Error log notification format */
391struct pmcraid_hostrcb_error { 399struct pmcraid_hostrcb_error {
@@ -416,6 +424,15 @@ struct pmcraid_hcam_hdr {
416struct pmcraid_hcam_ccn { 424struct pmcraid_hcam_ccn {
417 struct pmcraid_hcam_hdr header; 425 struct pmcraid_hcam_hdr header;
418 struct pmcraid_config_table_entry cfg_entry; 426 struct pmcraid_config_table_entry cfg_entry;
427 struct pmcraid_config_table_entry cfg_entry_old;
428} __attribute__((packed, aligned(4)));
429
430#define PMCRAID_CCN_EXT_SIZE 3944
431struct pmcraid_hcam_ccn_ext {
432 struct pmcraid_hcam_hdr header;
433 struct pmcraid_config_table_entry_ext cfg_entry;
434 struct pmcraid_config_table_entry_ext cfg_entry_old;
435 __u8 reserved[PMCRAID_CCN_EXT_SIZE];
419} __attribute__((packed, aligned(4))); 436} __attribute__((packed, aligned(4)));
420 437
421struct pmcraid_hcam_ldn { 438struct pmcraid_hcam_ldn {
@@ -431,6 +448,8 @@ struct pmcraid_hcam_ldn {
431#define NOTIFICATION_TYPE_ENTRY_CHANGED 0x0 448#define NOTIFICATION_TYPE_ENTRY_CHANGED 0x0
432#define NOTIFICATION_TYPE_ENTRY_NEW 0x1 449#define NOTIFICATION_TYPE_ENTRY_NEW 0x1
433#define NOTIFICATION_TYPE_ENTRY_DELETED 0x2 450#define NOTIFICATION_TYPE_ENTRY_DELETED 0x2
451#define NOTIFICATION_TYPE_STATE_CHANGE 0x3
452#define NOTIFICATION_TYPE_ENTRY_STATECHANGED 0x4
434#define NOTIFICATION_TYPE_ERROR_LOG 0x10 453#define NOTIFICATION_TYPE_ERROR_LOG 0x10
435#define NOTIFICATION_TYPE_INFORMATION_LOG 0x11 454#define NOTIFICATION_TYPE_INFORMATION_LOG 0x11
436 455
@@ -460,6 +479,7 @@ struct pmcraid_chip_details {
460 unsigned long mailbox; 479 unsigned long mailbox;
461 unsigned long global_intr_mask; 480 unsigned long global_intr_mask;
462 unsigned long ioa_host_intr; 481 unsigned long ioa_host_intr;
482 unsigned long ioa_host_msix_intr;
463 unsigned long ioa_host_intr_clr; 483 unsigned long ioa_host_intr_clr;
464 unsigned long ioa_host_mask; 484 unsigned long ioa_host_mask;
465 unsigned long ioa_host_mask_clr; 485 unsigned long ioa_host_mask_clr;
@@ -482,6 +502,7 @@ struct pmcraid_chip_details {
482#define INTRS_IOA_PROCESSOR_ERROR PMC_BIT32(29) 502#define INTRS_IOA_PROCESSOR_ERROR PMC_BIT32(29)
483#define INTRS_HRRQ_VALID PMC_BIT32(30) 503#define INTRS_HRRQ_VALID PMC_BIT32(30)
484#define INTRS_OPERATIONAL_STATUS PMC_BIT32(0) 504#define INTRS_OPERATIONAL_STATUS PMC_BIT32(0)
505#define INTRS_ALLOW_MSIX_VECTOR0 PMC_BIT32(31)
485 506
486/* Host to IOA Doorbells */ 507/* Host to IOA Doorbells */
487#define DOORBELL_RUNTIME_RESET PMC_BIT32(1) 508#define DOORBELL_RUNTIME_RESET PMC_BIT32(1)
@@ -489,10 +510,12 @@ struct pmcraid_chip_details {
489#define DOORBELL_IOA_DEBUG_ALERT PMC_BIT32(9) 510#define DOORBELL_IOA_DEBUG_ALERT PMC_BIT32(9)
490#define DOORBELL_ENABLE_DESTRUCTIVE_DIAGS PMC_BIT32(8) 511#define DOORBELL_ENABLE_DESTRUCTIVE_DIAGS PMC_BIT32(8)
491#define DOORBELL_IOA_START_BIST PMC_BIT32(23) 512#define DOORBELL_IOA_START_BIST PMC_BIT32(23)
513#define DOORBELL_INTR_MODE_MSIX PMC_BIT32(25)
514#define DOORBELL_INTR_MSIX_CLR PMC_BIT32(26)
492#define DOORBELL_RESET_IOA PMC_BIT32(31) 515#define DOORBELL_RESET_IOA PMC_BIT32(31)
493 516
494/* Global interrupt mask register value */ 517/* Global interrupt mask register value */
495#define GLOBAL_INTERRUPT_MASK 0x4ULL 518#define GLOBAL_INTERRUPT_MASK 0x5ULL
496 519
497#define PMCRAID_ERROR_INTERRUPTS (INTRS_IOARCB_TRANSFER_FAILED | \ 520#define PMCRAID_ERROR_INTERRUPTS (INTRS_IOARCB_TRANSFER_FAILED | \
498 INTRS_IOA_UNIT_CHECK | \ 521 INTRS_IOA_UNIT_CHECK | \
@@ -503,8 +526,8 @@ struct pmcraid_chip_details {
503 526
504#define PMCRAID_PCI_INTERRUPTS (PMCRAID_ERROR_INTERRUPTS | \ 527#define PMCRAID_PCI_INTERRUPTS (PMCRAID_ERROR_INTERRUPTS | \
505 INTRS_HRRQ_VALID | \ 528 INTRS_HRRQ_VALID | \
506 INTRS_CRITICAL_OP_IN_PROGRESS |\ 529 INTRS_TRANSITION_TO_OPERATIONAL |\
507 INTRS_TRANSITION_TO_OPERATIONAL) 530 INTRS_ALLOW_MSIX_VECTOR0)
508 531
509/* control_block, associated with each of the commands contains IOARCB, IOADLs 532/* control_block, associated with each of the commands contains IOARCB, IOADLs
510 * memory for IOASA. Additional 3 * 16 bytes are allocated in order to support 533 * memory for IOASA. Additional 3 * 16 bytes are allocated in order to support
@@ -526,17 +549,24 @@ struct pmcraid_sglist {
526 struct scatterlist scatterlist[1]; 549 struct scatterlist scatterlist[1];
527}; 550};
528 551
552/* page D0 inquiry data of focal point resource */
553struct pmcraid_inquiry_data {
554 __u8 ph_dev_type;
555 __u8 page_code;
556 __u8 reserved1;
557 __u8 add_page_len;
558 __u8 length;
559 __u8 reserved2;
560 __le16 fw_version;
561 __u8 reserved3[16];
562};
563
529/* pmcraid_cmd - LLD representation of SCSI command */ 564/* pmcraid_cmd - LLD representation of SCSI command */
530struct pmcraid_cmd { 565struct pmcraid_cmd {
531 566
532 /* Ptr and bus address of DMA.able control block for this command */ 567 /* Ptr and bus address of DMA.able control block for this command */
533 struct pmcraid_control_block *ioa_cb; 568 struct pmcraid_control_block *ioa_cb;
534 dma_addr_t ioa_cb_bus_addr; 569 dma_addr_t ioa_cb_bus_addr;
535
536 /* sense buffer for REQUEST SENSE command if firmware is not sending
537 * auto sense data
538 */
539 dma_addr_t sense_buffer_dma;
540 dma_addr_t dma_handle; 570 dma_addr_t dma_handle;
541 u8 *sense_buffer; 571 u8 *sense_buffer;
542 572
@@ -556,11 +586,22 @@ struct pmcraid_cmd {
556 586
557 struct pmcraid_sglist *sglist; /* used for passthrough IOCTLs */ 587 struct pmcraid_sglist *sglist; /* used for passthrough IOCTLs */
558 588
559 /* scratch used during reset sequence */ 589 /* scratch used */
560 union { 590 union {
591 /* during reset sequence */
561 unsigned long time_left; 592 unsigned long time_left;
562 struct pmcraid_resource_entry *res; 593 struct pmcraid_resource_entry *res;
563 } u; 594 int hrrq_index;
595
596 /* used during IO command error handling. Sense buffer
597 * for REQUEST SENSE command if firmware is not sending
598 * auto sense data
599 */
600 struct {
601 u8 *sense_buffer;
602 dma_addr_t sense_buffer_dma;
603 };
604 };
564}; 605};
565 606
566/* 607/*
@@ -568,6 +609,7 @@ struct pmcraid_cmd {
568 */ 609 */
569struct pmcraid_interrupts { 610struct pmcraid_interrupts {
570 void __iomem *ioa_host_interrupt_reg; 611 void __iomem *ioa_host_interrupt_reg;
612 void __iomem *ioa_host_msix_interrupt_reg;
571 void __iomem *ioa_host_interrupt_clr_reg; 613 void __iomem *ioa_host_interrupt_clr_reg;
572 void __iomem *ioa_host_interrupt_mask_reg; 614 void __iomem *ioa_host_interrupt_mask_reg;
573 void __iomem *ioa_host_interrupt_mask_clr_reg; 615 void __iomem *ioa_host_interrupt_mask_clr_reg;
@@ -578,11 +620,12 @@ struct pmcraid_interrupts {
578 620
579/* ISR parameters LLD allocates (one for each MSI-X if enabled) vectors */ 621/* ISR parameters LLD allocates (one for each MSI-X if enabled) vectors */
580struct pmcraid_isr_param { 622struct pmcraid_isr_param {
581 u8 hrrq_id; /* hrrq entry index */
582 u16 vector; /* allocated msi-x vector */
583 struct pmcraid_instance *drv_inst; 623 struct pmcraid_instance *drv_inst;
624 u16 vector; /* allocated msi-x vector */
625 u8 hrrq_id; /* hrrq entry index */
584}; 626};
585 627
628
586/* AEN message header sent as part of event data to applications */ 629/* AEN message header sent as part of event data to applications */
587struct pmcraid_aen_msg { 630struct pmcraid_aen_msg {
588 u32 hostno; 631 u32 hostno;
@@ -591,6 +634,19 @@ struct pmcraid_aen_msg {
591 u8 data[0]; 634 u8 data[0];
592}; 635};
593 636
637/* Controller state event message type */
638struct pmcraid_state_msg {
639 struct pmcraid_aen_msg msg;
640 u32 ioa_state;
641};
642
643#define PMC_DEVICE_EVENT_RESET_START 0x11000000
644#define PMC_DEVICE_EVENT_RESET_SUCCESS 0x11000001
645#define PMC_DEVICE_EVENT_RESET_FAILED 0x11000002
646#define PMC_DEVICE_EVENT_SHUTDOWN_START 0x11000003
647#define PMC_DEVICE_EVENT_SHUTDOWN_SUCCESS 0x11000004
648#define PMC_DEVICE_EVENT_SHUTDOWN_FAILED 0x11000005
649
594struct pmcraid_hostrcb { 650struct pmcraid_hostrcb {
595 struct pmcraid_instance *drv_inst; 651 struct pmcraid_instance *drv_inst;
596 struct pmcraid_aen_msg *msg; 652 struct pmcraid_aen_msg *msg;
@@ -628,6 +684,7 @@ struct pmcraid_instance {
628 /* HostRCBs needed for HCAM */ 684 /* HostRCBs needed for HCAM */
629 struct pmcraid_hostrcb ldn; 685 struct pmcraid_hostrcb ldn;
630 struct pmcraid_hostrcb ccn; 686 struct pmcraid_hostrcb ccn;
687 struct pmcraid_state_msg scn; /* controller state change msg */
631 688
632 689
633 /* Bus address of start of HRRQ */ 690 /* Bus address of start of HRRQ */
@@ -645,12 +702,15 @@ struct pmcraid_instance {
645 /* Lock for HRRQ access */ 702 /* Lock for HRRQ access */
646 spinlock_t hrrq_lock[PMCRAID_NUM_MSIX_VECTORS]; 703 spinlock_t hrrq_lock[PMCRAID_NUM_MSIX_VECTORS];
647 704
705 struct pmcraid_inquiry_data *inq_data;
706 dma_addr_t inq_data_baddr;
707
708 /* size of configuration table entry, varies based on the firmware */
709 u32 config_table_entry_size;
710
648 /* Expected toggle bit at host */ 711 /* Expected toggle bit at host */
649 u8 host_toggle_bit[PMCRAID_NUM_MSIX_VECTORS]; 712 u8 host_toggle_bit[PMCRAID_NUM_MSIX_VECTORS];
650 713
651 /* No of Reset IOA retries . IOA marked dead if threshold exceeds */
652 u8 ioa_reset_attempts;
653#define PMCRAID_RESET_ATTEMPTS 3
654 714
655 /* Wait Q for threads to wait for Reset IOA completion */ 715 /* Wait Q for threads to wait for Reset IOA completion */
656 wait_queue_head_t reset_wait_q; 716 wait_queue_head_t reset_wait_q;
@@ -664,14 +724,22 @@ struct pmcraid_instance {
664 struct Scsi_Host *host; /* mid layer interface structure handle */ 724 struct Scsi_Host *host; /* mid layer interface structure handle */
665 struct pci_dev *pdev; /* PCI device structure handle */ 725 struct pci_dev *pdev; /* PCI device structure handle */
666 726
727 /* No of Reset IOA retries . IOA marked dead if threshold exceeds */
728 u8 ioa_reset_attempts;
729#define PMCRAID_RESET_ATTEMPTS 3
730
667 u8 current_log_level; /* default level for logging IOASC errors */ 731 u8 current_log_level; /* default level for logging IOASC errors */
668 732
669 u8 num_hrrq; /* Number of interrupt vectors allocated */ 733 u8 num_hrrq; /* Number of interrupt vectors allocated */
734 u8 interrupt_mode; /* current interrupt mode legacy or msix */
670 dev_t dev; /* Major-Minor numbers for Char device */ 735 dev_t dev; /* Major-Minor numbers for Char device */
671 736
672 /* Used as ISR handler argument */ 737 /* Used as ISR handler argument */
673 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS]; 738 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
674 739
740 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
741 atomic_t last_message_id;
742
675 /* configuration table */ 743 /* configuration table */
676 struct pmcraid_config_table *cfg_table; 744 struct pmcraid_config_table *cfg_table;
677 dma_addr_t cfg_table_bus_addr; 745 dma_addr_t cfg_table_bus_addr;
@@ -686,8 +754,14 @@ struct pmcraid_instance {
686 754
687 struct list_head free_cmd_pool; 755 struct list_head free_cmd_pool;
688 struct list_head pending_cmd_pool; 756 struct list_head pending_cmd_pool;
689 spinlock_t free_pool_lock; /* free pool lock */ 757 spinlock_t free_pool_lock; /* free pool lock */
690 spinlock_t pending_pool_lock; /* pending pool lock */ 758 spinlock_t pending_pool_lock; /* pending pool lock */
759
760 /* Tasklet to handle deferred processing */
761 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
762
763 /* Work-queue (Shared) for deferred reset processing */
764 struct work_struct worker_q;
691 765
692 /* No of IO commands pending with FW */ 766 /* No of IO commands pending with FW */
693 atomic_t outstanding_cmds; 767 atomic_t outstanding_cmds;
@@ -695,11 +769,6 @@ struct pmcraid_instance {
695 /* should add/delete resources to mid-layer now ?*/ 769 /* should add/delete resources to mid-layer now ?*/
696 atomic_t expose_resources; 770 atomic_t expose_resources;
697 771
698 /* Tasklet to handle deferred processing */
699 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
700
701 /* Work-queue (Shared) for deferred reset processing */
702 struct work_struct worker_q;
703 772
704 773
705 u32 ioa_state:4; /* For IOA Reset sequence FSM */ 774 u32 ioa_state:4; /* For IOA Reset sequence FSM */
@@ -728,7 +797,10 @@ struct pmcraid_instance {
728/* LLD maintained resource entry structure */ 797/* LLD maintained resource entry structure */
729struct pmcraid_resource_entry { 798struct pmcraid_resource_entry {
730 struct list_head queue; /* link to "to be exposed" resources */ 799 struct list_head queue; /* link to "to be exposed" resources */
731 struct pmcraid_config_table_entry cfg_entry; 800 union {
801 struct pmcraid_config_table_entry cfg_entry;
802 struct pmcraid_config_table_entry_ext cfg_entry_ext;
803 };
732 struct scsi_device *scsi_dev; /* Link scsi_device structure */ 804 struct scsi_device *scsi_dev; /* Link scsi_device structure */
733 atomic_t read_failures; /* count of failed READ commands */ 805 atomic_t read_failures; /* count of failed READ commands */
734 atomic_t write_failures; /* count of failed WRITE commands */ 806 atomic_t write_failures; /* count of failed WRITE commands */
@@ -771,73 +843,75 @@ struct pmcraid_ioasc_error {
771 * statically. 843 * statically.
772 */ 844 */
773static struct pmcraid_ioasc_error pmcraid_ioasc_error_table[] = { 845static struct pmcraid_ioasc_error pmcraid_ioasc_error_table[] = {
774 {0x01180600, IOASC_LOG_LEVEL_MUST, 846 {0x01180600, IOASC_LOG_LEVEL_HARD,
775 "Recovered Error, soft media error, sector reassignment suggested"}, 847 "Recovered Error, soft media error, sector reassignment suggested"},
776 {0x015D0000, IOASC_LOG_LEVEL_MUST, 848 {0x015D0000, IOASC_LOG_LEVEL_HARD,
777 "Recovered Error, failure prediction threshold exceeded"}, 849 "Recovered Error, failure prediction thresold exceeded"},
778 {0x015D9200, IOASC_LOG_LEVEL_MUST, 850 {0x015D9200, IOASC_LOG_LEVEL_HARD,
779 "Recovered Error, soft Cache Card Battery error threshold"}, 851 "Recovered Error, soft Cache Card Battery error thresold"},
780 {0x015D9200, IOASC_LOG_LEVEL_MUST, 852 {0x015D9200, IOASC_LOG_LEVEL_HARD,
781 "Recovered Error, soft Cache Card Battery error threshold"}, 853 "Recovered Error, soft Cache Card Battery error thresold"},
782 {0x02048000, IOASC_LOG_LEVEL_MUST, 854 {0x02048000, IOASC_LOG_LEVEL_HARD,
783 "Not Ready, IOA Reset Required"}, 855 "Not Ready, IOA Reset Required"},
784 {0x02408500, IOASC_LOG_LEVEL_MUST, 856 {0x02408500, IOASC_LOG_LEVEL_HARD,
785 "Not Ready, IOA microcode download required"}, 857 "Not Ready, IOA microcode download required"},
786 {0x03110B00, IOASC_LOG_LEVEL_MUST, 858 {0x03110B00, IOASC_LOG_LEVEL_HARD,
787 "Medium Error, data unreadable, reassignment suggested"}, 859 "Medium Error, data unreadable, reassignment suggested"},
788 {0x03110C00, IOASC_LOG_LEVEL_MUST, 860 {0x03110C00, IOASC_LOG_LEVEL_MUST,
789 "Medium Error, data unreadable do not reassign"}, 861 "Medium Error, data unreadable do not reassign"},
790 {0x03310000, IOASC_LOG_LEVEL_MUST, 862 {0x03310000, IOASC_LOG_LEVEL_HARD,
791 "Medium Error, media corrupted"}, 863 "Medium Error, media corrupted"},
792 {0x04050000, IOASC_LOG_LEVEL_MUST, 864 {0x04050000, IOASC_LOG_LEVEL_HARD,
793 "Hardware Error, IOA can't communicate with device"}, 865 "Hardware Error, IOA can't communicate with device"},
794 {0x04080000, IOASC_LOG_LEVEL_MUST, 866 {0x04080000, IOASC_LOG_LEVEL_MUST,
795 "Hardware Error, device bus error"}, 867 "Hardware Error, device bus error"},
796 {0x04080000, IOASC_LOG_LEVEL_MUST, 868 {0x04088000, IOASC_LOG_LEVEL_MUST,
797 "Hardware Error, device bus is not functioning"}, 869 "Hardware Error, device bus is not functioning"},
798 {0x04118000, IOASC_LOG_LEVEL_MUST, 870 {0x04118000, IOASC_LOG_LEVEL_HARD,
799 "Hardware Error, IOA reserved area data check"}, 871 "Hardware Error, IOA reserved area data check"},
800 {0x04118100, IOASC_LOG_LEVEL_MUST, 872 {0x04118100, IOASC_LOG_LEVEL_HARD,
801 "Hardware Error, IOA reserved area invalid data pattern"}, 873 "Hardware Error, IOA reserved area invalid data pattern"},
802 {0x04118200, IOASC_LOG_LEVEL_MUST, 874 {0x04118200, IOASC_LOG_LEVEL_HARD,
803 "Hardware Error, IOA reserved area LRC error"}, 875 "Hardware Error, IOA reserved area LRC error"},
804 {0x04320000, IOASC_LOG_LEVEL_MUST, 876 {0x04320000, IOASC_LOG_LEVEL_HARD,
805 "Hardware Error, reassignment space exhausted"}, 877 "Hardware Error, reassignment space exhausted"},
806 {0x04330000, IOASC_LOG_LEVEL_MUST, 878 {0x04330000, IOASC_LOG_LEVEL_HARD,
807 "Hardware Error, data transfer underlength error"}, 879 "Hardware Error, data transfer underlength error"},
808 {0x04330000, IOASC_LOG_LEVEL_MUST, 880 {0x04330000, IOASC_LOG_LEVEL_HARD,
809 "Hardware Error, data transfer overlength error"}, 881 "Hardware Error, data transfer overlength error"},
810 {0x04418000, IOASC_LOG_LEVEL_MUST, 882 {0x04418000, IOASC_LOG_LEVEL_MUST,
811 "Hardware Error, PCI bus error"}, 883 "Hardware Error, PCI bus error"},
812 {0x04440000, IOASC_LOG_LEVEL_MUST, 884 {0x04440000, IOASC_LOG_LEVEL_HARD,
813 "Hardware Error, device error"}, 885 "Hardware Error, device error"},
814 {0x04448300, IOASC_LOG_LEVEL_MUST, 886 {0x04448200, IOASC_LOG_LEVEL_MUST,
887 "Hardware Error, IOA error"},
888 {0x04448300, IOASC_LOG_LEVEL_HARD,
815 "Hardware Error, undefined device response"}, 889 "Hardware Error, undefined device response"},
816 {0x04448400, IOASC_LOG_LEVEL_MUST, 890 {0x04448400, IOASC_LOG_LEVEL_HARD,
817 "Hardware Error, IOA microcode error"}, 891 "Hardware Error, IOA microcode error"},
818 {0x04448600, IOASC_LOG_LEVEL_MUST, 892 {0x04448600, IOASC_LOG_LEVEL_HARD,
819 "Hardware Error, IOA reset required"}, 893 "Hardware Error, IOA reset required"},
820 {0x04449200, IOASC_LOG_LEVEL_MUST, 894 {0x04449200, IOASC_LOG_LEVEL_HARD,
821 "Hardware Error, hard Cache Fearuee Card Battery error"}, 895 "Hardware Error, hard Cache Fearuee Card Battery error"},
822 {0x0444A000, IOASC_LOG_LEVEL_MUST, 896 {0x0444A000, IOASC_LOG_LEVEL_HARD,
823 "Hardware Error, failed device altered"}, 897 "Hardware Error, failed device altered"},
824 {0x0444A200, IOASC_LOG_LEVEL_MUST, 898 {0x0444A200, IOASC_LOG_LEVEL_HARD,
825 "Hardware Error, data check after reassignment"}, 899 "Hardware Error, data check after reassignment"},
826 {0x0444A300, IOASC_LOG_LEVEL_MUST, 900 {0x0444A300, IOASC_LOG_LEVEL_HARD,
827 "Hardware Error, LRC error after reassignment"}, 901 "Hardware Error, LRC error after reassignment"},
828 {0x044A0000, IOASC_LOG_LEVEL_MUST, 902 {0x044A0000, IOASC_LOG_LEVEL_HARD,
829 "Hardware Error, device bus error (msg/cmd phase)"}, 903 "Hardware Error, device bus error (msg/cmd phase)"},
830 {0x04670400, IOASC_LOG_LEVEL_MUST, 904 {0x04670400, IOASC_LOG_LEVEL_HARD,
831 "Hardware Error, new device can't be used"}, 905 "Hardware Error, new device can't be used"},
832 {0x04678000, IOASC_LOG_LEVEL_MUST, 906 {0x04678000, IOASC_LOG_LEVEL_HARD,
833 "Hardware Error, invalid multiadapter configuration"}, 907 "Hardware Error, invalid multiadapter configuration"},
834 {0x04678100, IOASC_LOG_LEVEL_MUST, 908 {0x04678100, IOASC_LOG_LEVEL_HARD,
835 "Hardware Error, incorrect connection between enclosures"}, 909 "Hardware Error, incorrect connection between enclosures"},
836 {0x04678200, IOASC_LOG_LEVEL_MUST, 910 {0x04678200, IOASC_LOG_LEVEL_HARD,
837 "Hardware Error, connections exceed IOA design limits"}, 911 "Hardware Error, connections exceed IOA design limits"},
838 {0x04678300, IOASC_LOG_LEVEL_MUST, 912 {0x04678300, IOASC_LOG_LEVEL_HARD,
839 "Hardware Error, incorrect multipath connection"}, 913 "Hardware Error, incorrect multipath connection"},
840 {0x04679000, IOASC_LOG_LEVEL_MUST, 914 {0x04679000, IOASC_LOG_LEVEL_HARD,
841 "Hardware Error, command to LUN failed"}, 915 "Hardware Error, command to LUN failed"},
842 {0x064C8000, IOASC_LOG_LEVEL_HARD, 916 {0x064C8000, IOASC_LOG_LEVEL_HARD,
843 "Unit Attention, cache exists for missing/failed device"}, 917 "Unit Attention, cache exists for missing/failed device"},
@@ -845,15 +919,15 @@ static struct pmcraid_ioasc_error pmcraid_ioasc_error_table[] = {
845 "Unit Attention, incompatible exposed mode device"}, 919 "Unit Attention, incompatible exposed mode device"},
846 {0x06670600, IOASC_LOG_LEVEL_HARD, 920 {0x06670600, IOASC_LOG_LEVEL_HARD,
847 "Unit Attention, attachment of logical unit failed"}, 921 "Unit Attention, attachment of logical unit failed"},
848 {0x06678000, IOASC_LOG_LEVEL_MUST, 922 {0x06678000, IOASC_LOG_LEVEL_HARD,
849 "Unit Attention, cables exceed connective design limit"}, 923 "Unit Attention, cables exceed connective design limit"},
850 {0x06678300, IOASC_LOG_LEVEL_MUST, 924 {0x06678300, IOASC_LOG_LEVEL_HARD,
851 "Unit Attention, incomplete multipath connection between" \ 925 "Unit Attention, incomplete multipath connection between" \
852 "IOA and enclosure"}, 926 "IOA and enclosure"},
853 {0x06678400, IOASC_LOG_LEVEL_MUST, 927 {0x06678400, IOASC_LOG_LEVEL_HARD,
854 "Unit Attention, incomplete multipath connection between" \ 928 "Unit Attention, incomplete multipath connection between" \
855 "device and enclosure"}, 929 "device and enclosure"},
856 {0x06678500, IOASC_LOG_LEVEL_MUST, 930 {0x06678500, IOASC_LOG_LEVEL_HARD,
857 "Unit Attention, incomplete multipath connection between" \ 931 "Unit Attention, incomplete multipath connection between" \
858 "IOA and remote IOA"}, 932 "IOA and remote IOA"},
859 {0x06678600, IOASC_LOG_LEVEL_HARD, 933 {0x06678600, IOASC_LOG_LEVEL_HARD,
@@ -863,11 +937,11 @@ static struct pmcraid_ioasc_error pmcraid_ioasc_error_table[] = {
863 "function"}, 937 "function"},
864 {0x06698200, IOASC_LOG_LEVEL_HARD, 938 {0x06698200, IOASC_LOG_LEVEL_HARD,
865 "Unit Attention, corrupt array parity detected on device"}, 939 "Unit Attention, corrupt array parity detected on device"},
866 {0x066B0200, IOASC_LOG_LEVEL_MUST, 940 {0x066B0200, IOASC_LOG_LEVEL_HARD,
867 "Unit Attention, array exposed"}, 941 "Unit Attention, array exposed"},
868 {0x066B8200, IOASC_LOG_LEVEL_HARD, 942 {0x066B8200, IOASC_LOG_LEVEL_HARD,
869 "Unit Attention, exposed array is still protected"}, 943 "Unit Attention, exposed array is still protected"},
870 {0x066B9200, IOASC_LOG_LEVEL_MUST, 944 {0x066B9200, IOASC_LOG_LEVEL_HARD,
871 "Unit Attention, Multipath redundancy level got worse"}, 945 "Unit Attention, Multipath redundancy level got worse"},
872 {0x07270000, IOASC_LOG_LEVEL_HARD, 946 {0x07270000, IOASC_LOG_LEVEL_HARD,
873 "Data Protect, device is read/write protected by IOA"}, 947 "Data Protect, device is read/write protected by IOA"},
@@ -875,37 +949,37 @@ static struct pmcraid_ioasc_error pmcraid_ioasc_error_table[] = {
875 "Data Protect, IOA doesn't support device attribute"}, 949 "Data Protect, IOA doesn't support device attribute"},
876 {0x07278100, IOASC_LOG_LEVEL_HARD, 950 {0x07278100, IOASC_LOG_LEVEL_HARD,
877 "Data Protect, NVRAM mirroring prohibited"}, 951 "Data Protect, NVRAM mirroring prohibited"},
878 {0x07278400, IOASC_LOG_LEVEL_MUST, 952 {0x07278400, IOASC_LOG_LEVEL_HARD,
879 "Data Protect, array is short 2 or more devices"}, 953 "Data Protect, array is short 2 or more devices"},
880 {0x07278600, IOASC_LOG_LEVEL_MUST, 954 {0x07278600, IOASC_LOG_LEVEL_HARD,
881 "Data Protect, exposed array is short a required device"}, 955 "Data Protect, exposed array is short a required device"},
882 {0x07278700, IOASC_LOG_LEVEL_MUST, 956 {0x07278700, IOASC_LOG_LEVEL_HARD,
883 "Data Protect, array members not at required addresses"}, 957 "Data Protect, array members not at required addresses"},
884 {0x07278800, IOASC_LOG_LEVEL_MUST, 958 {0x07278800, IOASC_LOG_LEVEL_HARD,
885 "Data Protect, exposed mode device resource address conflict"}, 959 "Data Protect, exposed mode device resource address conflict"},
886 {0x07278900, IOASC_LOG_LEVEL_MUST, 960 {0x07278900, IOASC_LOG_LEVEL_HARD,
887 "Data Protect, incorrect resource address of exposed mode device"}, 961 "Data Protect, incorrect resource address of exposed mode device"},
888 {0x07278A00, IOASC_LOG_LEVEL_MUST, 962 {0x07278A00, IOASC_LOG_LEVEL_HARD,
889 "Data Protect, Array is missing a device and parity is out of sync"}, 963 "Data Protect, Array is missing a device and parity is out of sync"},
890 {0x07278B00, IOASC_LOG_LEVEL_MUST, 964 {0x07278B00, IOASC_LOG_LEVEL_HARD,
891 "Data Protect, maximum number of arrays already exist"}, 965 "Data Protect, maximum number of arrays already exist"},
892 {0x07278C00, IOASC_LOG_LEVEL_HARD, 966 {0x07278C00, IOASC_LOG_LEVEL_HARD,
893 "Data Protect, cannot locate cache data for device"}, 967 "Data Protect, cannot locate cache data for device"},
894 {0x07278D00, IOASC_LOG_LEVEL_HARD, 968 {0x07278D00, IOASC_LOG_LEVEL_HARD,
895 "Data Protect, cache data exits for a changed device"}, 969 "Data Protect, cache data exits for a changed device"},
896 {0x07279100, IOASC_LOG_LEVEL_MUST, 970 {0x07279100, IOASC_LOG_LEVEL_HARD,
897 "Data Protect, detection of a device requiring format"}, 971 "Data Protect, detection of a device requiring format"},
898 {0x07279200, IOASC_LOG_LEVEL_MUST, 972 {0x07279200, IOASC_LOG_LEVEL_HARD,
899 "Data Protect, IOA exceeds maximum number of devices"}, 973 "Data Protect, IOA exceeds maximum number of devices"},
900 {0x07279600, IOASC_LOG_LEVEL_MUST, 974 {0x07279600, IOASC_LOG_LEVEL_HARD,
901 "Data Protect, missing array, volume set is not functional"}, 975 "Data Protect, missing array, volume set is not functional"},
902 {0x07279700, IOASC_LOG_LEVEL_MUST, 976 {0x07279700, IOASC_LOG_LEVEL_HARD,
903 "Data Protect, single device for a volume set"}, 977 "Data Protect, single device for a volume set"},
904 {0x07279800, IOASC_LOG_LEVEL_MUST, 978 {0x07279800, IOASC_LOG_LEVEL_HARD,
905 "Data Protect, missing multiple devices for a volume set"}, 979 "Data Protect, missing multiple devices for a volume set"},
906 {0x07279900, IOASC_LOG_LEVEL_HARD, 980 {0x07279900, IOASC_LOG_LEVEL_HARD,
907 "Data Protect, maximum number of volument sets already exists"}, 981 "Data Protect, maximum number of volument sets already exists"},
908 {0x07279A00, IOASC_LOG_LEVEL_MUST, 982 {0x07279A00, IOASC_LOG_LEVEL_HARD,
909 "Data Protect, other volume set problem"}, 983 "Data Protect, other volume set problem"},
910}; 984};
911 985
@@ -952,27 +1026,6 @@ struct pmcraid_ioctl_header {
952 1026
953#define PMCRAID_IOCTL_SIGNATURE "PMCRAID" 1027#define PMCRAID_IOCTL_SIGNATURE "PMCRAID"
954 1028
955
956/*
957 * pmcraid_event_details - defines AEN details that apps can retrieve from LLD
958 *
959 * .rcb_ccn - complete RCB of CCN
960 * .rcb_ldn - complete RCB of CCN
961 */
962struct pmcraid_event_details {
963 struct pmcraid_hcam_ccn rcb_ccn;
964 struct pmcraid_hcam_ldn rcb_ldn;
965};
966
967/*
968 * pmcraid_driver_ioctl_buffer - structure passed as argument to most of the
969 * PMC driver handled ioctls.
970 */
971struct pmcraid_driver_ioctl_buffer {
972 struct pmcraid_ioctl_header ioctl_header;
973 struct pmcraid_event_details event_details;
974};
975
976/* 1029/*
977 * pmcraid_passthrough_ioctl_buffer - structure given as argument to 1030 * pmcraid_passthrough_ioctl_buffer - structure given as argument to
978 * passthrough(or firmware handled) IOCTL commands. Note that ioarcb requires 1031 * passthrough(or firmware handled) IOCTL commands. Note that ioarcb requires
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 1e4cafabba15..420238cc794e 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -1187,6 +1187,21 @@ qla2x00_optrom_fw_version_show(struct device *dev,
1187} 1187}
1188 1188
1189static ssize_t 1189static ssize_t
1190qla2x00_optrom_gold_fw_version_show(struct device *dev,
1191 struct device_attribute *attr, char *buf)
1192{
1193 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1194 struct qla_hw_data *ha = vha->hw;
1195
1196 if (!IS_QLA81XX(ha))
1197 return snprintf(buf, PAGE_SIZE, "\n");
1198
1199 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
1200 ha->gold_fw_version[0], ha->gold_fw_version[1],
1201 ha->gold_fw_version[2], ha->gold_fw_version[3]);
1202}
1203
1204static ssize_t
1190qla2x00_total_isp_aborts_show(struct device *dev, 1205qla2x00_total_isp_aborts_show(struct device *dev,
1191 struct device_attribute *attr, char *buf) 1206 struct device_attribute *attr, char *buf)
1192{ 1207{
@@ -1208,7 +1223,7 @@ qla24xx_84xx_fw_version_show(struct device *dev,
1208 if (!IS_QLA84XX(ha)) 1223 if (!IS_QLA84XX(ha))
1209 return snprintf(buf, PAGE_SIZE, "\n"); 1224 return snprintf(buf, PAGE_SIZE, "\n");
1210 1225
1211 if (ha->cs84xx && ha->cs84xx->op_fw_version == 0) 1226 if (ha->cs84xx->op_fw_version == 0)
1212 rval = qla84xx_verify_chip(vha, status); 1227 rval = qla84xx_verify_chip(vha, status);
1213 1228
1214 if ((rval == QLA_SUCCESS) && (status[0] == 0)) 1229 if ((rval == QLA_SUCCESS) && (status[0] == 0))
@@ -1336,6 +1351,8 @@ static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
1336 qla2x00_optrom_fcode_version_show, NULL); 1351 qla2x00_optrom_fcode_version_show, NULL);
1337static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show, 1352static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
1338 NULL); 1353 NULL);
1354static DEVICE_ATTR(optrom_gold_fw_version, S_IRUGO,
1355 qla2x00_optrom_gold_fw_version_show, NULL);
1339static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show, 1356static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
1340 NULL); 1357 NULL);
1341static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show, 1358static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
@@ -1376,6 +1393,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
1376 &dev_attr_vn_port_mac_address, 1393 &dev_attr_vn_port_mac_address,
1377 &dev_attr_fabric_param, 1394 &dev_attr_fabric_param,
1378 &dev_attr_fw_state, 1395 &dev_attr_fw_state,
1396 &dev_attr_optrom_gold_fw_version,
1379 NULL, 1397 NULL,
1380}; 1398};
1381 1399
@@ -1732,7 +1750,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1732 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); 1750 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
1733 } 1751 }
1734 1752
1735 if (IS_QLA25XX(ha) && ql2xenabledif) { 1753 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) {
1736 if (ha->fw_attributes & BIT_4) { 1754 if (ha->fw_attributes & BIT_4) {
1737 vha->flags.difdix_supported = 1; 1755 vha->flags.difdix_supported = 1;
1738 DEBUG18(qla_printk(KERN_INFO, ha, 1756 DEBUG18(qla_printk(KERN_INFO, ha,
@@ -1740,8 +1758,10 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1740 " protection.\n")); 1758 " protection.\n"));
1741 scsi_host_set_prot(vha->host, 1759 scsi_host_set_prot(vha->host,
1742 SHOST_DIF_TYPE1_PROTECTION 1760 SHOST_DIF_TYPE1_PROTECTION
1761 | SHOST_DIF_TYPE2_PROTECTION
1743 | SHOST_DIF_TYPE3_PROTECTION 1762 | SHOST_DIF_TYPE3_PROTECTION
1744 | SHOST_DIX_TYPE1_PROTECTION 1763 | SHOST_DIX_TYPE1_PROTECTION
1764 | SHOST_DIX_TYPE2_PROTECTION
1745 | SHOST_DIX_TYPE3_PROTECTION); 1765 | SHOST_DIX_TYPE3_PROTECTION);
1746 scsi_host_set_guard(vha->host, SHOST_DIX_GUARD_CRC); 1766 scsi_host_set_guard(vha->host, SHOST_DIX_GUARD_CRC);
1747 } else 1767 } else
@@ -1809,7 +1829,6 @@ static int
1809qla24xx_vport_delete(struct fc_vport *fc_vport) 1829qla24xx_vport_delete(struct fc_vport *fc_vport)
1810{ 1830{
1811 scsi_qla_host_t *vha = fc_vport->dd_data; 1831 scsi_qla_host_t *vha = fc_vport->dd_data;
1812 fc_port_t *fcport, *tfcport;
1813 struct qla_hw_data *ha = vha->hw; 1832 struct qla_hw_data *ha = vha->hw;
1814 uint16_t id = vha->vp_idx; 1833 uint16_t id = vha->vp_idx;
1815 1834
@@ -1823,11 +1842,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
1823 1842
1824 scsi_remove_host(vha->host); 1843 scsi_remove_host(vha->host);
1825 1844
1826 list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) { 1845 qla2x00_free_fcports(vha);
1827 list_del(&fcport->list);
1828 kfree(fcport);
1829 fcport = NULL;
1830 }
1831 1846
1832 qla24xx_deallocate_vp_id(vha); 1847 qla24xx_deallocate_vp_id(vha);
1833 1848
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index b905dfe5ea61..9067629817ea 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -41,13 +41,28 @@ qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
41 int i, ret, num_valid; 41 int i, ret, num_valid;
42 uint8_t *bcode; 42 uint8_t *bcode;
43 struct qla_fcp_prio_entry *pri_entry; 43 struct qla_fcp_prio_entry *pri_entry;
44 uint32_t *bcode_val_ptr, bcode_val;
44 45
45 ret = 1; 46 ret = 1;
46 num_valid = 0; 47 num_valid = 0;
47 bcode = (uint8_t *)pri_cfg; 48 bcode = (uint8_t *)pri_cfg;
49 bcode_val_ptr = (uint32_t *)pri_cfg;
50 bcode_val = (uint32_t)(*bcode_val_ptr);
48 51
49 if (bcode[0x0] != 'H' || bcode[0x1] != 'Q' || bcode[0x2] != 'O' || 52 if (bcode_val == 0xFFFFFFFF) {
50 bcode[0x3] != 'S') { 53 /* No FCP Priority config data in flash */
54 DEBUG2(printk(KERN_INFO
55 "%s: No FCP priority config data.\n",
56 __func__));
57 return 0;
58 }
59
60 if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
61 bcode[3] != 'S') {
62 /* Invalid FCP priority data header*/
63 DEBUG2(printk(KERN_ERR
64 "%s: Invalid FCP Priority data header. bcode=0x%x\n",
65 __func__, bcode_val));
51 return 0; 66 return 0;
52 } 67 }
53 if (flag != 1) 68 if (flag != 1)
@@ -60,8 +75,18 @@ qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
60 pri_entry++; 75 pri_entry++;
61 } 76 }
62 77
63 if (num_valid == 0) 78 if (num_valid == 0) {
79 /* No valid FCP priority data entries */
80 DEBUG2(printk(KERN_ERR
81 "%s: No valid FCP Priority data entries.\n",
82 __func__));
64 ret = 0; 83 ret = 0;
84 } else {
85 /* FCP priority data is valid */
86 DEBUG2(printk(KERN_INFO
87 "%s: Valid FCP priority data. num entries = %d\n",
88 __func__, num_valid));
89 }
65 90
66 return ret; 91 return ret;
67} 92}
@@ -78,6 +103,11 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
78 103
79 bsg_job->reply->reply_payload_rcv_len = 0; 104 bsg_job->reply->reply_payload_rcv_len = 0;
80 105
106 if (!IS_QLA24XX_TYPE(ha) || !IS_QLA25XX(ha)) {
107 ret = -EINVAL;
108 goto exit_fcp_prio_cfg;
109 }
110
81 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || 111 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
82 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || 112 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
83 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 113 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
@@ -199,7 +229,7 @@ static int
199qla2x00_process_els(struct fc_bsg_job *bsg_job) 229qla2x00_process_els(struct fc_bsg_job *bsg_job)
200{ 230{
201 struct fc_rport *rport; 231 struct fc_rport *rport;
202 fc_port_t *fcport; 232 fc_port_t *fcport = NULL;
203 struct Scsi_Host *host; 233 struct Scsi_Host *host;
204 scsi_qla_host_t *vha; 234 scsi_qla_host_t *vha;
205 struct qla_hw_data *ha; 235 struct qla_hw_data *ha;
@@ -210,6 +240,29 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
210 uint16_t nextlid = 0; 240 uint16_t nextlid = 0;
211 struct srb_ctx *els; 241 struct srb_ctx *els;
212 242
243 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
244 rport = bsg_job->rport;
245 fcport = *(fc_port_t **) rport->dd_data;
246 host = rport_to_shost(rport);
247 vha = shost_priv(host);
248 ha = vha->hw;
249 type = "FC_BSG_RPT_ELS";
250 } else {
251 host = bsg_job->shost;
252 vha = shost_priv(host);
253 ha = vha->hw;
254 type = "FC_BSG_HST_ELS_NOLOGIN";
255 }
256
257 /* pass through is supported only for ISP 4Gb or higher */
258 if (!IS_FWI2_CAPABLE(ha)) {
259 DEBUG2(qla_printk(KERN_INFO, ha,
260 "scsi(%ld):ELS passthru not supported for ISP23xx based "
261 "adapters\n", vha->host_no));
262 rval = -EPERM;
263 goto done;
264 }
265
213 /* Multiple SG's are not supported for ELS requests */ 266 /* Multiple SG's are not supported for ELS requests */
214 if (bsg_job->request_payload.sg_cnt > 1 || 267 if (bsg_job->request_payload.sg_cnt > 1 ||
215 bsg_job->reply_payload.sg_cnt > 1) { 268 bsg_job->reply_payload.sg_cnt > 1) {
@@ -224,13 +277,6 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
224 277
225 /* ELS request for rport */ 278 /* ELS request for rport */
226 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) { 279 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
227 rport = bsg_job->rport;
228 fcport = *(fc_port_t **) rport->dd_data;
229 host = rport_to_shost(rport);
230 vha = shost_priv(host);
231 ha = vha->hw;
232 type = "FC_BSG_RPT_ELS";
233
234 /* make sure the rport is logged in, 280 /* make sure the rport is logged in,
235 * if not perform fabric login 281 * if not perform fabric login
236 */ 282 */
@@ -242,11 +288,6 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
242 goto done; 288 goto done;
243 } 289 }
244 } else { 290 } else {
245 host = bsg_job->shost;
246 vha = shost_priv(host);
247 ha = vha->hw;
248 type = "FC_BSG_HST_ELS_NOLOGIN";
249
250 /* Allocate a dummy fcport structure, since functions 291 /* Allocate a dummy fcport structure, since functions
251 * preparing the IOCB and mailbox command retrieves port 292 * preparing the IOCB and mailbox command retrieves port
252 * specific information from fcport structure. For Host based 293 * specific information from fcport structure. For Host based
@@ -366,15 +407,6 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
366 char *type = "FC_BSG_HST_CT"; 407 char *type = "FC_BSG_HST_CT";
367 struct srb_ctx *ct; 408 struct srb_ctx *ct;
368 409
369 /* pass through is supported only for ISP 4Gb or higher */
370 if (!IS_FWI2_CAPABLE(ha)) {
371 DEBUG2(qla_printk(KERN_INFO, ha,
372 "scsi(%ld):Firmware is not capable to support FC "
373 "CT pass thru\n", vha->host_no));
374 rval = -EPERM;
375 goto done;
376 }
377
378 req_sg_cnt = 410 req_sg_cnt =
379 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 411 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
380 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 412 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
@@ -483,6 +515,98 @@ done:
483 return rval; 515 return rval;
484} 516}
485 517
518/* Set the port configuration to enable the
519 * internal loopback on ISP81XX
520 */
521static inline int
522qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
523 uint16_t *new_config)
524{
525 int ret = 0;
526 int rval = 0;
527 struct qla_hw_data *ha = vha->hw;
528
529 if (!IS_QLA81XX(ha))
530 goto done_set_internal;
531
532 new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
533 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;
534
535 ha->notify_dcbx_comp = 1;
536 ret = qla81xx_set_port_config(vha, new_config);
537 if (ret != QLA_SUCCESS) {
538 DEBUG2(printk(KERN_ERR
539 "%s(%lu): Set port config failed\n",
540 __func__, vha->host_no));
541 ha->notify_dcbx_comp = 0;
542 rval = -EINVAL;
543 goto done_set_internal;
544 }
545
546 /* Wait for DCBX complete event */
547 if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
548 DEBUG2(qla_printk(KERN_WARNING, ha,
549 "State change notificaition not received.\n"));
550 } else
551 DEBUG2(qla_printk(KERN_INFO, ha,
552 "State change RECEIVED\n"));
553
554 ha->notify_dcbx_comp = 0;
555
556done_set_internal:
557 return rval;
558}
559
560/* Set the port configuration to disable the
561 * internal loopback on ISP81XX
562 */
563static inline int
564qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
565 int wait)
566{
567 int ret = 0;
568 int rval = 0;
569 uint16_t new_config[4];
570 struct qla_hw_data *ha = vha->hw;
571
572 if (!IS_QLA81XX(ha))
573 goto done_reset_internal;
574
575 memset(new_config, 0 , sizeof(new_config));
576 if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
577 ENABLE_INTERNAL_LOOPBACK) {
578 new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
579 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;
580
581 ha->notify_dcbx_comp = wait;
582 ret = qla81xx_set_port_config(vha, new_config);
583 if (ret != QLA_SUCCESS) {
584 DEBUG2(printk(KERN_ERR
585 "%s(%lu): Set port config failed\n",
586 __func__, vha->host_no));
587 ha->notify_dcbx_comp = 0;
588 rval = -EINVAL;
589 goto done_reset_internal;
590 }
591
592 /* Wait for DCBX complete event */
593 if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
594 (20 * HZ))) {
595 DEBUG2(qla_printk(KERN_WARNING, ha,
596 "State change notificaition not received.\n"));
597 ha->notify_dcbx_comp = 0;
598 rval = -EINVAL;
599 goto done_reset_internal;
600 } else
601 DEBUG2(qla_printk(KERN_INFO, ha,
602 "State change RECEIVED\n"));
603
604 ha->notify_dcbx_comp = 0;
605 }
606done_reset_internal:
607 return rval;
608}
609
486static int 610static int
487qla2x00_process_loopback(struct fc_bsg_job *bsg_job) 611qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
488{ 612{
@@ -494,6 +618,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
494 char *type; 618 char *type;
495 struct msg_echo_lb elreq; 619 struct msg_echo_lb elreq;
496 uint16_t response[MAILBOX_REGISTER_COUNT]; 620 uint16_t response[MAILBOX_REGISTER_COUNT];
621 uint16_t config[4], new_config[4];
497 uint8_t *fw_sts_ptr; 622 uint8_t *fw_sts_ptr;
498 uint8_t *req_data = NULL; 623 uint8_t *req_data = NULL;
499 dma_addr_t req_data_dma; 624 dma_addr_t req_data_dma;
@@ -568,29 +693,102 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
568 693
569 elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; 694 elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
570 695
571 if (ha->current_topology != ISP_CFG_F) { 696 if ((ha->current_topology == ISP_CFG_F ||
572 type = "FC_BSG_HST_VENDOR_LOOPBACK"; 697 (IS_QLA81XX(ha) &&
698 le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
699 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
700 elreq.options == EXTERNAL_LOOPBACK) {
701 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
573 DEBUG2(qla_printk(KERN_INFO, ha, 702 DEBUG2(qla_printk(KERN_INFO, ha,
574 "scsi(%ld) bsg rqst type: %s\n", 703 "scsi(%ld) bsg rqst type: %s\n", vha->host_no, type));
575 vha->host_no, type)); 704 command_sent = INT_DEF_LB_ECHO_CMD;
576 705 rval = qla2x00_echo_test(vha, &elreq, response);
577 command_sent = INT_DEF_LB_LOOPBACK_CMD; 706 } else {
578 rval = qla2x00_loopback_test(vha, &elreq, response);
579 if (IS_QLA81XX(ha)) { 707 if (IS_QLA81XX(ha)) {
708 memset(config, 0, sizeof(config));
709 memset(new_config, 0, sizeof(new_config));
710 if (qla81xx_get_port_config(vha, config)) {
711 DEBUG2(printk(KERN_ERR
712 "%s(%lu): Get port config failed\n",
713 __func__, vha->host_no));
714 bsg_job->reply->reply_payload_rcv_len = 0;
715 bsg_job->reply->result = (DID_ERROR << 16);
716 rval = -EPERM;
717 goto done_free_dma_req;
718 }
719
720 if (elreq.options != EXTERNAL_LOOPBACK) {
721 DEBUG2(qla_printk(KERN_INFO, ha,
722 "Internal: current port config = %x\n",
723 config[0]));
724 if (qla81xx_set_internal_loopback(vha, config,
725 new_config)) {
726 bsg_job->reply->reply_payload_rcv_len =
727 0;
728 bsg_job->reply->result =
729 (DID_ERROR << 16);
730 rval = -EPERM;
731 goto done_free_dma_req;
732 }
733 } else {
734 /* For external loopback to work
735 * ensure internal loopback is disabled
736 */
737 if (qla81xx_reset_internal_loopback(vha,
738 config, 1)) {
739 bsg_job->reply->reply_payload_rcv_len =
740 0;
741 bsg_job->reply->result =
742 (DID_ERROR << 16);
743 rval = -EPERM;
744 goto done_free_dma_req;
745 }
746 }
747
748 type = "FC_BSG_HST_VENDOR_LOOPBACK";
749 DEBUG2(qla_printk(KERN_INFO, ha,
750 "scsi(%ld) bsg rqst type: %s\n",
751 vha->host_no, type));
752
753 command_sent = INT_DEF_LB_LOOPBACK_CMD;
754 rval = qla2x00_loopback_test(vha, &elreq, response);
755
756 if (new_config[1]) {
757 /* Revert back to original port config
758 * Also clear internal loopback
759 */
760 qla81xx_reset_internal_loopback(vha,
761 new_config, 0);
762 }
763
580 if (response[0] == MBS_COMMAND_ERROR && 764 if (response[0] == MBS_COMMAND_ERROR &&
581 response[1] == MBS_LB_RESET) { 765 response[1] == MBS_LB_RESET) {
582 DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing " 766 DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
583 "ISP\n", __func__, vha->host_no)); 767 "ISP\n", __func__, vha->host_no));
584 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 768 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
585 qla2xxx_wake_dpc(vha); 769 qla2xxx_wake_dpc(vha);
770 qla2x00_wait_for_chip_reset(vha);
771 /* Also reset the MPI */
772 if (qla81xx_restart_mpi_firmware(vha) !=
773 QLA_SUCCESS) {
774 qla_printk(KERN_INFO, ha,
775 "MPI reset failed for host%ld.\n",
776 vha->host_no);
777 }
778
779 bsg_job->reply->reply_payload_rcv_len = 0;
780 bsg_job->reply->result = (DID_ERROR << 16);
781 rval = -EIO;
782 goto done_free_dma_req;
586 } 783 }
784 } else {
785 type = "FC_BSG_HST_VENDOR_LOOPBACK";
786 DEBUG2(qla_printk(KERN_INFO, ha,
787 "scsi(%ld) bsg rqst type: %s\n",
788 vha->host_no, type));
789 command_sent = INT_DEF_LB_LOOPBACK_CMD;
790 rval = qla2x00_loopback_test(vha, &elreq, response);
587 } 791 }
588 } else {
589 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
590 DEBUG2(qla_printk(KERN_INFO, ha,
591 "scsi(%ld) bsg rqst type: %s\n", vha->host_no, type));
592 command_sent = INT_DEF_LB_ECHO_CMD;
593 rval = qla2x00_echo_test(vha, &elreq, response);
594 } 792 }
595 793
596 if (rval) { 794 if (rval) {
@@ -1056,6 +1254,20 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
1056 return -EINVAL; 1254 return -EINVAL;
1057 } 1255 }
1058 1256
1257 if (fcport->loop_id == FC_NO_LOOP_ID) {
1258 DEBUG2(printk(KERN_ERR "%s(%ld): Invalid port loop id, "
1259 "loop_id = 0x%x\n",
1260 __func__, vha->host_no, fcport->loop_id));
1261 return -EINVAL;
1262 }
1263
1264 if (fcport->flags & FCF_LOGIN_NEEDED) {
1265 DEBUG2(printk(KERN_ERR "%s(%ld): Remote port not logged in, "
1266 "flags = 0x%x\n",
1267 __func__, vha->host_no, fcport->flags));
1268 return -EINVAL;
1269 }
1270
1059 if (port_param->mode) 1271 if (port_param->mode)
1060 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, 1272 rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
1061 port_param->speed, mb); 1273 port_param->speed, mb);
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index 76ed92dd2ef2..cc7c52f87a11 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -19,6 +19,13 @@
19#define INT_DEF_LB_LOOPBACK_CMD 0 19#define INT_DEF_LB_LOOPBACK_CMD 0
20#define INT_DEF_LB_ECHO_CMD 1 20#define INT_DEF_LB_ECHO_CMD 1
21 21
22/* Loopback related definations */
23#define EXTERNAL_LOOPBACK 0xF2
24#define ENABLE_INTERNAL_LOOPBACK 0x02
25#define INTERNAL_LOOPBACK_MASK 0x000E
26#define MAX_ELS_FRAME_PAYLOAD 252
27#define ELS_OPCODE_BYTE 0x10
28
22/* BSG Vendor specific definations */ 29/* BSG Vendor specific definations */
23#define A84_ISSUE_WRITE_TYPE_CMD 0 30#define A84_ISSUE_WRITE_TYPE_CMD 0
24#define A84_ISSUE_READ_TYPE_CMD 1 31#define A84_ISSUE_READ_TYPE_CMD 1
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 2afc8a362f2c..096141148257 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 916c81f3f55d..6cfc28a25eb3 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 839610909018..3a432ea0c7a3 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -202,6 +202,7 @@ struct sd_dif_tuple {
202 * SCSI Request Block 202 * SCSI Request Block
203 */ 203 */
204typedef struct srb { 204typedef struct srb {
205 atomic_t ref_count;
205 struct fc_port *fcport; 206 struct fc_port *fcport;
206 uint32_t handle; 207 uint32_t handle;
207 208
@@ -249,16 +250,6 @@ struct srb_iocb {
249 uint32_t lun; 250 uint32_t lun;
250 uint32_t data; 251 uint32_t data;
251 } tmf; 252 } tmf;
252 struct {
253 /*
254 * values for modif field below are as
255 * defined in mrk_entry_24xx struct
256 * for the modifier field in qla_fw.h.
257 */
258 uint8_t modif;
259 uint16_t lun;
260 uint32_t data;
261 } marker;
262 } u; 253 } u;
263 254
264 struct timer_list timer; 255 struct timer_list timer;
@@ -276,7 +267,6 @@ struct srb_iocb {
276#define SRB_CT_CMD 5 267#define SRB_CT_CMD 5
277#define SRB_ADISC_CMD 6 268#define SRB_ADISC_CMD 6
278#define SRB_TM_CMD 7 269#define SRB_TM_CMD 7
279#define SRB_MARKER_CMD 8
280 270
281struct srb_ctx { 271struct srb_ctx {
282 uint16_t type; 272 uint16_t type;
@@ -713,6 +703,8 @@ typedef struct {
713#define MBC_SEND_RNFT_ELS 0x5e /* Send RNFT ELS request */ 703#define MBC_SEND_RNFT_ELS 0x5e /* Send RNFT ELS request */
714#define MBC_GET_LINK_PRIV_STATS 0x6d /* Get link & private data. */ 704#define MBC_GET_LINK_PRIV_STATS 0x6d /* Get link & private data. */
715#define MBC_SET_VENDOR_ID 0x76 /* Set Vendor ID. */ 705#define MBC_SET_VENDOR_ID 0x76 /* Set Vendor ID. */
706#define MBC_SET_PORT_CONFIG 0x122 /* Set port configuration */
707#define MBC_GET_PORT_CONFIG 0x123 /* Get port configuration */
716 708
717/* Firmware return data sizes */ 709/* Firmware return data sizes */
718#define FCAL_MAP_SIZE 128 710#define FCAL_MAP_SIZE 128
@@ -1660,8 +1652,14 @@ typedef struct {
1660 uint8_t port_name[WWN_SIZE]; 1652 uint8_t port_name[WWN_SIZE];
1661 uint8_t fabric_port_name[WWN_SIZE]; 1653 uint8_t fabric_port_name[WWN_SIZE];
1662 uint16_t fp_speed; 1654 uint16_t fp_speed;
1655 uint8_t fc4_type;
1663} sw_info_t; 1656} sw_info_t;
1664 1657
1658/* FCP-4 types */
1659#define FC4_TYPE_FCP_SCSI 0x08
1660#define FC4_TYPE_OTHER 0x0
1661#define FC4_TYPE_UNKNOWN 0xff
1662
1665/* 1663/*
1666 * Fibre channel port type. 1664 * Fibre channel port type.
1667 */ 1665 */
@@ -1705,6 +1703,7 @@ typedef struct fc_port {
1705 u32 supported_classes; 1703 u32 supported_classes;
1706 1704
1707 uint16_t vp_idx; 1705 uint16_t vp_idx;
1706 uint8_t fc4_type;
1708} fc_port_t; 1707} fc_port_t;
1709 1708
1710/* 1709/*
@@ -1787,6 +1786,9 @@ typedef struct fc_port {
1787#define GPSC_REQ_SIZE (16 + 8) 1786#define GPSC_REQ_SIZE (16 + 8)
1788#define GPSC_RSP_SIZE (16 + 2 + 2) 1787#define GPSC_RSP_SIZE (16 + 2 + 2)
1789 1788
1789#define GFF_ID_CMD 0x011F
1790#define GFF_ID_REQ_SIZE (16 + 4)
1791#define GFF_ID_RSP_SIZE (16 + 128)
1790 1792
1791/* 1793/*
1792 * HBA attribute types. 1794 * HBA attribute types.
@@ -1988,6 +1990,11 @@ struct ct_sns_req {
1988 struct { 1990 struct {
1989 uint8_t port_name[8]; 1991 uint8_t port_name[8];
1990 } gpsc; 1992 } gpsc;
1993
1994 struct {
1995 uint8_t reserved;
1996 uint8_t port_name[3];
1997 } gff_id;
1991 } req; 1998 } req;
1992}; 1999};
1993 2000
@@ -2060,6 +2067,11 @@ struct ct_sns_rsp {
2060 uint16_t speeds; 2067 uint16_t speeds;
2061 uint16_t speed; 2068 uint16_t speed;
2062 } gpsc; 2069 } gpsc;
2070
2071#define GFF_FCP_SCSI_OFFSET 7
2072 struct {
2073 uint8_t fc4_features[128];
2074 } gff_id;
2063 } rsp; 2075 } rsp;
2064}; 2076};
2065 2077
@@ -2410,6 +2422,7 @@ struct qla_hw_data {
2410 uint32_t cpu_affinity_enabled :1; 2422 uint32_t cpu_affinity_enabled :1;
2411 uint32_t disable_msix_handshake :1; 2423 uint32_t disable_msix_handshake :1;
2412 uint32_t fcp_prio_enabled :1; 2424 uint32_t fcp_prio_enabled :1;
2425 uint32_t fw_hung :1;
2413 } flags; 2426 } flags;
2414 2427
2415 /* This spinlock is used to protect "io transactions", you must 2428 /* This spinlock is used to protect "io transactions", you must
@@ -2630,6 +2643,8 @@ struct qla_hw_data {
2630 struct mutex vport_lock; /* Virtual port synchronization */ 2643 struct mutex vport_lock; /* Virtual port synchronization */
2631 struct completion mbx_cmd_comp; /* Serialize mbx access */ 2644 struct completion mbx_cmd_comp; /* Serialize mbx access */
2632 struct completion mbx_intr_comp; /* Used for completion notification */ 2645 struct completion mbx_intr_comp; /* Used for completion notification */
2646 struct completion dcbx_comp; /* For set port config notification */
2647 int notify_dcbx_comp;
2633 2648
2634 /* Basic firmware related information. */ 2649 /* Basic firmware related information. */
2635 uint16_t fw_major_version; 2650 uint16_t fw_major_version;
@@ -2699,6 +2714,8 @@ struct qla_hw_data {
2699 uint8_t fcode_revision[16]; 2714 uint8_t fcode_revision[16];
2700 uint32_t fw_revision[4]; 2715 uint32_t fw_revision[4];
2701 2716
2717 uint32_t gold_fw_version[4];
2718
2702 /* Offsets for flash/nvram access (set to ~0 if not used). */ 2719 /* Offsets for flash/nvram access (set to ~0 if not used). */
2703 uint32_t flash_conf_off; 2720 uint32_t flash_conf_off;
2704 uint32_t flash_data_off; 2721 uint32_t flash_data_off;
@@ -2783,6 +2800,9 @@ struct qla_hw_data {
2783 uint16_t gbl_dsd_avail; 2800 uint16_t gbl_dsd_avail;
2784 struct list_head gbl_dsd_list; 2801 struct list_head gbl_dsd_list;
2785#define NUM_DSD_CHAIN 4096 2802#define NUM_DSD_CHAIN 4096
2803
2804 uint8_t fw_type;
2805 __le32 file_prd_off; /* File firmware product offset */
2786}; 2806};
2787 2807
2788/* 2808/*
@@ -2961,9 +2981,15 @@ typedef struct scsi_qla_host {
2961 2981
2962#define QLA_DSDS_PER_IOCB 37 2982#define QLA_DSDS_PER_IOCB 37
2963 2983
2984#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
2985
2986enum nexus_wait_type {
2987 WAIT_HOST = 0,
2988 WAIT_TARGET,
2989 WAIT_LUN,
2990};
2991
2964#include "qla_gbl.h" 2992#include "qla_gbl.h"
2965#include "qla_dbg.h" 2993#include "qla_dbg.h"
2966#include "qla_inline.h" 2994#include "qla_inline.h"
2967
2968#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
2969#endif 2995#endif
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 3a9a6ca42266..6271353e8c51 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 93f833960147..631fefc8482d 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 8217c3bcbc2e..1a1b281cea33 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -59,7 +59,6 @@ extern int qla2x00_async_logout(struct scsi_qla_host *, fc_port_t *);
59extern int qla2x00_async_adisc(struct scsi_qla_host *, fc_port_t *, 59extern int qla2x00_async_adisc(struct scsi_qla_host *, fc_port_t *,
60 uint16_t *); 60 uint16_t *);
61extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint32_t, uint32_t); 61extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint32_t, uint32_t);
62extern int qla2x00_async_marker(fc_port_t *, uint16_t, uint8_t);
63extern void qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *, 62extern void qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *,
64 uint16_t *); 63 uint16_t *);
65extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *, 64extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
@@ -68,8 +67,7 @@ extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *,
68 uint16_t *); 67 uint16_t *);
69extern void qla2x00_async_tm_cmd_done(struct scsi_qla_host *, fc_port_t *, 68extern void qla2x00_async_tm_cmd_done(struct scsi_qla_host *, fc_port_t *,
70 struct srb_iocb *); 69 struct srb_iocb *);
71extern void qla2x00_async_marker_done(struct scsi_qla_host *, fc_port_t *, 70extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *);
72 struct srb_iocb *);
73 71
74extern fc_port_t * 72extern fc_port_t *
75qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t ); 73qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t );
@@ -124,6 +122,7 @@ extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *,
124extern void qla2x00_free_host(struct scsi_qla_host *); 122extern void qla2x00_free_host(struct scsi_qla_host *);
125extern void qla2x00_relogin(struct scsi_qla_host *); 123extern void qla2x00_relogin(struct scsi_qla_host *);
126extern void qla2x00_do_work(struct scsi_qla_host *); 124extern void qla2x00_do_work(struct scsi_qla_host *);
125extern void qla2x00_free_fcports(struct scsi_qla_host *);
127 126
128/* 127/*
129 * Global Functions in qla_mid.c source file. 128 * Global Functions in qla_mid.c source file.
@@ -176,10 +175,7 @@ extern int qla2x00_start_scsi(srb_t *sp);
176extern int qla24xx_start_scsi(srb_t *sp); 175extern int qla24xx_start_scsi(srb_t *sp);
177int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *, 176int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
178 uint16_t, uint16_t, uint8_t); 177 uint16_t, uint16_t, uint8_t);
179int __qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
180 uint16_t, uint16_t, uint8_t);
181extern int qla2x00_start_sp(srb_t *); 178extern int qla2x00_start_sp(srb_t *);
182extern void qla2x00_ctx_sp_free(srb_t *);
183extern uint16_t qla24xx_calc_iocbs(uint16_t); 179extern uint16_t qla24xx_calc_iocbs(uint16_t);
184extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t); 180extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t);
185extern int qla24xx_dif_start_scsi(srb_t *); 181extern int qla24xx_dif_start_scsi(srb_t *);
@@ -293,7 +289,9 @@ extern int
293qla24xx_abort_target(struct fc_port *, unsigned int, int); 289qla24xx_abort_target(struct fc_port *, unsigned int, int);
294extern int 290extern int
295qla24xx_lun_reset(struct fc_port *, unsigned int, int); 291qla24xx_lun_reset(struct fc_port *, unsigned int, int);
296 292extern int
293qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *, unsigned int,
294 unsigned int, enum nexus_wait_type);
297extern int 295extern int
298qla2x00_system_error(scsi_qla_host_t *); 296qla2x00_system_error(scsi_qla_host_t *);
299 297
@@ -357,6 +355,11 @@ qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t);
357extern int qla2x00_get_data_rate(scsi_qla_host_t *); 355extern int qla2x00_get_data_rate(scsi_qla_host_t *);
358extern int qla24xx_set_fcp_prio(scsi_qla_host_t *, uint16_t, uint16_t, 356extern int qla24xx_set_fcp_prio(scsi_qla_host_t *, uint16_t, uint16_t,
359 uint16_t *); 357 uint16_t *);
358extern int
359qla81xx_get_port_config(scsi_qla_host_t *, uint16_t *);
360
361extern int
362qla81xx_set_port_config(scsi_qla_host_t *, uint16_t *);
360 363
361/* 364/*
362 * Global Function Prototypes in qla_isr.c source file. 365 * Global Function Prototypes in qla_isr.c source file.
@@ -438,6 +441,7 @@ extern int qla2x00_ga_nxt(scsi_qla_host_t *, fc_port_t *);
438extern int qla2x00_gid_pt(scsi_qla_host_t *, sw_info_t *); 441extern int qla2x00_gid_pt(scsi_qla_host_t *, sw_info_t *);
439extern int qla2x00_gpn_id(scsi_qla_host_t *, sw_info_t *); 442extern int qla2x00_gpn_id(scsi_qla_host_t *, sw_info_t *);
440extern int qla2x00_gnn_id(scsi_qla_host_t *, sw_info_t *); 443extern int qla2x00_gnn_id(scsi_qla_host_t *, sw_info_t *);
444extern void qla2x00_gff_id(scsi_qla_host_t *, sw_info_t *);
441extern int qla2x00_rft_id(scsi_qla_host_t *); 445extern int qla2x00_rft_id(scsi_qla_host_t *);
442extern int qla2x00_rff_id(scsi_qla_host_t *); 446extern int qla2x00_rff_id(scsi_qla_host_t *);
443extern int qla2x00_rnn_id(scsi_qla_host_t *); 447extern int qla2x00_rnn_id(scsi_qla_host_t *);
@@ -482,11 +486,8 @@ extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t,
482 uint16_t, int, uint8_t); 486 uint16_t, int, uint8_t);
483extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t, 487extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t,
484 uint16_t, int); 488 uint16_t, int);
485extern int qla25xx_update_req_que(struct scsi_qla_host *, uint8_t, uint8_t);
486extern void qla2x00_init_response_q_entries(struct rsp_que *); 489extern void qla2x00_init_response_q_entries(struct rsp_que *);
487extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *); 490extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *);
488extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *);
489extern int qla25xx_create_queues(struct scsi_qla_host *, uint8_t);
490extern int qla25xx_delete_queues(struct scsi_qla_host *); 491extern int qla25xx_delete_queues(struct scsi_qla_host *);
491extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t); 492extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t);
492extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t); 493extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t);
@@ -503,17 +504,12 @@ extern int qla82xx_pci_mem_read_2M(struct qla_hw_data *, u64, void *, int);
503extern int qla82xx_pci_mem_write_2M(struct qla_hw_data *, u64, void *, int); 504extern int qla82xx_pci_mem_write_2M(struct qla_hw_data *, u64, void *, int);
504extern char *qla82xx_pci_info_str(struct scsi_qla_host *, char *); 505extern char *qla82xx_pci_info_str(struct scsi_qla_host *, char *);
505extern int qla82xx_pci_region_offset(struct pci_dev *, int); 506extern int qla82xx_pci_region_offset(struct pci_dev *, int);
506extern int qla82xx_pci_region_len(struct pci_dev *, int);
507extern int qla82xx_iospace_config(struct qla_hw_data *); 507extern int qla82xx_iospace_config(struct qla_hw_data *);
508 508
509/* Initialization related functions */ 509/* Initialization related functions */
510extern void qla82xx_reset_chip(struct scsi_qla_host *); 510extern void qla82xx_reset_chip(struct scsi_qla_host *);
511extern void qla82xx_config_rings(struct scsi_qla_host *); 511extern void qla82xx_config_rings(struct scsi_qla_host *);
512extern int qla82xx_nvram_config(struct scsi_qla_host *);
513extern int qla82xx_pinit_from_rom(scsi_qla_host_t *); 512extern int qla82xx_pinit_from_rom(scsi_qla_host_t *);
514extern int qla82xx_load_firmware(scsi_qla_host_t *);
515extern int qla82xx_reset_hw(scsi_qla_host_t *);
516extern int qla82xx_load_risc_blob(scsi_qla_host_t *, uint32_t *);
517extern void qla82xx_watchdog(scsi_qla_host_t *); 513extern void qla82xx_watchdog(scsi_qla_host_t *);
518 514
519/* Firmware and flash related functions */ 515/* Firmware and flash related functions */
@@ -569,7 +565,6 @@ extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *);
569extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *); 565extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *);
570extern void qla82xx_start_iocbs(srb_t *); 566extern void qla82xx_start_iocbs(srb_t *);
571extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *); 567extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *);
572extern void qla82xx_wait_for_pending_commands(scsi_qla_host_t *);
573 568
574/* BSG related functions */ 569/* BSG related functions */
575extern int qla24xx_bsg_request(struct fc_bsg_job *); 570extern int qla24xx_bsg_request(struct fc_bsg_job *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 872c55f049a5..4c083928c2fb 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -1913,3 +1913,75 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
1913 1913
1914 return (rval); 1914 return (rval);
1915} 1915}
1916
1917/**
1918 * qla2x00_gff_id() - SNS Get FC-4 Features (GFF_ID) query.
1919 *
1920 * @ha: HA context
1921 * @list: switch info entries to populate
1922 *
1923 */
1924void
1925qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
1926{
1927 int rval;
1928 uint16_t i;
1929
1930 ms_iocb_entry_t *ms_pkt;
1931 struct ct_sns_req *ct_req;
1932 struct ct_sns_rsp *ct_rsp;
1933 struct qla_hw_data *ha = vha->hw;
1934 uint8_t fcp_scsi_features = 0;
1935
1936 for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
1937 /* Set default FC4 Type as UNKNOWN so the default is to
1938 * Process this port */
1939 list[i].fc4_type = FC4_TYPE_UNKNOWN;
1940
1941 /* Do not attempt GFF_ID if we are not FWI_2 capable */
1942 if (!IS_FWI2_CAPABLE(ha))
1943 continue;
1944
1945 /* Prepare common MS IOCB */
1946 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GFF_ID_REQ_SIZE,
1947 GFF_ID_RSP_SIZE);
1948
1949 /* Prepare CT request */
1950 ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, GFF_ID_CMD,
1951 GFF_ID_RSP_SIZE);
1952 ct_rsp = &ha->ct_sns->p.rsp;
1953
1954 /* Prepare CT arguments -- port_id */
1955 ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
1956 ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
1957 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
1958
1959 /* Execute MS IOCB */
1960 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1961 sizeof(ms_iocb_entry_t));
1962
1963 if (rval != QLA_SUCCESS) {
1964 DEBUG2_3(printk(KERN_INFO
1965 "scsi(%ld): GFF_ID issue IOCB failed "
1966 "(%d).\n", vha->host_no, rval));
1967 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
1968 "GPN_ID") != QLA_SUCCESS) {
1969 DEBUG2_3(printk(KERN_INFO
1970 "scsi(%ld): GFF_ID IOCB status had a "
1971 "failure status code\n", vha->host_no));
1972 } else {
1973 fcp_scsi_features =
1974 ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
1975 fcp_scsi_features &= 0x0f;
1976
1977 if (fcp_scsi_features)
1978 list[i].fc4_type = FC4_TYPE_FCP_SCSI;
1979 else
1980 list[i].fc4_type = FC4_TYPE_OTHER;
1981 }
1982
1983 /* Last device exit. */
1984 if (list[i].d_id.b.rsvd_1 != 0)
1985 break;
1986 }
1987}
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index ab2cc71994c2..d863ed2619b5 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -60,12 +60,11 @@ qla2x00_ctx_sp_timeout(unsigned long __data)
60 ctx = sp->ctx; 60 ctx = sp->ctx;
61 iocb = ctx->u.iocb_cmd; 61 iocb = ctx->u.iocb_cmd;
62 iocb->timeout(sp); 62 iocb->timeout(sp);
63 spin_unlock_irqrestore(&ha->hardware_lock, flags);
64
65 iocb->free(sp); 63 iocb->free(sp);
64 spin_unlock_irqrestore(&ha->hardware_lock, flags);
66} 65}
67 66
68void 67static void
69qla2x00_ctx_sp_free(srb_t *sp) 68qla2x00_ctx_sp_free(srb_t *sp)
70{ 69{
71 struct srb_ctx *ctx = sp->ctx; 70 struct srb_ctx *ctx = sp->ctx;
@@ -122,7 +121,23 @@ done:
122 121
123/* Asynchronous Login/Logout Routines -------------------------------------- */ 122/* Asynchronous Login/Logout Routines -------------------------------------- */
124 123
125#define ELS_TMO_2_RATOV(ha) ((ha)->r_a_tov / 10 * 2) 124static inline unsigned long
125qla2x00_get_async_timeout(struct scsi_qla_host *vha)
126{
127 unsigned long tmo;
128 struct qla_hw_data *ha = vha->hw;
129
130 /* Firmware should use switch negotiated r_a_tov for timeout. */
131 tmo = ha->r_a_tov / 10 * 2;
132 if (!IS_FWI2_CAPABLE(ha)) {
133 /*
134 * Except for earlier ISPs where the timeout is seeded from the
135 * initialization control block.
136 */
137 tmo = ha->login_timeout;
138 }
139 return tmo;
140}
126 141
127static void 142static void
128qla2x00_async_iocb_timeout(srb_t *sp) 143qla2x00_async_iocb_timeout(srb_t *sp)
@@ -131,12 +146,22 @@ qla2x00_async_iocb_timeout(srb_t *sp)
131 struct srb_ctx *ctx = sp->ctx; 146 struct srb_ctx *ctx = sp->ctx;
132 147
133 DEBUG2(printk(KERN_WARNING 148 DEBUG2(printk(KERN_WARNING
134 "scsi(%ld:%x): Async-%s timeout.\n", 149 "scsi(%ld:%x): Async-%s timeout - portid=%02x%02x%02x.\n",
135 fcport->vha->host_no, sp->handle, ctx->name)); 150 fcport->vha->host_no, sp->handle,
151 ctx->name, fcport->d_id.b.domain,
152 fcport->d_id.b.area, fcport->d_id.b.al_pa));
136 153
137 fcport->flags &= ~FCF_ASYNC_SENT; 154 fcport->flags &= ~FCF_ASYNC_SENT;
138 if (ctx->type == SRB_LOGIN_CMD) 155 if (ctx->type == SRB_LOGIN_CMD) {
156 struct srb_iocb *lio = ctx->u.iocb_cmd;
139 qla2x00_post_async_logout_work(fcport->vha, fcport, NULL); 157 qla2x00_post_async_logout_work(fcport->vha, fcport, NULL);
158 /* Retry as needed. */
159 lio->u.logio.data[0] = MBS_COMMAND_ERROR;
160 lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
161 QLA_LOGIO_LOGIN_RETRIED : 0;
162 qla2x00_post_async_login_done_work(fcport->vha, fcport,
163 lio->u.logio.data);
164 }
140} 165}
141 166
142static void 167static void
@@ -154,7 +179,6 @@ int
154qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, 179qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
155 uint16_t *data) 180 uint16_t *data)
156{ 181{
157 struct qla_hw_data *ha = vha->hw;
158 srb_t *sp; 182 srb_t *sp;
159 struct srb_ctx *ctx; 183 struct srb_ctx *ctx;
160 struct srb_iocb *lio; 184 struct srb_iocb *lio;
@@ -162,7 +186,7 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
162 186
163 rval = QLA_FUNCTION_FAILED; 187 rval = QLA_FUNCTION_FAILED;
164 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx), 188 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
165 ELS_TMO_2_RATOV(ha) + 2); 189 qla2x00_get_async_timeout(vha) + 2);
166 if (!sp) 190 if (!sp)
167 goto done; 191 goto done;
168 192
@@ -206,7 +230,6 @@ qla2x00_async_logout_ctx_done(srb_t *sp)
206int 230int
207qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport) 231qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
208{ 232{
209 struct qla_hw_data *ha = vha->hw;
210 srb_t *sp; 233 srb_t *sp;
211 struct srb_ctx *ctx; 234 struct srb_ctx *ctx;
212 struct srb_iocb *lio; 235 struct srb_iocb *lio;
@@ -214,7 +237,7 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
214 237
215 rval = QLA_FUNCTION_FAILED; 238 rval = QLA_FUNCTION_FAILED;
216 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx), 239 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
217 ELS_TMO_2_RATOV(ha) + 2); 240 qla2x00_get_async_timeout(vha) + 2);
218 if (!sp) 241 if (!sp)
219 goto done; 242 goto done;
220 243
@@ -255,7 +278,6 @@ int
255qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport, 278qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
256 uint16_t *data) 279 uint16_t *data)
257{ 280{
258 struct qla_hw_data *ha = vha->hw;
259 srb_t *sp; 281 srb_t *sp;
260 struct srb_ctx *ctx; 282 struct srb_ctx *ctx;
261 struct srb_iocb *lio; 283 struct srb_iocb *lio;
@@ -263,7 +285,7 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
263 285
264 rval = QLA_FUNCTION_FAILED; 286 rval = QLA_FUNCTION_FAILED;
265 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx), 287 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
266 ELS_TMO_2_RATOV(ha) + 2); 288 qla2x00_get_async_timeout(vha) + 2);
267 if (!sp) 289 if (!sp)
268 goto done; 290 goto done;
269 291
@@ -307,7 +329,6 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
307 uint32_t tag) 329 uint32_t tag)
308{ 330{
309 struct scsi_qla_host *vha = fcport->vha; 331 struct scsi_qla_host *vha = fcport->vha;
310 struct qla_hw_data *ha = vha->hw;
311 srb_t *sp; 332 srb_t *sp;
312 struct srb_ctx *ctx; 333 struct srb_ctx *ctx;
313 struct srb_iocb *tcf; 334 struct srb_iocb *tcf;
@@ -315,7 +336,7 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
315 336
316 rval = QLA_FUNCTION_FAILED; 337 rval = QLA_FUNCTION_FAILED;
317 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx), 338 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
318 ELS_TMO_2_RATOV(ha) + 2); 339 qla2x00_get_async_timeout(vha) + 2);
319 if (!sp) 340 if (!sp)
320 goto done; 341 goto done;
321 342
@@ -346,58 +367,6 @@ done:
346 return rval; 367 return rval;
347} 368}
348 369
349static void
350qla2x00_async_marker_ctx_done(srb_t *sp)
351{
352 struct srb_ctx *ctx = sp->ctx;
353 struct srb_iocb *iocb = (struct srb_iocb *)ctx->u.iocb_cmd;
354
355 qla2x00_async_marker_done(sp->fcport->vha, sp->fcport, iocb);
356 iocb->free(sp);
357}
358
359int
360qla2x00_async_marker(fc_port_t *fcport, uint16_t lun, uint8_t modif)
361{
362 struct scsi_qla_host *vha = fcport->vha;
363 srb_t *sp;
364 struct srb_ctx *ctx;
365 struct srb_iocb *mrk;
366 int rval;
367
368 rval = QLA_FUNCTION_FAILED;
369 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx), 0);
370 if (!sp)
371 goto done;
372
373 ctx = sp->ctx;
374 ctx->type = SRB_MARKER_CMD;
375 ctx->name = "marker";
376 mrk = ctx->u.iocb_cmd;
377 mrk->u.marker.lun = lun;
378 mrk->u.marker.modif = modif;
379 mrk->timeout = qla2x00_async_iocb_timeout;
380 mrk->done = qla2x00_async_marker_ctx_done;
381
382 rval = qla2x00_start_sp(sp);
383 if (rval != QLA_SUCCESS)
384 goto done_free_sp;
385
386 DEBUG2(printk(KERN_DEBUG
387 "scsi(%ld:%x): Async-marker - loop-id=%x "
388 "portid=%02x%02x%02x.\n",
389 fcport->vha->host_no, sp->handle, fcport->loop_id,
390 fcport->d_id.b.domain, fcport->d_id.b.area,
391 fcport->d_id.b.al_pa));
392
393 return rval;
394
395done_free_sp:
396 mrk->free(sp);
397done:
398 return rval;
399}
400
401void 370void
402qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport, 371qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
403 uint16_t *data) 372 uint16_t *data)
@@ -418,10 +387,11 @@ qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
418 if (data[1] & QLA_LOGIO_LOGIN_RETRIED) 387 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
419 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 388 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
420 else 389 else
421 qla2x00_mark_device_lost(vha, fcport, 1, 0); 390 qla2x00_mark_device_lost(vha, fcport, 1, 1);
422 break; 391 break;
423 case MBS_PORT_ID_USED: 392 case MBS_PORT_ID_USED:
424 fcport->loop_id = data[1]; 393 fcport->loop_id = data[1];
394 qla2x00_post_async_logout_work(vha, fcport, NULL);
425 qla2x00_post_async_login_work(vha, fcport, NULL); 395 qla2x00_post_async_login_work(vha, fcport, NULL);
426 break; 396 break;
427 case MBS_LOOP_ID_USED: 397 case MBS_LOOP_ID_USED:
@@ -429,7 +399,7 @@ qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
429 rval = qla2x00_find_new_loop_id(vha, fcport); 399 rval = qla2x00_find_new_loop_id(vha, fcport);
430 if (rval != QLA_SUCCESS) { 400 if (rval != QLA_SUCCESS) {
431 fcport->flags &= ~FCF_ASYNC_SENT; 401 fcport->flags &= ~FCF_ASYNC_SENT;
432 qla2x00_mark_device_lost(vha, fcport, 1, 0); 402 qla2x00_mark_device_lost(vha, fcport, 1, 1);
433 break; 403 break;
434 } 404 }
435 qla2x00_post_async_login_work(vha, fcport, NULL); 405 qla2x00_post_async_login_work(vha, fcport, NULL);
@@ -461,7 +431,7 @@ qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
461 if (data[1] & QLA_LOGIO_LOGIN_RETRIED) 431 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
462 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 432 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
463 else 433 else
464 qla2x00_mark_device_lost(vha, fcport, 1, 0); 434 qla2x00_mark_device_lost(vha, fcport, 1, 1);
465 435
466 return; 436 return;
467} 437}
@@ -478,7 +448,8 @@ qla2x00_async_tm_cmd_done(struct scsi_qla_host *vha, fc_port_t *fcport,
478 lun = (uint16_t)iocb->u.tmf.lun; 448 lun = (uint16_t)iocb->u.tmf.lun;
479 449
480 /* Issue Marker IOCB */ 450 /* Issue Marker IOCB */
481 rval = qla2x00_async_marker(fcport, lun, 451 rval = qla2x00_marker(vha, vha->hw->req_q_map[0],
452 vha->hw->rsp_q_map[0], fcport->loop_id, lun,
482 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); 453 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
483 454
484 if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) { 455 if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
@@ -490,24 +461,6 @@ qla2x00_async_tm_cmd_done(struct scsi_qla_host *vha, fc_port_t *fcport,
490 return; 461 return;
491} 462}
492 463
493void
494qla2x00_async_marker_done(struct scsi_qla_host *vha, fc_port_t *fcport,
495 struct srb_iocb *iocb)
496{
497 /*
498 * Currently we dont have any specific post response processing
499 * for this IOCB. We'll just return success or failed
500 * depending on whether the IOCB command succeeded or failed.
501 */
502 if (iocb->u.tmf.data) {
503 DEBUG2_3_11(printk(KERN_WARNING
504 "%s(%ld): Marker IOCB failed (%x).\n",
505 __func__, vha->host_no, iocb->u.tmf.data));
506 }
507
508 return;
509}
510
511/****************************************************************************/ 464/****************************************************************************/
512/* QLogic ISP2x00 Hardware Support Functions. */ 465/* QLogic ISP2x00 Hardware Support Functions. */
513/****************************************************************************/ 466/****************************************************************************/
@@ -613,11 +566,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
613 } 566 }
614 } 567 }
615 568
616 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) { 569 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
617 if (qla24xx_read_fcp_prio_cfg(vha)) 570 qla24xx_read_fcp_prio_cfg(vha);
618 qla_printk(KERN_ERR, ha,
619 "Unable to read FCP priority data.\n");
620 }
621 571
622 return (rval); 572 return (rval);
623} 573}
@@ -1452,8 +1402,11 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
1452 1402
1453 if (IS_QLA82XX(ha)) { 1403 if (IS_QLA82XX(ha)) {
1454 rval = ha->isp_ops->load_risc(vha, &srisc_address); 1404 rval = ha->isp_ops->load_risc(vha, &srisc_address);
1455 if (rval == QLA_SUCCESS) 1405 if (rval == QLA_SUCCESS) {
1406 qla2x00_stop_firmware(vha);
1456 goto enable_82xx_npiv; 1407 goto enable_82xx_npiv;
1408 } else
1409 goto failed;
1457 } 1410 }
1458 1411
1459 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) { 1412 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
@@ -1960,7 +1913,8 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1960 } 1913 }
1961 } else { 1914 } else {
1962 /* Mailbox cmd failed. Timeout on min_wait. */ 1915 /* Mailbox cmd failed. Timeout on min_wait. */
1963 if (time_after_eq(jiffies, mtime)) 1916 if (time_after_eq(jiffies, mtime) ||
1917 (IS_QLA82XX(ha) && ha->flags.fw_hung))
1964 break; 1918 break;
1965 } 1919 }
1966 1920
@@ -2396,7 +2350,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
2396 ha->retry_count = nv->retry_count; 2350 ha->retry_count = nv->retry_count;
2397 2351
2398 /* Set minimum login_timeout to 4 seconds. */ 2352 /* Set minimum login_timeout to 4 seconds. */
2399 if (nv->login_timeout < ql2xlogintimeout) 2353 if (nv->login_timeout != ql2xlogintimeout)
2400 nv->login_timeout = ql2xlogintimeout; 2354 nv->login_timeout = ql2xlogintimeout;
2401 if (nv->login_timeout < 4) 2355 if (nv->login_timeout < 4)
2402 nv->login_timeout = 4; 2356 nv->login_timeout = 4;
@@ -2639,7 +2593,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
2639 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 2593 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2640 if (test_bit(RSCN_UPDATE, &save_flags)) { 2594 if (test_bit(RSCN_UPDATE, &save_flags)) {
2641 set_bit(RSCN_UPDATE, &vha->dpc_flags); 2595 set_bit(RSCN_UPDATE, &vha->dpc_flags);
2642 vha->flags.rscn_queue_overflow = 1; 2596 if (!IS_ALOGIO_CAPABLE(ha))
2597 vha->flags.rscn_queue_overflow = 1;
2643 } 2598 }
2644 } 2599 }
2645 2600
@@ -3124,7 +3079,6 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3124 return (rval); 3079 return (rval);
3125} 3080}
3126 3081
3127
3128/* 3082/*
3129 * qla2x00_find_all_fabric_devs 3083 * qla2x00_find_all_fabric_devs
3130 * 3084 *
@@ -3177,6 +3131,10 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3177 qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) { 3131 qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) {
3178 qla2x00_gpsc(vha, swl); 3132 qla2x00_gpsc(vha, swl);
3179 } 3133 }
3134
3135 /* If other queries succeeded probe for FC-4 type */
3136 if (swl)
3137 qla2x00_gff_id(vha, swl);
3180 } 3138 }
3181 swl_idx = 0; 3139 swl_idx = 0;
3182 3140
@@ -3197,8 +3155,9 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3197 if (qla2x00_is_reserved_id(vha, loop_id)) 3155 if (qla2x00_is_reserved_id(vha, loop_id))
3198 continue; 3156 continue;
3199 3157
3200 if (atomic_read(&vha->loop_down_timer) || 3158 if (ha->current_topology == ISP_CFG_FL &&
3201 LOOP_TRANSITION(vha)) { 3159 (atomic_read(&vha->loop_down_timer) ||
3160 LOOP_TRANSITION(vha))) {
3202 atomic_set(&vha->loop_down_timer, 0); 3161 atomic_set(&vha->loop_down_timer, 0);
3203 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 3162 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3204 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 3163 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
@@ -3217,6 +3176,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3217 memcpy(new_fcport->fabric_port_name, 3176 memcpy(new_fcport->fabric_port_name,
3218 swl[swl_idx].fabric_port_name, WWN_SIZE); 3177 swl[swl_idx].fabric_port_name, WWN_SIZE);
3219 new_fcport->fp_speed = swl[swl_idx].fp_speed; 3178 new_fcport->fp_speed = swl[swl_idx].fp_speed;
3179 new_fcport->fc4_type = swl[swl_idx].fc4_type;
3220 3180
3221 if (swl[swl_idx].d_id.b.rsvd_1 != 0) { 3181 if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
3222 last_dev = 1; 3182 last_dev = 1;
@@ -3278,6 +3238,11 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3278 if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0) 3238 if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
3279 continue; 3239 continue;
3280 3240
3241 /* Bypass ports whose FCP-4 type is not FCP_SCSI */
3242 if (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
3243 new_fcport->fc4_type != FC4_TYPE_UNKNOWN)
3244 continue;
3245
3281 /* Locate matching device in database. */ 3246 /* Locate matching device in database. */
3282 found = 0; 3247 found = 0;
3283 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3248 list_for_each_entry(fcport, &vha->vp_fcports, list) {
@@ -3868,8 +3833,13 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
3868 } 3833 }
3869 3834
3870 /* Make sure for ISP 82XX IO DMA is complete */ 3835 /* Make sure for ISP 82XX IO DMA is complete */
3871 if (IS_QLA82XX(ha)) 3836 if (IS_QLA82XX(ha)) {
3872 qla82xx_wait_for_pending_commands(vha); 3837 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
3838 WAIT_HOST) == QLA_SUCCESS) {
3839 DEBUG2(qla_printk(KERN_INFO, ha,
3840 "Done wait for pending commands\n"));
3841 }
3842 }
3873 3843
3874 /* Requeue all commands in outstanding command list. */ 3844 /* Requeue all commands in outstanding command list. */
3875 qla2x00_abort_all_cmds(vha, DID_RESET << 16); 3845 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 84c2fea154d2..48f97a92e33d 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 8ef945365412..4e4c21fafe3a 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -11,8 +11,6 @@
11 11
12#include <scsi/scsi_tcq.h> 12#include <scsi/scsi_tcq.h>
13 13
14static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
15 struct rsp_que *rsp);
16static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *); 14static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);
17 15
18static void qla25xx_set_que(srb_t *, struct rsp_que **); 16static void qla25xx_set_que(srb_t *, struct rsp_que **);
@@ -463,7 +461,7 @@ queuing_error:
463 * 461 *
464 * Returns non-zero if a failure occurred, else zero. 462 * Returns non-zero if a failure occurred, else zero.
465 */ 463 */
466int 464static int
467__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req, 465__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
468 struct rsp_que *rsp, uint16_t loop_id, 466 struct rsp_que *rsp, uint16_t loop_id,
469 uint16_t lun, uint8_t type) 467 uint16_t lun, uint8_t type)
@@ -474,7 +472,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
474 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 472 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
475 473
476 mrk24 = NULL; 474 mrk24 = NULL;
477 mrk = (mrk_entry_t *)qla2x00_req_pkt(vha, req, rsp); 475 mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
478 if (mrk == NULL) { 476 if (mrk == NULL) {
479 DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n", 477 DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
480 __func__, base_vha->host_no)); 478 __func__, base_vha->host_no));
@@ -521,84 +519,6 @@ qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
521} 519}
522 520
523/** 521/**
524 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
525 * @ha: HA context
526 *
527 * Note: The caller must hold the hardware lock before calling this routine.
528 *
529 * Returns NULL if function failed, else, a pointer to the request packet.
530 */
531static request_t *
532qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
533 struct rsp_que *rsp)
534{
535 struct qla_hw_data *ha = vha->hw;
536 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
537 request_t *pkt = NULL;
538 uint16_t cnt;
539 uint32_t *dword_ptr;
540 uint32_t timer;
541 uint16_t req_cnt = 1;
542
543 /* Wait 1 second for slot. */
544 for (timer = HZ; timer; timer--) {
545 if ((req_cnt + 2) >= req->cnt) {
546 /* Calculate number of free request entries. */
547 if (ha->mqenable)
548 cnt = (uint16_t)
549 RD_REG_DWORD(&reg->isp25mq.req_q_out);
550 else {
551 if (IS_QLA82XX(ha))
552 cnt = (uint16_t)RD_REG_DWORD(
553 &reg->isp82.req_q_out);
554 else if (IS_FWI2_CAPABLE(ha))
555 cnt = (uint16_t)RD_REG_DWORD(
556 &reg->isp24.req_q_out);
557 else
558 cnt = qla2x00_debounce_register(
559 ISP_REQ_Q_OUT(ha, &reg->isp));
560 }
561 if (req->ring_index < cnt)
562 req->cnt = cnt - req->ring_index;
563 else
564 req->cnt = req->length -
565 (req->ring_index - cnt);
566 }
567 /* If room for request in request ring. */
568 if ((req_cnt + 2) < req->cnt) {
569 req->cnt--;
570 pkt = req->ring_ptr;
571
572 /* Zero out packet. */
573 dword_ptr = (uint32_t *)pkt;
574 for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
575 *dword_ptr++ = 0;
576
577 /* Set entry count. */
578 pkt->entry_count = 1;
579
580 break;
581 }
582
583 /* Release ring specific lock */
584 spin_unlock_irq(&ha->hardware_lock);
585
586 udelay(2); /* 2 us */
587
588 /* Check for pending interrupts. */
589 /* During init we issue marker directly */
590 if (!vha->marker_needed && !vha->flags.init_done)
591 qla2x00_poll(rsp);
592 spin_lock_irq(&ha->hardware_lock);
593 }
594 if (!pkt) {
595 DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
596 }
597
598 return (pkt);
599}
600
601/**
602 * qla2x00_isp_cmd() - Modify the request ring pointer. 522 * qla2x00_isp_cmd() - Modify the request ring pointer.
603 * @ha: HA context 523 * @ha: HA context
604 * 524 *
@@ -792,6 +712,25 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
792 * match LBA in CDB + N 712 * match LBA in CDB + N
793 */ 713 */
794 case SCSI_PROT_DIF_TYPE2: 714 case SCSI_PROT_DIF_TYPE2:
715 if (!ql2xenablehba_err_chk)
716 break;
717
718 if (scsi_prot_sg_count(cmd)) {
719 spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
720 scsi_prot_sglist(cmd)[0].offset;
721 pkt->app_tag = swab32(spt->app_tag);
722 pkt->app_tag_mask[0] = 0xff;
723 pkt->app_tag_mask[1] = 0xff;
724 }
725
726 pkt->ref_tag = cpu_to_le32((uint32_t)
727 (0xffffffff & scsi_get_lba(cmd)));
728
729 /* enable ALL bytes of the ref tag */
730 pkt->ref_tag_mask[0] = 0xff;
731 pkt->ref_tag_mask[1] = 0xff;
732 pkt->ref_tag_mask[2] = 0xff;
733 pkt->ref_tag_mask[3] = 0xff;
795 break; 734 break;
796 735
797 /* For Type 3 protection: 16 bit GUARD only */ 736 /* For Type 3 protection: 16 bit GUARD only */
@@ -1142,7 +1081,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1142 total_bytes = data_bytes; 1081 total_bytes = data_bytes;
1143 dif_bytes = 0; 1082 dif_bytes = 0;
1144 blk_size = cmd->device->sector_size; 1083 blk_size = cmd->device->sector_size;
1145 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE1) { 1084 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
1146 dif_bytes = (data_bytes / blk_size) * 8; 1085 dif_bytes = (data_bytes / blk_size) * 8;
1147 total_bytes += dif_bytes; 1086 total_bytes += dif_bytes;
1148 } 1087 }
@@ -1180,6 +1119,12 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1180 vha->host_no, dif_bytes, dif_bytes, total_bytes, total_bytes, 1119 vha->host_no, dif_bytes, dif_bytes, total_bytes, total_bytes,
1181 crc_ctx_pkt->blk_size, crc_ctx_pkt->blk_size)); 1120 crc_ctx_pkt->blk_size, crc_ctx_pkt->blk_size));
1182 1121
1122 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1123 DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
1124 __func__, data_bytes));
1125 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
1126 return QLA_SUCCESS;
1127 }
1183 /* Walks data segments */ 1128 /* Walks data segments */
1184 1129
1185 cmd_pkt->control_flags |= 1130 cmd_pkt->control_flags |=
@@ -1390,9 +1335,11 @@ qla24xx_dif_start_scsi(srb_t *sp)
1390 1335
1391#define QDSS_GOT_Q_SPACE BIT_0 1336#define QDSS_GOT_Q_SPACE BIT_0
1392 1337
1393 /* Only process protection in this routine */ 1338 /* Only process protection or >16 cdb in this routine */
1394 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) 1339 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1395 return qla24xx_start_scsi(sp); 1340 if (cmd->cmd_len <= 16)
1341 return qla24xx_start_scsi(sp);
1342 }
1396 1343
1397 /* Setup device pointers. */ 1344 /* Setup device pointers. */
1398 1345
@@ -1559,11 +1506,9 @@ static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
1559} 1506}
1560 1507
1561/* Generic Control-SRB manipulation functions. */ 1508/* Generic Control-SRB manipulation functions. */
1562 1509void *
1563static void * 1510qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1564qla2x00_alloc_iocbs(srb_t *sp)
1565{ 1511{
1566 scsi_qla_host_t *vha = sp->fcport->vha;
1567 struct qla_hw_data *ha = vha->hw; 1512 struct qla_hw_data *ha = vha->hw;
1568 struct req_que *req = ha->req_q_map[0]; 1513 struct req_que *req = ha->req_q_map[0];
1569 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id); 1514 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
@@ -1573,6 +1518,10 @@ qla2x00_alloc_iocbs(srb_t *sp)
1573 1518
1574 pkt = NULL; 1519 pkt = NULL;
1575 req_cnt = 1; 1520 req_cnt = 1;
1521 handle = 0;
1522
1523 if (!sp)
1524 goto skip_cmd_array;
1576 1525
1577 /* Check for room in outstanding command list. */ 1526 /* Check for room in outstanding command list. */
1578 handle = req->current_outstanding_cmd; 1527 handle = req->current_outstanding_cmd;
@@ -1586,10 +1535,18 @@ qla2x00_alloc_iocbs(srb_t *sp)
1586 if (index == MAX_OUTSTANDING_COMMANDS) 1535 if (index == MAX_OUTSTANDING_COMMANDS)
1587 goto queuing_error; 1536 goto queuing_error;
1588 1537
1538 /* Prep command array. */
1539 req->current_outstanding_cmd = handle;
1540 req->outstanding_cmds[handle] = sp;
1541 sp->handle = handle;
1542
1543skip_cmd_array:
1589 /* Check for room on request queue. */ 1544 /* Check for room on request queue. */
1590 if (req->cnt < req_cnt) { 1545 if (req->cnt < req_cnt) {
1591 if (ha->mqenable) 1546 if (ha->mqenable)
1592 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out); 1547 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
1548 else if (IS_QLA82XX(ha))
1549 cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
1593 else if (IS_FWI2_CAPABLE(ha)) 1550 else if (IS_FWI2_CAPABLE(ha))
1594 cnt = RD_REG_DWORD(&reg->isp24.req_q_out); 1551 cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
1595 else 1552 else
@@ -1606,15 +1563,11 @@ qla2x00_alloc_iocbs(srb_t *sp)
1606 goto queuing_error; 1563 goto queuing_error;
1607 1564
1608 /* Prep packet */ 1565 /* Prep packet */
1609 req->current_outstanding_cmd = handle;
1610 req->outstanding_cmds[handle] = sp;
1611 req->cnt -= req_cnt; 1566 req->cnt -= req_cnt;
1612
1613 pkt = req->ring_ptr; 1567 pkt = req->ring_ptr;
1614 memset(pkt, 0, REQUEST_ENTRY_SIZE); 1568 memset(pkt, 0, REQUEST_ENTRY_SIZE);
1615 pkt->entry_count = req_cnt; 1569 pkt->entry_count = req_cnt;
1616 pkt->handle = handle; 1570 pkt->handle = handle;
1617 sp->handle = handle;
1618 1571
1619queuing_error: 1572queuing_error:
1620 return pkt; 1573 return pkt;
@@ -1683,7 +1636,7 @@ qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1683 struct srb_iocb *lio = ctx->u.iocb_cmd; 1636 struct srb_iocb *lio = ctx->u.iocb_cmd;
1684 uint16_t opts; 1637 uint16_t opts;
1685 1638
1686 mbx->entry_type = MBX_IOCB_TYPE;; 1639 mbx->entry_type = MBX_IOCB_TYPE;
1687 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); 1640 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1688 mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT); 1641 mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
1689 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0; 1642 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
@@ -1718,7 +1671,7 @@ qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1718{ 1671{
1719 struct qla_hw_data *ha = sp->fcport->vha->hw; 1672 struct qla_hw_data *ha = sp->fcport->vha->hw;
1720 1673
1721 mbx->entry_type = MBX_IOCB_TYPE;; 1674 mbx->entry_type = MBX_IOCB_TYPE;
1722 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); 1675 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1723 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT); 1676 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
1724 mbx->mb1 = HAS_EXTENDED_IDS(ha) ? 1677 mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
@@ -1795,31 +1748,6 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
1795} 1748}
1796 1749
1797static void 1750static void
1798qla24xx_marker_iocb(srb_t *sp, struct mrk_entry_24xx *mrk)
1799{
1800 uint16_t lun;
1801 uint8_t modif;
1802 struct fc_port *fcport = sp->fcport;
1803 scsi_qla_host_t *vha = fcport->vha;
1804 struct srb_ctx *ctx = sp->ctx;
1805 struct srb_iocb *iocb = ctx->u.iocb_cmd;
1806 struct req_que *req = vha->req;
1807
1808 lun = iocb->u.marker.lun;
1809 modif = iocb->u.marker.modif;
1810 mrk->entry_type = MARKER_TYPE;
1811 mrk->modifier = modif;
1812 if (modif != MK_SYNC_ALL) {
1813 mrk->nport_handle = cpu_to_le16(fcport->loop_id);
1814 mrk->lun[1] = LSB(lun);
1815 mrk->lun[2] = MSB(lun);
1816 host_to_fcp_swap(mrk->lun, sizeof(mrk->lun));
1817 mrk->vp_index = vha->vp_idx;
1818 mrk->handle = MAKE_HANDLE(req->id, mrk->handle);
1819 }
1820}
1821
1822static void
1823qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) 1751qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
1824{ 1752{
1825 struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job; 1753 struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
@@ -1864,6 +1792,82 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
1864} 1792}
1865 1793
1866static void 1794static void
1795qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
1796{
1797 uint16_t avail_dsds;
1798 uint32_t *cur_dsd;
1799 struct scatterlist *sg;
1800 int index;
1801 uint16_t tot_dsds;
1802 scsi_qla_host_t *vha = sp->fcport->vha;
1803 struct qla_hw_data *ha = vha->hw;
1804 struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
1805 int loop_iterartion = 0;
1806 int cont_iocb_prsnt = 0;
1807 int entry_count = 1;
1808
1809 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
1810 ct_iocb->entry_type = CT_IOCB_TYPE;
1811 ct_iocb->entry_status = 0;
1812 ct_iocb->handle1 = sp->handle;
1813 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
1814 ct_iocb->status = __constant_cpu_to_le16(0);
1815 ct_iocb->control_flags = __constant_cpu_to_le16(0);
1816 ct_iocb->timeout = 0;
1817 ct_iocb->cmd_dsd_count =
1818 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
1819 ct_iocb->total_dsd_count =
1820 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
1821 ct_iocb->req_bytecount =
1822 cpu_to_le32(bsg_job->request_payload.payload_len);
1823 ct_iocb->rsp_bytecount =
1824 cpu_to_le32(bsg_job->reply_payload.payload_len);
1825
1826 ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
1827 (bsg_job->request_payload.sg_list)));
1828 ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
1829 (bsg_job->request_payload.sg_list)));
1830 ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
1831
1832 ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
1833 (bsg_job->reply_payload.sg_list)));
1834 ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
1835 (bsg_job->reply_payload.sg_list)));
1836 ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
1837
1838 avail_dsds = 1;
1839 cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
1840 index = 0;
1841 tot_dsds = bsg_job->reply_payload.sg_cnt;
1842
1843 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
1844 dma_addr_t sle_dma;
1845 cont_a64_entry_t *cont_pkt;
1846
1847 /* Allocate additional continuation packets? */
1848 if (avail_dsds == 0) {
1849 /*
1850 * Five DSDs are available in the Cont.
1851 * Type 1 IOCB.
1852 */
1853 cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
1854 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
1855 avail_dsds = 5;
1856 cont_iocb_prsnt = 1;
1857 entry_count++;
1858 }
1859
1860 sle_dma = sg_dma_address(sg);
1861 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1862 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1863 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1864 loop_iterartion++;
1865 avail_dsds--;
1866 }
1867 ct_iocb->entry_count = entry_count;
1868}
1869
1870static void
1867qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) 1871qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
1868{ 1872{
1869 uint16_t avail_dsds; 1873 uint16_t avail_dsds;
@@ -1945,7 +1949,7 @@ qla2x00_start_sp(srb_t *sp)
1945 1949
1946 rval = QLA_FUNCTION_FAILED; 1950 rval = QLA_FUNCTION_FAILED;
1947 spin_lock_irqsave(&ha->hardware_lock, flags); 1951 spin_lock_irqsave(&ha->hardware_lock, flags);
1948 pkt = qla2x00_alloc_iocbs(sp); 1952 pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
1949 if (!pkt) 1953 if (!pkt)
1950 goto done; 1954 goto done;
1951 1955
@@ -1966,7 +1970,9 @@ qla2x00_start_sp(srb_t *sp)
1966 qla24xx_els_iocb(sp, pkt); 1970 qla24xx_els_iocb(sp, pkt);
1967 break; 1971 break;
1968 case SRB_CT_CMD: 1972 case SRB_CT_CMD:
1969 qla24xx_ct_iocb(sp, pkt); 1973 IS_FWI2_CAPABLE(ha) ?
1974 qla24xx_ct_iocb(sp, pkt) :
1975 qla2x00_ct_iocb(sp, pkt);
1970 break; 1976 break;
1971 case SRB_ADISC_CMD: 1977 case SRB_ADISC_CMD:
1972 IS_FWI2_CAPABLE(ha) ? 1978 IS_FWI2_CAPABLE(ha) ?
@@ -1976,9 +1982,6 @@ qla2x00_start_sp(srb_t *sp)
1976 case SRB_TM_CMD: 1982 case SRB_TM_CMD:
1977 qla24xx_tm_iocb(sp, pkt); 1983 qla24xx_tm_iocb(sp, pkt);
1978 break; 1984 break;
1979 case SRB_MARKER_CMD:
1980 qla24xx_marker_iocb(sp, pkt);
1981 break;
1982 default: 1985 default:
1983 break; 1986 break;
1984 } 1987 }
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index be3d8bed2ecf..6982ba70e12a 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -545,10 +545,13 @@ skip_rio:
545 if (IS_QLA2100(ha)) 545 if (IS_QLA2100(ha))
546 break; 546 break;
547 547
548 if (IS_QLA8XXX_TYPE(ha)) 548 if (IS_QLA8XXX_TYPE(ha)) {
549 DEBUG2(printk("scsi(%ld): DCBX Completed -- %04x %04x " 549 DEBUG2(printk("scsi(%ld): DCBX Completed -- %04x %04x "
550 "%04x\n", vha->host_no, mb[1], mb[2], mb[3])); 550 "%04x\n", vha->host_no, mb[1], mb[2], mb[3]));
551 else 551 if (ha->notify_dcbx_comp)
552 complete(&ha->dcbx_comp);
553
554 } else
552 DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE " 555 DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE "
553 "received.\n", vha->host_no)); 556 "received.\n", vha->host_no));
554 557
@@ -918,12 +921,15 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
918 QLA_LOGIO_LOGIN_RETRIED : 0; 921 QLA_LOGIO_LOGIN_RETRIED : 0;
919 if (mbx->entry_status) { 922 if (mbx->entry_status) {
920 DEBUG2(printk(KERN_WARNING 923 DEBUG2(printk(KERN_WARNING
921 "scsi(%ld:%x): Async-%s error entry - entry-status=%x " 924 "scsi(%ld:%x): Async-%s error entry - portid=%02x%02x%02x "
922 "status=%x state-flag=%x status-flags=%x.\n", 925 "entry-status=%x status=%x state-flag=%x "
926 "status-flags=%x.\n",
923 fcport->vha->host_no, sp->handle, type, 927 fcport->vha->host_no, sp->handle, type,
924 mbx->entry_status, le16_to_cpu(mbx->status), 928 fcport->d_id.b.domain, fcport->d_id.b.area,
925 le16_to_cpu(mbx->state_flags), 929 fcport->d_id.b.al_pa, mbx->entry_status,
930 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
926 le16_to_cpu(mbx->status_flags))); 931 le16_to_cpu(mbx->status_flags)));
932
927 DEBUG2(qla2x00_dump_buffer((uint8_t *)mbx, sizeof(*mbx))); 933 DEBUG2(qla2x00_dump_buffer((uint8_t *)mbx, sizeof(*mbx)));
928 934
929 goto logio_done; 935 goto logio_done;
@@ -935,16 +941,18 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
935 status = 0; 941 status = 0;
936 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) { 942 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
937 DEBUG2(printk(KERN_DEBUG 943 DEBUG2(printk(KERN_DEBUG
938 "scsi(%ld:%x): Async-%s complete - mbx1=%x.\n", 944 "scsi(%ld:%x): Async-%s complete - portid=%02x%02x%02x "
945 "mbx1=%x.\n",
939 fcport->vha->host_no, sp->handle, type, 946 fcport->vha->host_no, sp->handle, type,
940 le16_to_cpu(mbx->mb1))); 947 fcport->d_id.b.domain, fcport->d_id.b.area,
948 fcport->d_id.b.al_pa, le16_to_cpu(mbx->mb1)));
941 949
942 data[0] = MBS_COMMAND_COMPLETE; 950 data[0] = MBS_COMMAND_COMPLETE;
943 if (ctx->type == SRB_LOGIN_CMD) { 951 if (ctx->type == SRB_LOGIN_CMD) {
944 fcport->port_type = FCT_TARGET; 952 fcport->port_type = FCT_TARGET;
945 if (le16_to_cpu(mbx->mb1) & BIT_0) 953 if (le16_to_cpu(mbx->mb1) & BIT_0)
946 fcport->port_type = FCT_INITIATOR; 954 fcport->port_type = FCT_INITIATOR;
947 if (le16_to_cpu(mbx->mb1) & BIT_1) 955 else if (le16_to_cpu(mbx->mb1) & BIT_1)
948 fcport->flags |= FCF_FCP2_DEVICE; 956 fcport->flags |= FCF_FCP2_DEVICE;
949 } 957 }
950 goto logio_done; 958 goto logio_done;
@@ -963,9 +971,10 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
963 } 971 }
964 972
965 DEBUG2(printk(KERN_WARNING 973 DEBUG2(printk(KERN_WARNING
966 "scsi(%ld:%x): Async-%s failed - status=%x mb0=%x mb1=%x mb2=%x " 974 "scsi(%ld:%x): Async-%s failed - portid=%02x%02x%02x status=%x "
967 "mb6=%x mb7=%x.\n", 975 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n",
968 fcport->vha->host_no, sp->handle, type, status, 976 fcport->vha->host_no, sp->handle, type, fcport->d_id.b.domain,
977 fcport->d_id.b.area, fcport->d_id.b.al_pa, status,
969 le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1), 978 le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
970 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6), 979 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
971 le16_to_cpu(mbx->mb7))); 980 le16_to_cpu(mbx->mb7)));
@@ -975,6 +984,86 @@ logio_done:
975} 984}
976 985
977static void 986static void
987qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
988 sts_entry_t *pkt, int iocb_type)
989{
990 const char func[] = "CT_IOCB";
991 const char *type;
992 struct qla_hw_data *ha = vha->hw;
993 srb_t *sp;
994 struct srb_ctx *sp_bsg;
995 struct fc_bsg_job *bsg_job;
996 uint16_t comp_status;
997
998 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
999 if (!sp)
1000 return;
1001
1002 sp_bsg = sp->ctx;
1003 bsg_job = sp_bsg->u.bsg_job;
1004
1005 type = NULL;
1006 switch (sp_bsg->type) {
1007 case SRB_CT_CMD:
1008 type = "ct pass-through";
1009 break;
1010 default:
1011 qla_printk(KERN_WARNING, ha,
1012 "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
1013 sp_bsg->type);
1014 return;
1015 }
1016
1017 comp_status = le16_to_cpu(pkt->comp_status);
1018
1019 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1020 * fc payload to the caller
1021 */
1022 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1023 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1024
1025 if (comp_status != CS_COMPLETE) {
1026 if (comp_status == CS_DATA_UNDERRUN) {
1027 bsg_job->reply->result = DID_OK << 16;
1028 bsg_job->reply->reply_payload_rcv_len =
1029 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
1030
1031 DEBUG2(qla_printk(KERN_WARNING, ha,
1032 "scsi(%ld): CT pass-through-%s error "
1033 "comp_status-status=0x%x total_byte = 0x%x.\n",
1034 vha->host_no, type, comp_status,
1035 bsg_job->reply->reply_payload_rcv_len));
1036 } else {
1037 DEBUG2(qla_printk(KERN_WARNING, ha,
1038 "scsi(%ld): CT pass-through-%s error "
1039 "comp_status-status=0x%x.\n",
1040 vha->host_no, type, comp_status));
1041 bsg_job->reply->result = DID_ERROR << 16;
1042 bsg_job->reply->reply_payload_rcv_len = 0;
1043 }
1044 DEBUG2(qla2x00_dump_buffer((uint8_t *)pkt, sizeof(*pkt)));
1045 } else {
1046 bsg_job->reply->result = DID_OK << 16;;
1047 bsg_job->reply->reply_payload_rcv_len =
1048 bsg_job->reply_payload.payload_len;
1049 bsg_job->reply_len = 0;
1050 }
1051
1052 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1053 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1054
1055 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1056 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1057
1058 if (sp_bsg->type == SRB_ELS_CMD_HST || sp_bsg->type == SRB_CT_CMD)
1059 kfree(sp->fcport);
1060
1061 kfree(sp->ctx);
1062 mempool_free(sp, ha->srb_mempool);
1063 bsg_job->job_done(bsg_job);
1064}
1065
1066static void
978qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, 1067qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
979 struct sts_entry_24xx *pkt, int iocb_type) 1068 struct sts_entry_24xx *pkt, int iocb_type)
980{ 1069{
@@ -1096,9 +1185,11 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1096 QLA_LOGIO_LOGIN_RETRIED : 0; 1185 QLA_LOGIO_LOGIN_RETRIED : 0;
1097 if (logio->entry_status) { 1186 if (logio->entry_status) {
1098 DEBUG2(printk(KERN_WARNING 1187 DEBUG2(printk(KERN_WARNING
1099 "scsi(%ld:%x): Async-%s error entry - entry-status=%x.\n", 1188 "scsi(%ld:%x): Async-%s error entry - "
1189 "portid=%02x%02x%02x entry-status=%x.\n",
1100 fcport->vha->host_no, sp->handle, type, 1190 fcport->vha->host_no, sp->handle, type,
1101 logio->entry_status)); 1191 fcport->d_id.b.domain, fcport->d_id.b.area,
1192 fcport->d_id.b.al_pa, logio->entry_status));
1102 DEBUG2(qla2x00_dump_buffer((uint8_t *)logio, sizeof(*logio))); 1193 DEBUG2(qla2x00_dump_buffer((uint8_t *)logio, sizeof(*logio)));
1103 1194
1104 goto logio_done; 1195 goto logio_done;
@@ -1106,8 +1197,11 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1106 1197
1107 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { 1198 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
1108 DEBUG2(printk(KERN_DEBUG 1199 DEBUG2(printk(KERN_DEBUG
1109 "scsi(%ld:%x): Async-%s complete - iop0=%x.\n", 1200 "scsi(%ld:%x): Async-%s complete - portid=%02x%02x%02x "
1201 "iop0=%x.\n",
1110 fcport->vha->host_no, sp->handle, type, 1202 fcport->vha->host_no, sp->handle, type,
1203 fcport->d_id.b.domain, fcport->d_id.b.area,
1204 fcport->d_id.b.al_pa,
1111 le32_to_cpu(logio->io_parameter[0]))); 1205 le32_to_cpu(logio->io_parameter[0])));
1112 1206
1113 data[0] = MBS_COMMAND_COMPLETE; 1207 data[0] = MBS_COMMAND_COMPLETE;
@@ -1119,9 +1213,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1119 fcport->port_type = FCT_TARGET; 1213 fcport->port_type = FCT_TARGET;
1120 if (iop[0] & BIT_8) 1214 if (iop[0] & BIT_8)
1121 fcport->flags |= FCF_FCP2_DEVICE; 1215 fcport->flags |= FCF_FCP2_DEVICE;
1122 } 1216 } else if (iop[0] & BIT_5)
1123 if (iop[0] & BIT_5)
1124 fcport->port_type = FCT_INITIATOR; 1217 fcport->port_type = FCT_INITIATOR;
1218
1125 if (logio->io_parameter[7] || logio->io_parameter[8]) 1219 if (logio->io_parameter[7] || logio->io_parameter[8])
1126 fcport->supported_classes |= FC_COS_CLASS2; 1220 fcport->supported_classes |= FC_COS_CLASS2;
1127 if (logio->io_parameter[9] || logio->io_parameter[10]) 1221 if (logio->io_parameter[9] || logio->io_parameter[10])
@@ -1152,8 +1246,10 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1152 } 1246 }
1153 1247
1154 DEBUG2(printk(KERN_WARNING 1248 DEBUG2(printk(KERN_WARNING
1155 "scsi(%ld:%x): Async-%s failed - comp=%x iop0=%x iop1=%x.\n", 1249 "scsi(%ld:%x): Async-%s failed - portid=%02x%02x%02x comp=%x "
1156 fcport->vha->host_no, sp->handle, type, 1250 "iop0=%x iop1=%x.\n",
1251 fcport->vha->host_no, sp->handle, type, fcport->d_id.b.domain,
1252 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1157 le16_to_cpu(logio->comp_status), 1253 le16_to_cpu(logio->comp_status),
1158 le32_to_cpu(logio->io_parameter[0]), 1254 le32_to_cpu(logio->io_parameter[0]),
1159 le32_to_cpu(logio->io_parameter[1]))); 1255 le32_to_cpu(logio->io_parameter[1])));
@@ -1222,39 +1318,6 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1222 iocb->done(sp); 1318 iocb->done(sp);
1223} 1319}
1224 1320
1225static void
1226qla24xx_marker_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1227 struct mrk_entry_24xx *mrk)
1228{
1229 const char func[] = "MRK-IOCB";
1230 const char *type;
1231 fc_port_t *fcport;
1232 srb_t *sp;
1233 struct srb_iocb *iocb;
1234 struct srb_ctx *ctx;
1235 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)mrk;
1236
1237 sp = qla2x00_get_sp_from_handle(vha, func, req, mrk);
1238 if (!sp)
1239 return;
1240
1241 ctx = sp->ctx;
1242 iocb = ctx->u.iocb_cmd;
1243 type = ctx->name;
1244 fcport = sp->fcport;
1245
1246 if (sts->entry_status) {
1247 iocb->u.marker.data = 1;
1248 DEBUG2(printk(KERN_WARNING
1249 "scsi(%ld:%x): Async-%s error entry - entry-status=%x.\n",
1250 fcport->vha->host_no, sp->handle, type,
1251 sts->entry_status));
1252 DEBUG2(qla2x00_dump_buffer((uint8_t *)mrk, sizeof(*sts)));
1253 }
1254
1255 iocb->done(sp);
1256}
1257
1258/** 1321/**
1259 * qla2x00_process_response_queue() - Process response queue entries. 1322 * qla2x00_process_response_queue() - Process response queue entries.
1260 * @ha: SCSI driver HA context 1323 * @ha: SCSI driver HA context
@@ -1320,6 +1383,9 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
1320 qla2x00_mbx_iocb_entry(vha, rsp->req, 1383 qla2x00_mbx_iocb_entry(vha, rsp->req,
1321 (struct mbx_entry *)pkt); 1384 (struct mbx_entry *)pkt);
1322 break; 1385 break;
1386 case CT_IOCB_TYPE:
1387 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
1388 break;
1323 default: 1389 default:
1324 /* Type Not Supported. */ 1390 /* Type Not Supported. */
1325 DEBUG4(printk(KERN_WARNING 1391 DEBUG4(printk(KERN_WARNING
@@ -1337,8 +1403,9 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
1337} 1403}
1338 1404
1339static inline void 1405static inline void
1340qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len, 1406
1341 struct rsp_que *rsp) 1407qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
1408 uint32_t sense_len, struct rsp_que *rsp)
1342{ 1409{
1343 struct scsi_cmnd *cp = sp->cmd; 1410 struct scsi_cmnd *cp = sp->cmd;
1344 1411
@@ -1347,8 +1414,8 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len,
1347 1414
1348 sp->request_sense_length = sense_len; 1415 sp->request_sense_length = sense_len;
1349 sp->request_sense_ptr = cp->sense_buffer; 1416 sp->request_sense_ptr = cp->sense_buffer;
1350 if (sp->request_sense_length > 32) 1417 if (sp->request_sense_length > par_sense_len)
1351 sense_len = 32; 1418 sense_len = par_sense_len;
1352 1419
1353 memcpy(cp->sense_buffer, sense_data, sense_len); 1420 memcpy(cp->sense_buffer, sense_data, sense_len);
1354 1421
@@ -1455,7 +1522,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1455 uint16_t ox_id; 1522 uint16_t ox_id;
1456 uint8_t lscsi_status; 1523 uint8_t lscsi_status;
1457 int32_t resid; 1524 int32_t resid;
1458 uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len; 1525 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
1526 fw_resid_len;
1459 uint8_t *rsp_info, *sense_data; 1527 uint8_t *rsp_info, *sense_data;
1460 struct qla_hw_data *ha = vha->hw; 1528 struct qla_hw_data *ha = vha->hw;
1461 uint32_t handle; 1529 uint32_t handle;
@@ -1513,7 +1581,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1513 fcport = sp->fcport; 1581 fcport = sp->fcport;
1514 1582
1515 ox_id = 0; 1583 ox_id = 0;
1516 sense_len = rsp_info_len = resid_len = fw_resid_len = 0; 1584 sense_len = par_sense_len = rsp_info_len = resid_len =
1585 fw_resid_len = 0;
1517 if (IS_FWI2_CAPABLE(ha)) { 1586 if (IS_FWI2_CAPABLE(ha)) {
1518 if (scsi_status & SS_SENSE_LEN_VALID) 1587 if (scsi_status & SS_SENSE_LEN_VALID)
1519 sense_len = le32_to_cpu(sts24->sense_len); 1588 sense_len = le32_to_cpu(sts24->sense_len);
@@ -1527,6 +1596,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1527 sense_data = sts24->data; 1596 sense_data = sts24->data;
1528 host_to_fcp_swap(sts24->data, sizeof(sts24->data)); 1597 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
1529 ox_id = le16_to_cpu(sts24->ox_id); 1598 ox_id = le16_to_cpu(sts24->ox_id);
1599 par_sense_len = sizeof(sts24->data);
1530 } else { 1600 } else {
1531 if (scsi_status & SS_SENSE_LEN_VALID) 1601 if (scsi_status & SS_SENSE_LEN_VALID)
1532 sense_len = le16_to_cpu(sts->req_sense_length); 1602 sense_len = le16_to_cpu(sts->req_sense_length);
@@ -1535,13 +1605,16 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1535 resid_len = le32_to_cpu(sts->residual_length); 1605 resid_len = le32_to_cpu(sts->residual_length);
1536 rsp_info = sts->rsp_info; 1606 rsp_info = sts->rsp_info;
1537 sense_data = sts->req_sense_data; 1607 sense_data = sts->req_sense_data;
1608 par_sense_len = sizeof(sts->req_sense_data);
1538 } 1609 }
1539 1610
1540 /* Check for any FCP transport errors. */ 1611 /* Check for any FCP transport errors. */
1541 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) { 1612 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
1542 /* Sense data lies beyond any FCP RESPONSE data. */ 1613 /* Sense data lies beyond any FCP RESPONSE data. */
1543 if (IS_FWI2_CAPABLE(ha)) 1614 if (IS_FWI2_CAPABLE(ha)) {
1544 sense_data += rsp_info_len; 1615 sense_data += rsp_info_len;
1616 par_sense_len -= rsp_info_len;
1617 }
1545 if (rsp_info_len > 3 && rsp_info[3]) { 1618 if (rsp_info_len > 3 && rsp_info[3]) {
1546 DEBUG2(qla_printk(KERN_INFO, ha, 1619 DEBUG2(qla_printk(KERN_INFO, ha,
1547 "scsi(%ld:%d:%d): FCP I/O protocol failure " 1620 "scsi(%ld:%d:%d): FCP I/O protocol failure "
@@ -1601,7 +1674,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1601 if (!(scsi_status & SS_SENSE_LEN_VALID)) 1674 if (!(scsi_status & SS_SENSE_LEN_VALID))
1602 break; 1675 break;
1603 1676
1604 qla2x00_handle_sense(sp, sense_data, sense_len, rsp); 1677 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
1678 rsp);
1605 break; 1679 break;
1606 1680
1607 case CS_DATA_UNDERRUN: 1681 case CS_DATA_UNDERRUN:
@@ -1665,7 +1739,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1665 if (!(scsi_status & SS_SENSE_LEN_VALID)) 1739 if (!(scsi_status & SS_SENSE_LEN_VALID))
1666 break; 1740 break;
1667 1741
1668 qla2x00_handle_sense(sp, sense_data, sense_len, rsp); 1742 qla2x00_handle_sense(sp, sense_data, par_sense_len,
1743 sense_len, rsp);
1669 } 1744 }
1670 break; 1745 break;
1671 1746
@@ -1700,6 +1775,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1700 break; 1775 break;
1701 1776
1702 case CS_RESET: 1777 case CS_RESET:
1778 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1779 break;
1780
1703 case CS_ABORTED: 1781 case CS_ABORTED:
1704 cp->result = DID_RESET << 16; 1782 cp->result = DID_RESET << 16;
1705 break; 1783 break;
@@ -1926,10 +2004,6 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
1926 qla24xx_tm_iocb_entry(vha, rsp->req, 2004 qla24xx_tm_iocb_entry(vha, rsp->req,
1927 (struct tsk_mgmt_entry *)pkt); 2005 (struct tsk_mgmt_entry *)pkt);
1928 break; 2006 break;
1929 case MARKER_TYPE:
1930 qla24xx_marker_iocb_entry(vha, rsp->req,
1931 (struct mrk_entry_24xx *)pkt);
1932 break;
1933 case CT_IOCB_TYPE: 2007 case CT_IOCB_TYPE:
1934 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 2008 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
1935 clear_bit(MBX_INTERRUPT, &vha->hw->mbx_cmd_flags); 2009 clear_bit(MBX_INTERRUPT, &vha->hw->mbx_cmd_flags);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index f3650d0434ca..6009b0c69488 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -37,7 +37,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
37 device_reg_t __iomem *reg; 37 device_reg_t __iomem *reg;
38 uint8_t abort_active; 38 uint8_t abort_active;
39 uint8_t io_lock_on; 39 uint8_t io_lock_on;
40 uint16_t command; 40 uint16_t command = 0;
41 uint16_t *iptr; 41 uint16_t *iptr;
42 uint16_t __iomem *optr; 42 uint16_t __iomem *optr;
43 uint32_t cnt; 43 uint32_t cnt;
@@ -83,6 +83,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
83 return QLA_FUNCTION_TIMEOUT; 83 return QLA_FUNCTION_TIMEOUT;
84 } 84 }
85 85
86 if (IS_QLA82XX(ha) && ha->flags.fw_hung) {
87 /* Setting Link-Down error */
88 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
89 rval = QLA_FUNCTION_FAILED;
90 goto premature_exit;
91 }
92
86 ha->flags.mbox_busy = 1; 93 ha->flags.mbox_busy = 1;
87 /* Save mailbox command for debug */ 94 /* Save mailbox command for debug */
88 ha->mcp = mcp; 95 ha->mcp = mcp;
@@ -151,7 +158,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
151 DEBUG2_3_11(printk(KERN_INFO 158 DEBUG2_3_11(printk(KERN_INFO
152 "%s(%ld): Pending Mailbox timeout. " 159 "%s(%ld): Pending Mailbox timeout. "
153 "Exiting.\n", __func__, base_vha->host_no)); 160 "Exiting.\n", __func__, base_vha->host_no));
154 return QLA_FUNCTION_TIMEOUT; 161 rval = QLA_FUNCTION_TIMEOUT;
162 goto premature_exit;
155 } 163 }
156 WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING); 164 WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
157 } else if (IS_FWI2_CAPABLE(ha)) 165 } else if (IS_FWI2_CAPABLE(ha))
@@ -176,7 +184,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
176 DEBUG2_3_11(printk(KERN_INFO 184 DEBUG2_3_11(printk(KERN_INFO
177 "%s(%ld): Pending Mailbox timeout. " 185 "%s(%ld): Pending Mailbox timeout. "
178 "Exiting.\n", __func__, base_vha->host_no)); 186 "Exiting.\n", __func__, base_vha->host_no));
179 return QLA_FUNCTION_TIMEOUT; 187 rval = QLA_FUNCTION_TIMEOUT;
188 goto premature_exit;
180 } 189 }
181 WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING); 190 WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
182 } else if (IS_FWI2_CAPABLE(ha)) 191 } else if (IS_FWI2_CAPABLE(ha))
@@ -214,6 +223,15 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
214 ha->flags.mbox_int = 0; 223 ha->flags.mbox_int = 0;
215 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 224 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
216 225
226 if (IS_QLA82XX(ha) && ha->flags.fw_hung) {
227 ha->flags.mbox_busy = 0;
228 /* Setting Link-Down error */
229 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
230 ha->mcp = NULL;
231 rval = QLA_FUNCTION_FAILED;
232 goto premature_exit;
233 }
234
217 if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) 235 if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE)
218 rval = QLA_FUNCTION_FAILED; 236 rval = QLA_FUNCTION_FAILED;
219 237
@@ -279,35 +297,51 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
279 DEBUG2_3_11(printk("%s(%ld): timeout schedule " 297 DEBUG2_3_11(printk("%s(%ld): timeout schedule "
280 "isp_abort_needed.\n", __func__, 298 "isp_abort_needed.\n", __func__,
281 base_vha->host_no)); 299 base_vha->host_no));
282 qla_printk(KERN_WARNING, ha, 300
283 "Mailbox command timeout occurred. Scheduling ISP " 301 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
284 "abort. eeh_busy: 0x%x\n", ha->flags.eeh_busy); 302 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
285 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); 303 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
286 qla2xxx_wake_dpc(vha); 304
305 qla_printk(KERN_WARNING, ha,
306 "Mailbox command timeout occured. "
307 "Scheduling ISP " "abort. eeh_busy: 0x%x\n",
308 ha->flags.eeh_busy);
309 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
310 qla2xxx_wake_dpc(vha);
311 }
287 } else if (!abort_active) { 312 } else if (!abort_active) {
288 /* call abort directly since we are in the DPC thread */ 313 /* call abort directly since we are in the DPC thread */
289 DEBUG(printk("%s(%ld): timeout calling abort_isp\n", 314 DEBUG(printk("%s(%ld): timeout calling abort_isp\n",
290 __func__, base_vha->host_no)); 315 __func__, base_vha->host_no));
291 DEBUG2_3_11(printk("%s(%ld): timeout calling " 316 DEBUG2_3_11(printk("%s(%ld): timeout calling "
292 "abort_isp\n", __func__, base_vha->host_no)); 317 "abort_isp\n", __func__, base_vha->host_no));
293 qla_printk(KERN_WARNING, ha, 318
294 "Mailbox command timeout occurred. Issuing ISP " 319 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
295 "abort.\n"); 320 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
296 321 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
297 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 322
298 clear_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); 323 qla_printk(KERN_WARNING, ha,
299 if (ha->isp_ops->abort_isp(base_vha)) { 324 "Mailbox command timeout occured. "
300 /* Failed. retry later. */ 325 "Issuing ISP abort.\n");
301 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); 326
327 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
328 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
329 if (ha->isp_ops->abort_isp(vha)) {
330 /* Failed. retry later. */
331 set_bit(ISP_ABORT_NEEDED,
332 &vha->dpc_flags);
333 }
334 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
335 DEBUG(printk("%s(%ld): finished abort_isp\n",
336 __func__, vha->host_no));
337 DEBUG2_3_11(printk(
338 "%s(%ld): finished abort_isp\n",
339 __func__, vha->host_no));
302 } 340 }
303 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
304 DEBUG(printk("%s(%ld): finished abort_isp\n", __func__,
305 base_vha->host_no));
306 DEBUG2_3_11(printk("%s(%ld): finished abort_isp\n",
307 __func__, base_vha->host_no));
308 } 341 }
309 } 342 }
310 343
344premature_exit:
311 /* Allow next mbx cmd to come in. */ 345 /* Allow next mbx cmd to come in. */
312 complete(&ha->mbx_cmd_comp); 346 complete(&ha->mbx_cmd_comp);
313 347
@@ -866,8 +900,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
866 900
867 l = l; 901 l = l;
868 vha = fcport->vha; 902 vha = fcport->vha;
869 req = vha->hw->req_q_map[tag]; 903 req = vha->hw->req_q_map[0];
870 rsp = vha->hw->rsp_q_map[tag]; 904 rsp = req->rsp;
871 mcp->mb[0] = MBC_ABORT_TARGET; 905 mcp->mb[0] = MBC_ABORT_TARGET;
872 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0; 906 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
873 if (HAS_EXTENDED_IDS(vha->hw)) { 907 if (HAS_EXTENDED_IDS(vha->hw)) {
@@ -915,8 +949,8 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
915 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no)); 949 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
916 950
917 vha = fcport->vha; 951 vha = fcport->vha;
918 req = vha->hw->req_q_map[tag]; 952 req = vha->hw->req_q_map[0];
919 rsp = vha->hw->rsp_q_map[tag]; 953 rsp = req->rsp;
920 mcp->mb[0] = MBC_LUN_RESET; 954 mcp->mb[0] = MBC_LUN_RESET;
921 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 955 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
922 if (HAS_EXTENDED_IDS(vha->hw)) 956 if (HAS_EXTENDED_IDS(vha->hw))
@@ -3950,6 +3984,72 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
3950} 3984}
3951 3985
3952int 3986int
3987qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
3988{
3989 int rval;
3990 mbx_cmd_t mc;
3991 mbx_cmd_t *mcp = &mc;
3992 struct qla_hw_data *ha = vha->hw;
3993
3994 DEBUG11(printk(KERN_INFO
3995 "%s(%ld): entered.\n", __func__, vha->host_no));
3996
3997 if (!IS_QLA81XX(ha))
3998 return QLA_FUNCTION_FAILED;
3999 mcp->mb[0] = MBC_GET_PORT_CONFIG;
4000 mcp->out_mb = MBX_0;
4001 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4002 mcp->tov = MBX_TOV_SECONDS;
4003 mcp->flags = 0;
4004
4005 rval = qla2x00_mailbox_command(vha, mcp);
4006
4007 if (rval != QLA_SUCCESS) {
4008 DEBUG2_3_11(printk(KERN_WARNING
4009 "%s(%ld): failed=%x (%x).\n", __func__,
4010 vha->host_no, rval, mcp->mb[0]));
4011 } else {
4012 /* Copy all bits to preserve original value */
4013 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
4014
4015 DEBUG11(printk(KERN_INFO
4016 "%s(%ld): done.\n", __func__, vha->host_no));
4017 }
4018 return rval;
4019}
4020
4021int
4022qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4023{
4024 int rval;
4025 mbx_cmd_t mc;
4026 mbx_cmd_t *mcp = &mc;
4027
4028 DEBUG11(printk(KERN_INFO
4029 "%s(%ld): entered.\n", __func__, vha->host_no));
4030
4031 mcp->mb[0] = MBC_SET_PORT_CONFIG;
4032 /* Copy all bits to preserve original setting */
4033 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
4034 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4035 mcp->in_mb = MBX_0;
4036 mcp->tov = MBX_TOV_SECONDS;
4037 mcp->flags = 0;
4038 rval = qla2x00_mailbox_command(vha, mcp);
4039
4040 if (rval != QLA_SUCCESS) {
4041 DEBUG2_3_11(printk(KERN_WARNING
4042 "%s(%ld): failed=%x (%x).\n", __func__,
4043 vha->host_no, rval, mcp->mb[0]));
4044 } else
4045 DEBUG11(printk(KERN_INFO
4046 "%s(%ld): done.\n", __func__, vha->host_no));
4047
4048 return rval;
4049}
4050
4051
4052int
3953qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority, 4053qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
3954 uint16_t *mb) 4054 uint16_t *mb)
3955{ 4055{
@@ -4011,7 +4111,7 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
4011 "%s(%ld): entered.\n", __func__, vha->host_no)); 4111 "%s(%ld): entered.\n", __func__, vha->host_no));
4012 4112
4013 memset(mcp, 0, sizeof(mbx_cmd_t)); 4113 memset(mcp, 0, sizeof(mbx_cmd_t));
4014 mcp->mb[0] = MBC_TOGGLE_INTR; 4114 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
4015 mcp->mb[1] = 1; 4115 mcp->mb[1] = 1;
4016 4116
4017 mcp->out_mb = MBX_1|MBX_0; 4117 mcp->out_mb = MBX_1|MBX_0;
@@ -4047,7 +4147,7 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
4047 "%s(%ld): entered.\n", __func__, vha->host_no)); 4147 "%s(%ld): entered.\n", __func__, vha->host_no));
4048 4148
4049 memset(mcp, 0, sizeof(mbx_cmd_t)); 4149 memset(mcp, 0, sizeof(mbx_cmd_t));
4050 mcp->mb[0] = MBC_TOGGLE_INTR; 4150 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
4051 mcp->mb[1] = 0; 4151 mcp->mb[1] = 0;
4052 4152
4053 mcp->out_mb = MBX_1|MBX_0; 4153 mcp->out_mb = MBX_1|MBX_0;
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 8220e7b9799b..987c5b0ca78e 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -136,7 +136,8 @@ qla24xx_enable_vp(scsi_qla_host_t *vha)
136 136
137 /* Check if physical ha port is Up */ 137 /* Check if physical ha port is Up */
138 if (atomic_read(&base_vha->loop_state) == LOOP_DOWN || 138 if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
139 atomic_read(&base_vha->loop_state) == LOOP_DEAD) { 139 atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
140 !(ha->current_topology & ISP_CFG_F)) {
140 vha->vp_err_state = VP_ERR_PORTDWN; 141 vha->vp_err_state = VP_ERR_PORTDWN;
141 fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN); 142 fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
142 goto enable_failed; 143 goto enable_failed;
@@ -398,7 +399,10 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
398 host->can_queue = base_vha->req->length + 128; 399 host->can_queue = base_vha->req->length + 128;
399 host->this_id = 255; 400 host->this_id = 255;
400 host->cmd_per_lun = 3; 401 host->cmd_per_lun = 3;
401 host->max_cmd_len = MAX_CMDSZ; 402 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif)
403 host->max_cmd_len = 32;
404 else
405 host->max_cmd_len = MAX_CMDSZ;
402 host->max_channel = MAX_BUSES - 1; 406 host->max_channel = MAX_BUSES - 1;
403 host->max_lun = MAX_LUNS; 407 host->max_lun = MAX_LUNS;
404 host->unique_id = host->host_no; 408 host->unique_id = host->host_no;
@@ -481,7 +485,7 @@ qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
481 return ret; 485 return ret;
482} 486}
483 487
484int 488static int
485qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) 489qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
486{ 490{
487 int ret = -1; 491 int ret = -1;
@@ -496,23 +500,6 @@ qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
496 return ret; 500 return ret;
497} 501}
498 502
499int qla25xx_update_req_que(struct scsi_qla_host *vha, uint8_t que, uint8_t qos)
500{
501 int ret = 0;
502 struct qla_hw_data *ha = vha->hw;
503 struct req_que *req = ha->req_q_map[que];
504
505 req->options |= BIT_3;
506 req->qos = qos;
507 ret = qla25xx_init_req_que(vha, req);
508 if (ret != QLA_SUCCESS)
509 DEBUG2_17(printk(KERN_WARNING "%s failed\n", __func__));
510 /* restore options bit */
511 req->options &= ~BIT_3;
512 return ret;
513}
514
515
516/* Delete all queues for a given vhost */ 503/* Delete all queues for a given vhost */
517int 504int
518qla25xx_delete_queues(struct scsi_qla_host *vha) 505qla25xx_delete_queues(struct scsi_qla_host *vha)
@@ -739,35 +726,3 @@ que_failed:
739failed: 726failed:
740 return 0; 727 return 0;
741} 728}
742
743int
744qla25xx_create_queues(struct scsi_qla_host *vha, uint8_t qos)
745{
746 uint16_t options = 0;
747 uint8_t ret = 0;
748 struct qla_hw_data *ha = vha->hw;
749 struct rsp_que *rsp;
750
751 options |= BIT_1;
752 ret = qla25xx_create_rsp_que(ha, options, vha->vp_idx, 0, -1);
753 if (!ret) {
754 qla_printk(KERN_WARNING, ha, "Response Que create failed\n");
755 return ret;
756 } else
757 qla_printk(KERN_INFO, ha, "Response Que:%d created.\n", ret);
758 rsp = ha->rsp_q_map[ret];
759
760 options = 0;
761 if (qos & BIT_7)
762 options |= BIT_8;
763 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, ret,
764 qos & ~BIT_7);
765 if (ret) {
766 vha->req = ha->req_q_map[ret];
767 qla_printk(KERN_INFO, ha, "Request Que:%d created.\n", ret);
768 } else
769 qla_printk(KERN_WARNING, ha, "Request Que create failed\n");
770 rsp->req = ha->req_q_map[ret];
771
772 return ret;
773}
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index ff562de0e8e7..915b77a6e193 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -19,6 +19,7 @@
19#define QLA82XX_PCI_OCM0_2M (0xc0000) 19#define QLA82XX_PCI_OCM0_2M (0xc0000)
20#define VALID_OCM_ADDR(addr) (((addr) & 0x3f800) != 0x3f800) 20#define VALID_OCM_ADDR(addr) (((addr) & 0x3f800) != 0x3f800)
21#define GET_MEM_OFFS_2M(addr) (addr & MASK(18)) 21#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
22#define BLOCK_PROTECT_BITS 0x0F
22 23
23/* CRB window related */ 24/* CRB window related */
24#define CRB_BLK(off) ((off >> 20) & 0x3f) 25#define CRB_BLK(off) ((off >> 20) & 0x3f)
@@ -796,179 +797,6 @@ qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
796 return ret; 797 return ret;
797} 798}
798 799
799int
800qla82xx_wrmem(struct qla_hw_data *ha, u64 off, void *data, int size)
801{
802 int i, j, ret = 0, loop, sz[2], off0;
803 u32 temp;
804 u64 off8, mem_crb, tmpw, word[2] = {0, 0};
805#define MAX_CTL_CHECK 1000
806 /*
807 * If not MN, go check for MS or invalid.
808 */
809 if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX) {
810 mem_crb = QLA82XX_CRB_QDR_NET;
811 } else {
812 mem_crb = QLA82XX_CRB_DDR_NET;
813 if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
814 return qla82xx_pci_mem_write_direct(ha, off,
815 data, size);
816 }
817
818 off8 = off & 0xfffffff8;
819 off0 = off & 0x7;
820 sz[0] = (size < (8 - off0)) ? size : (8 - off0);
821 sz[1] = size - sz[0];
822 loop = ((off0 + size - 1) >> 3) + 1;
823
824 if ((size != 8) || (off0 != 0)) {
825 for (i = 0; i < loop; i++) {
826 if (qla82xx_rdmem(ha, off8 + (i << 3), &word[i], 8))
827 return -1;
828 }
829 }
830
831 switch (size) {
832 case 1:
833 tmpw = *((u8 *)data);
834 break;
835 case 2:
836 tmpw = *((u16 *)data);
837 break;
838 case 4:
839 tmpw = *((u32 *)data);
840 break;
841 case 8:
842 default:
843 tmpw = *((u64 *)data);
844 break;
845 }
846
847 word[0] &= ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
848 word[0] |= tmpw << (off0 * 8);
849
850 if (loop == 2) {
851 word[1] &= ~(~0ULL << (sz[1] * 8));
852 word[1] |= tmpw >> (sz[0] * 8);
853 }
854
855 for (i = 0; i < loop; i++) {
856 temp = off8 + (i << 3);
857 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
858 temp = 0;
859 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
860 temp = word[i] & 0xffffffff;
861 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
862 temp = (word[i] >> 32) & 0xffffffff;
863 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
864 temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
865 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp);
866 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
867 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp);
868
869 for (j = 0; j < MAX_CTL_CHECK; j++) {
870 temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
871 if ((temp & MIU_TA_CTL_BUSY) == 0)
872 break;
873 }
874
875 if (j >= MAX_CTL_CHECK) {
876 qla_printk(KERN_WARNING, ha,
877 "%s: Fail to write through agent\n",
878 QLA2XXX_DRIVER_NAME);
879 ret = -1;
880 break;
881 }
882 }
883 return ret;
884}
885
886int
887qla82xx_rdmem(struct qla_hw_data *ha, u64 off, void *data, int size)
888{
889 int i, j = 0, k, start, end, loop, sz[2], off0[2];
890 u32 temp;
891 u64 off8, val, mem_crb, word[2] = {0, 0};
892#define MAX_CTL_CHECK 1000
893
894 /*
895 * If not MN, go check for MS or invalid.
896 */
897 if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
898 mem_crb = QLA82XX_CRB_QDR_NET;
899 else {
900 mem_crb = QLA82XX_CRB_DDR_NET;
901 if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
902 return qla82xx_pci_mem_read_direct(ha, off,
903 data, size);
904 }
905
906 off8 = off & 0xfffffff8;
907 off0[0] = off & 0x7;
908 off0[1] = 0;
909 sz[0] = (size < (8 - off0[0])) ? size : (8 - off0[0]);
910 sz[1] = size - sz[0];
911 loop = ((off0[0] + size - 1) >> 3) + 1;
912
913 for (i = 0; i < loop; i++) {
914 temp = off8 + (i << 3);
915 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
916 temp = 0;
917 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
918 temp = MIU_TA_CTL_ENABLE;
919 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
920 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
921 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
922
923 for (j = 0; j < MAX_CTL_CHECK; j++) {
924 temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
925 if ((temp & MIU_TA_CTL_BUSY) == 0)
926 break;
927 }
928
929 if (j >= MAX_CTL_CHECK) {
930 qla_printk(KERN_INFO, ha,
931 "%s: Fail to read through agent\n",
932 QLA2XXX_DRIVER_NAME);
933 break;
934 }
935
936 start = off0[i] >> 2;
937 end = (off0[i] + sz[i] - 1) >> 2;
938 for (k = start; k <= end; k++) {
939 temp = qla82xx_rd_32(ha,
940 mem_crb + MIU_TEST_AGT_RDDATA(k));
941 word[i] |= ((u64)temp << (32 * k));
942 }
943 }
944
945 if (j >= MAX_CTL_CHECK)
946 return -1;
947
948 if (sz[0] == 8) {
949 val = word[0];
950 } else {
951 val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
952 ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
953 }
954
955 switch (size) {
956 case 1:
957 *(u8 *)data = val;
958 break;
959 case 2:
960 *(u16 *)data = val;
961 break;
962 case 4:
963 *(u32 *)data = val;
964 break;
965 case 8:
966 *(u64 *)data = val;
967 break;
968 }
969 return 0;
970}
971
972#define MTU_FUDGE_FACTOR 100 800#define MTU_FUDGE_FACTOR 100
973unsigned long qla82xx_decode_crb_addr(unsigned long addr) 801unsigned long qla82xx_decode_crb_addr(unsigned long addr)
974{ 802{
@@ -1346,11 +1174,6 @@ int qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1346 continue; 1174 continue;
1347 } 1175 }
1348 1176
1349 if (off == (QLA82XX_CRB_PEG_NET_1 + 0x18)) {
1350 if (!QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision))
1351 buf[i].data = 0x1020;
1352 }
1353
1354 qla82xx_wr_32(ha, off, buf[i].data); 1177 qla82xx_wr_32(ha, off, buf[i].data);
1355 1178
1356 /* ISP requires much bigger delay to settle down, 1179 /* ISP requires much bigger delay to settle down,
@@ -1407,7 +1230,8 @@ qla82xx_fw_load_from_flash(struct qla_hw_data *ha)
1407{ 1230{
1408 int i; 1231 int i;
1409 long size = 0; 1232 long size = 0;
1410 long flashaddr = BOOTLD_START, memaddr = BOOTLD_START; 1233 long flashaddr = ha->flt_region_bootload << 2;
1234 long memaddr = BOOTLD_START;
1411 u64 data; 1235 u64 data;
1412 u32 high, low; 1236 u32 high, low;
1413 size = (IMAGE_START - BOOTLD_START) / 8; 1237 size = (IMAGE_START - BOOTLD_START) / 8;
@@ -1427,12 +1251,8 @@ qla82xx_fw_load_from_flash(struct qla_hw_data *ha)
1427 } 1251 }
1428 udelay(100); 1252 udelay(100);
1429 read_lock(&ha->hw_lock); 1253 read_lock(&ha->hw_lock);
1430 if (QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision)) { 1254 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
1431 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020); 1255 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
1432 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
1433 } else {
1434 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001d);
1435 }
1436 read_unlock(&ha->hw_lock); 1256 read_unlock(&ha->hw_lock);
1437 return 0; 1257 return 0;
1438} 1258}
@@ -1459,17 +1279,10 @@ qla82xx_pci_mem_read_2M(struct qla_hw_data *ha,
1459 off, data, size); 1279 off, data, size);
1460 } 1280 }
1461 1281
1462 if (QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision)) { 1282 off8 = off & 0xfffffff0;
1463 off8 = off & 0xfffffff0; 1283 off0[0] = off & 0xf;
1464 off0[0] = off & 0xf; 1284 sz[0] = (size < (16 - off0[0])) ? size : (16 - off0[0]);
1465 sz[0] = (size < (16 - off0[0])) ? size : (16 - off0[0]); 1285 shift_amount = 4;
1466 shift_amount = 4;
1467 } else {
1468 off8 = off & 0xfffffff8;
1469 off0[0] = off & 0x7;
1470 sz[0] = (size < (8 - off0[0])) ? size : (8 - off0[0]);
1471 shift_amount = 4;
1472 }
1473 loop = ((off0[0] + size - 1) >> shift_amount) + 1; 1286 loop = ((off0[0] + size - 1) >> shift_amount) + 1;
1474 off0[1] = 0; 1287 off0[1] = 0;
1475 sz[1] = size - sz[0]; 1288 sz[1] = size - sz[0];
@@ -1549,7 +1362,7 @@ qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
1549 u64 off, void *data, int size) 1362 u64 off, void *data, int size)
1550{ 1363{
1551 int i, j, ret = 0, loop, sz[2], off0; 1364 int i, j, ret = 0, loop, sz[2], off0;
1552 int scale, shift_amount, p3p, startword; 1365 int scale, shift_amount, startword;
1553 uint32_t temp; 1366 uint32_t temp;
1554 uint64_t off8, mem_crb, tmpw, word[2] = {0, 0}; 1367 uint64_t off8, mem_crb, tmpw, word[2] = {0, 0};
1555 1368
@@ -1569,28 +1382,16 @@ qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
1569 sz[0] = (size < (8 - off0)) ? size : (8 - off0); 1382 sz[0] = (size < (8 - off0)) ? size : (8 - off0);
1570 sz[1] = size - sz[0]; 1383 sz[1] = size - sz[0];
1571 1384
1572 if (QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision)) { 1385 off8 = off & 0xfffffff0;
1573 off8 = off & 0xfffffff0; 1386 loop = (((off & 0xf) + size - 1) >> 4) + 1;
1574 loop = (((off & 0xf) + size - 1) >> 4) + 1; 1387 shift_amount = 4;
1575 shift_amount = 4; 1388 scale = 2;
1576 scale = 2; 1389 startword = (off & 0xf)/8;
1577 p3p = 1; 1390
1578 startword = (off & 0xf)/8; 1391 for (i = 0; i < loop; i++) {
1579 } else { 1392 if (qla82xx_pci_mem_read_2M(ha, off8 +
1580 off8 = off & 0xfffffff8; 1393 (i << shift_amount), &word[i * scale], 8))
1581 loop = ((off0 + size - 1) >> 3) + 1; 1394 return -1;
1582 shift_amount = 3;
1583 scale = 1;
1584 p3p = 0;
1585 startword = 0;
1586 }
1587
1588 if (p3p || (size != 8) || (off0 != 0)) {
1589 for (i = 0; i < loop; i++) {
1590 if (qla82xx_pci_mem_read_2M(ha, off8 +
1591 (i << shift_amount), &word[i * scale], 8))
1592 return -1;
1593 }
1594 } 1395 }
1595 1396
1596 switch (size) { 1397 switch (size) {
@@ -1609,26 +1410,16 @@ qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
1609 break; 1410 break;
1610 } 1411 }
1611 1412
1612 if (QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision)) { 1413 if (sz[0] == 8) {
1613 if (sz[0] == 8) { 1414 word[startword] = tmpw;
1614 word[startword] = tmpw;
1615 } else {
1616 word[startword] &=
1617 ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
1618 word[startword] |= tmpw << (off0 * 8);
1619 }
1620 if (sz[1] != 0) {
1621 word[startword+1] &= ~(~0ULL << (sz[1] * 8));
1622 word[startword+1] |= tmpw >> (sz[0] * 8);
1623 }
1624 } else { 1415 } else {
1625 word[startword] &= ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8)); 1416 word[startword] &=
1417 ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
1626 word[startword] |= tmpw << (off0 * 8); 1418 word[startword] |= tmpw << (off0 * 8);
1627 1419 }
1628 if (loop == 2) { 1420 if (sz[1] != 0) {
1629 word[1] &= ~(~0ULL << (sz[1] * 8)); 1421 word[startword+1] &= ~(~0ULL << (sz[1] * 8));
1630 word[1] |= tmpw >> (sz[0] * 8); 1422 word[startword+1] |= tmpw >> (sz[0] * 8);
1631 }
1632 } 1423 }
1633 1424
1634 /* 1425 /*
@@ -1645,14 +1436,12 @@ qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
1645 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp); 1436 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
1646 temp = (word[i * scale] >> 32) & 0xffffffff; 1437 temp = (word[i * scale] >> 32) & 0xffffffff;
1647 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp); 1438 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
1648 if (QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision)) { 1439 temp = word[i*scale + 1] & 0xffffffff;
1649 temp = word[i*scale + 1] & 0xffffffff; 1440 qla82xx_wr_32(ha, mem_crb +
1650 qla82xx_wr_32(ha, mem_crb + 1441 MIU_TEST_AGT_WRDATA_UPPER_LO, temp);
1651 MIU_TEST_AGT_WRDATA_UPPER_LO, temp); 1442 temp = (word[i*scale + 1] >> 32) & 0xffffffff;
1652 temp = (word[i*scale + 1] >> 32) & 0xffffffff; 1443 qla82xx_wr_32(ha, mem_crb +
1653 qla82xx_wr_32(ha, mem_crb + 1444 MIU_TEST_AGT_WRDATA_UPPER_HI, temp);
1654 MIU_TEST_AGT_WRDATA_UPPER_HI, temp);
1655 }
1656 1445
1657 temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE; 1446 temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
1658 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp); 1447 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
@@ -1677,6 +1466,94 @@ qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
1677 return ret; 1466 return ret;
1678} 1467}
1679 1468
1469static struct qla82xx_uri_table_desc *
1470qla82xx_get_table_desc(const u8 *unirom, int section)
1471{
1472 uint32_t i;
1473 struct qla82xx_uri_table_desc *directory =
1474 (struct qla82xx_uri_table_desc *)&unirom[0];
1475 __le32 offset;
1476 __le32 tab_type;
1477 __le32 entries = cpu_to_le32(directory->num_entries);
1478
1479 for (i = 0; i < entries; i++) {
1480 offset = cpu_to_le32(directory->findex) +
1481 (i * cpu_to_le32(directory->entry_size));
1482 tab_type = cpu_to_le32(*((u32 *)&unirom[offset] + 8));
1483
1484 if (tab_type == section)
1485 return (struct qla82xx_uri_table_desc *)&unirom[offset];
1486 }
1487
1488 return NULL;
1489}
1490
1491static struct qla82xx_uri_data_desc *
1492qla82xx_get_data_desc(struct qla_hw_data *ha,
1493 u32 section, u32 idx_offset)
1494{
1495 const u8 *unirom = ha->hablob->fw->data;
1496 int idx = cpu_to_le32(*((int *)&unirom[ha->file_prd_off] + idx_offset));
1497 struct qla82xx_uri_table_desc *tab_desc = NULL;
1498 __le32 offset;
1499
1500 tab_desc = qla82xx_get_table_desc(unirom, section);
1501 if (!tab_desc)
1502 return NULL;
1503
1504 offset = cpu_to_le32(tab_desc->findex) +
1505 (cpu_to_le32(tab_desc->entry_size) * idx);
1506
1507 return (struct qla82xx_uri_data_desc *)&unirom[offset];
1508}
1509
1510static u8 *
1511qla82xx_get_bootld_offset(struct qla_hw_data *ha)
1512{
1513 u32 offset = BOOTLD_START;
1514 struct qla82xx_uri_data_desc *uri_desc = NULL;
1515
1516 if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1517 uri_desc = qla82xx_get_data_desc(ha,
1518 QLA82XX_URI_DIR_SECT_BOOTLD, QLA82XX_URI_BOOTLD_IDX_OFF);
1519 if (uri_desc)
1520 offset = cpu_to_le32(uri_desc->findex);
1521 }
1522
1523 return (u8 *)&ha->hablob->fw->data[offset];
1524}
1525
1526static __le32
1527qla82xx_get_fw_size(struct qla_hw_data *ha)
1528{
1529 struct qla82xx_uri_data_desc *uri_desc = NULL;
1530
1531 if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1532 uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
1533 QLA82XX_URI_FIRMWARE_IDX_OFF);
1534 if (uri_desc)
1535 return cpu_to_le32(uri_desc->size);
1536 }
1537
1538 return cpu_to_le32(*(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET]);
1539}
1540
1541static u8 *
1542qla82xx_get_fw_offs(struct qla_hw_data *ha)
1543{
1544 u32 offset = IMAGE_START;
1545 struct qla82xx_uri_data_desc *uri_desc = NULL;
1546
1547 if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1548 uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
1549 QLA82XX_URI_FIRMWARE_IDX_OFF);
1550 if (uri_desc)
1551 offset = cpu_to_le32(uri_desc->findex);
1552 }
1553
1554 return (u8 *)&ha->hablob->fw->data[offset];
1555}
1556
1680/* PCI related functions */ 1557/* PCI related functions */
1681char * 1558char *
1682qla82xx_pci_info_str(struct scsi_qla_host *vha, char *str) 1559qla82xx_pci_info_str(struct scsi_qla_host *vha, char *str)
@@ -1714,22 +1591,6 @@ int qla82xx_pci_region_offset(struct pci_dev *pdev, int region)
1714 return val; 1591 return val;
1715} 1592}
1716 1593
1717int qla82xx_pci_region_len(struct pci_dev *pdev, int region)
1718{
1719 unsigned long val = 0;
1720 u32 control;
1721 switch (region) {
1722 case 0:
1723 pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL, &control);
1724 val = control;
1725 break;
1726 case 1:
1727 val = pci_resource_len(pdev, 0) -
1728 qla82xx_pci_region_offset(pdev, 1);
1729 break;
1730 }
1731 return val;
1732}
1733 1594
1734int 1595int
1735qla82xx_iospace_config(struct qla_hw_data *ha) 1596qla82xx_iospace_config(struct qla_hw_data *ha)
@@ -1851,12 +1712,6 @@ void qla82xx_config_rings(struct scsi_qla_host *vha)
1851 icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma)); 1712 icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1852 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma)); 1713 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
1853 1714
1854 icb->version = 1;
1855 icb->frame_payload_size = 2112;
1856 icb->execution_throttle = 8;
1857 icb->exchange_count = 128;
1858 icb->login_retry_count = 8;
1859
1860 WRT_REG_DWORD((unsigned long __iomem *)&reg->req_q_out[0], 0); 1715 WRT_REG_DWORD((unsigned long __iomem *)&reg->req_q_out[0], 0);
1861 WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_in[0], 0); 1716 WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_in[0], 0);
1862 WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_out[0], 0); 1717 WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_out[0], 0);
@@ -1878,19 +1733,19 @@ int qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
1878 1733
1879 size = (IMAGE_START - BOOTLD_START) / 8; 1734 size = (IMAGE_START - BOOTLD_START) / 8;
1880 1735
1881 ptr64 = (u64 *)&ha->hablob->fw->data[BOOTLD_START]; 1736 ptr64 = (u64 *)qla82xx_get_bootld_offset(ha);
1882 flashaddr = BOOTLD_START; 1737 flashaddr = BOOTLD_START;
1883 1738
1884 for (i = 0; i < size; i++) { 1739 for (i = 0; i < size; i++) {
1885 data = cpu_to_le64(ptr64[i]); 1740 data = cpu_to_le64(ptr64[i]);
1886 qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8); 1741 if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
1742 return -EIO;
1887 flashaddr += 8; 1743 flashaddr += 8;
1888 } 1744 }
1889 1745
1890 size = *(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET];
1891 size = (__force u32)cpu_to_le32(size) / 8;
1892 ptr64 = (u64 *)&ha->hablob->fw->data[IMAGE_START];
1893 flashaddr = FLASH_ADDR_START; 1746 flashaddr = FLASH_ADDR_START;
1747 size = (__force u32)qla82xx_get_fw_size(ha) / 8;
1748 ptr64 = (u64 *)qla82xx_get_fw_offs(ha);
1894 1749
1895 for (i = 0; i < size; i++) { 1750 for (i = 0; i < size; i++) {
1896 data = cpu_to_le64(ptr64[i]); 1751 data = cpu_to_le64(ptr64[i]);
@@ -1899,19 +1754,85 @@ int qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
1899 return -EIO; 1754 return -EIO;
1900 flashaddr += 8; 1755 flashaddr += 8;
1901 } 1756 }
1757 udelay(100);
1902 1758
1903 /* Write a magic value to CAMRAM register 1759 /* Write a magic value to CAMRAM register
1904 * at a specified offset to indicate 1760 * at a specified offset to indicate
1905 * that all data is written and 1761 * that all data is written and
1906 * ready for firmware to initialize. 1762 * ready for firmware to initialize.
1907 */ 1763 */
1908 qla82xx_wr_32(ha, QLA82XX_CAM_RAM(0x1fc), 0x12345678); 1764 qla82xx_wr_32(ha, QLA82XX_CAM_RAM(0x1fc), QLA82XX_BDINFO_MAGIC);
1909 1765
1910 if (QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision)) { 1766 read_lock(&ha->hw_lock);
1911 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020); 1767 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
1912 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e); 1768 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
1913 } else 1769 read_unlock(&ha->hw_lock);
1914 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001d); 1770 return 0;
1771}
1772
1773static int
1774qla82xx_set_product_offset(struct qla_hw_data *ha)
1775{
1776 struct qla82xx_uri_table_desc *ptab_desc = NULL;
1777 const uint8_t *unirom = ha->hablob->fw->data;
1778 uint32_t i;
1779 __le32 entries;
1780 __le32 flags, file_chiprev, offset;
1781 uint8_t chiprev = ha->chip_revision;
1782 /* Hardcoding mn_present flag for P3P */
1783 int mn_present = 0;
1784 uint32_t flagbit;
1785
1786 ptab_desc = qla82xx_get_table_desc(unirom,
1787 QLA82XX_URI_DIR_SECT_PRODUCT_TBL);
1788 if (!ptab_desc)
1789 return -1;
1790
1791 entries = cpu_to_le32(ptab_desc->num_entries);
1792
1793 for (i = 0; i < entries; i++) {
1794 offset = cpu_to_le32(ptab_desc->findex) +
1795 (i * cpu_to_le32(ptab_desc->entry_size));
1796 flags = cpu_to_le32(*((int *)&unirom[offset] +
1797 QLA82XX_URI_FLAGS_OFF));
1798 file_chiprev = cpu_to_le32(*((int *)&unirom[offset] +
1799 QLA82XX_URI_CHIP_REV_OFF));
1800
1801 flagbit = mn_present ? 1 : 2;
1802
1803 if ((chiprev == file_chiprev) && ((1ULL << flagbit) & flags)) {
1804 ha->file_prd_off = offset;
1805 return 0;
1806 }
1807 }
1808 return -1;
1809}
1810
1811int
1812qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type)
1813{
1814 __le32 val;
1815 uint32_t min_size;
1816 struct qla_hw_data *ha = vha->hw;
1817 const struct firmware *fw = ha->hablob->fw;
1818
1819 ha->fw_type = fw_type;
1820
1821 if (fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1822 if (qla82xx_set_product_offset(ha))
1823 return -EINVAL;
1824
1825 min_size = QLA82XX_URI_FW_MIN_SIZE;
1826 } else {
1827 val = cpu_to_le32(*(u32 *)&fw->data[QLA82XX_FW_MAGIC_OFFSET]);
1828 if ((__force u32)val != QLA82XX_BDINFO_MAGIC)
1829 return -EINVAL;
1830
1831 min_size = QLA82XX_FW_MIN_SIZE;
1832 }
1833
1834 if (fw->size < min_size)
1835 return -EINVAL;
1915 return 0; 1836 return 0;
1916} 1837}
1917 1838
@@ -2097,8 +2018,6 @@ qla82xx_intr_handler(int irq, void *dev_id)
2097 2018
2098 if (RD_REG_DWORD(&reg->host_int)) { 2019 if (RD_REG_DWORD(&reg->host_int)) {
2099 stat = RD_REG_DWORD(&reg->host_status); 2020 stat = RD_REG_DWORD(&reg->host_status);
2100 if ((stat & HSRX_RISC_INT) == 0)
2101 break;
2102 2021
2103 switch (stat & 0xff) { 2022 switch (stat & 0xff) {
2104 case 0x1: 2023 case 0x1:
@@ -2173,8 +2092,6 @@ qla82xx_msix_default(int irq, void *dev_id)
2173 do { 2092 do {
2174 if (RD_REG_DWORD(&reg->host_int)) { 2093 if (RD_REG_DWORD(&reg->host_int)) {
2175 stat = RD_REG_DWORD(&reg->host_status); 2094 stat = RD_REG_DWORD(&reg->host_status);
2176 if ((stat & HSRX_RISC_INT) == 0)
2177 break;
2178 2095
2179 switch (stat & 0xff) { 2096 switch (stat & 0xff) {
2180 case 0x1: 2097 case 0x1:
@@ -2424,12 +2341,6 @@ int qla82xx_load_fw(scsi_qla_host_t *vha)
2424 struct fw_blob *blob; 2341 struct fw_blob *blob;
2425 struct qla_hw_data *ha = vha->hw; 2342 struct qla_hw_data *ha = vha->hw;
2426 2343
2427 /* Put both the PEG CMD and RCV PEG to default state
2428 * of 0 before resetting the hardware
2429 */
2430 qla82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
2431 qla82xx_wr_32(ha, CRB_RCVPEG_STATE, 0);
2432
2433 if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) { 2344 if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) {
2434 qla_printk(KERN_ERR, ha, 2345 qla_printk(KERN_ERR, ha,
2435 "%s: Error during CRB Initialization\n", __func__); 2346 "%s: Error during CRB Initialization\n", __func__);
@@ -2470,6 +2381,18 @@ try_blob_fw:
2470 goto fw_load_failed; 2381 goto fw_load_failed;
2471 } 2382 }
2472 2383
2384 /* Validating firmware blob */
2385 if (qla82xx_validate_firmware_blob(vha,
2386 QLA82XX_FLASH_ROMIMAGE)) {
2387 /* Fallback to URI format */
2388 if (qla82xx_validate_firmware_blob(vha,
2389 QLA82XX_UNIFIED_ROMIMAGE)) {
2390 qla_printk(KERN_ERR, ha,
2391 "No valid firmware image found!!!");
2392 return QLA_FUNCTION_FAILED;
2393 }
2394 }
2395
2473 if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) { 2396 if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) {
2474 qla_printk(KERN_ERR, ha, 2397 qla_printk(KERN_ERR, ha,
2475 "%s: Firmware loaded successfully " 2398 "%s: Firmware loaded successfully "
@@ -2498,6 +2421,12 @@ qla82xx_start_firmware(scsi_qla_host_t *vha)
2498 /* scrub dma mask expansion register */ 2421 /* scrub dma mask expansion register */
2499 qla82xx_wr_32(ha, CRB_DMA_SHIFT, 0x55555555); 2422 qla82xx_wr_32(ha, CRB_DMA_SHIFT, 0x55555555);
2500 2423
2424 /* Put both the PEG CMD and RCV PEG to default state
2425 * of 0 before resetting the hardware
2426 */
2427 qla82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
2428 qla82xx_wr_32(ha, CRB_RCVPEG_STATE, 0);
2429
2501 /* Overwrite stale initialization register values */ 2430 /* Overwrite stale initialization register values */
2502 qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0); 2431 qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0);
2503 qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0); 2432 qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);
@@ -2977,10 +2906,10 @@ qla82xx_unprotect_flash(struct qla_hw_data *ha)
2977 if (ret < 0) 2906 if (ret < 0)
2978 goto done_unprotect; 2907 goto done_unprotect;
2979 2908
2980 val &= ~(0x7 << 2); 2909 val &= ~(BLOCK_PROTECT_BITS << 2);
2981 ret = qla82xx_write_status_reg(ha, val); 2910 ret = qla82xx_write_status_reg(ha, val);
2982 if (ret < 0) { 2911 if (ret < 0) {
2983 val |= (0x7 << 2); 2912 val |= (BLOCK_PROTECT_BITS << 2);
2984 qla82xx_write_status_reg(ha, val); 2913 qla82xx_write_status_reg(ha, val);
2985 } 2914 }
2986 2915
@@ -3008,7 +2937,7 @@ qla82xx_protect_flash(struct qla_hw_data *ha)
3008 if (ret < 0) 2937 if (ret < 0)
3009 goto done_protect; 2938 goto done_protect;
3010 2939
3011 val |= (0x7 << 2); 2940 val |= (BLOCK_PROTECT_BITS << 2);
3012 /* LOCK all sectors */ 2941 /* LOCK all sectors */
3013 ret = qla82xx_write_status_reg(ha, val); 2942 ret = qla82xx_write_status_reg(ha, val);
3014 if (ret < 0) 2943 if (ret < 0)
@@ -3201,11 +3130,16 @@ qla82xx_start_iocbs(srb_t *sp)
3201 dbval = 0x04 | (ha->portnum << 5); 3130 dbval = 0x04 | (ha->portnum << 5);
3202 3131
3203 dbval = dbval | (req->id << 8) | (req->ring_index << 16); 3132 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3204 WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval); 3133 if (ql2xdbwr)
3205 wmb(); 3134 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
3206 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) { 3135 else {
3207 WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval); 3136 WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
3208 wmb(); 3137 wmb();
3138 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3139 WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr,
3140 dbval);
3141 wmb();
3142 }
3209 } 3143 }
3210} 3144}
3211 3145
@@ -3279,6 +3213,10 @@ qla82xx_dev_failed_handler(scsi_qla_host_t *vha)
3279 /* Disable the board */ 3213 /* Disable the board */
3280 qla_printk(KERN_INFO, ha, "Disabling the board\n"); 3214 qla_printk(KERN_INFO, ha, "Disabling the board\n");
3281 3215
3216 qla82xx_idc_lock(ha);
3217 qla82xx_clear_drv_active(ha);
3218 qla82xx_idc_unlock(ha);
3219
3282 /* Set DEV_FAILED flag to disable timer */ 3220 /* Set DEV_FAILED flag to disable timer */
3283 vha->device_flags |= DFLG_DEV_FAILED; 3221 vha->device_flags |= DFLG_DEV_FAILED;
3284 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); 3222 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
@@ -3369,6 +3307,14 @@ qla82xx_check_fw_alive(scsi_qla_host_t *vha)
3369 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3307 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3370 } 3308 }
3371 qla2xxx_wake_dpc(vha); 3309 qla2xxx_wake_dpc(vha);
3310 if (ha->flags.mbox_busy) {
3311 ha->flags.fw_hung = 1;
3312 ha->flags.mbox_int = 1;
3313 DEBUG2(qla_printk(KERN_ERR, ha,
3314 "Due to fw hung, doing premature "
3315 "completion of mbx command\n"));
3316 complete(&ha->mbx_intr_comp);
3317 }
3372 } 3318 }
3373 } 3319 }
3374 vha->fw_heartbeat_counter = fw_heartbeat_counter; 3320 vha->fw_heartbeat_counter = fw_heartbeat_counter;
@@ -3472,6 +3418,14 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3472 "%s(): Adapter reset needed!\n", __func__); 3418 "%s(): Adapter reset needed!\n", __func__);
3473 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3419 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3474 qla2xxx_wake_dpc(vha); 3420 qla2xxx_wake_dpc(vha);
3421 if (ha->flags.mbox_busy) {
3422 ha->flags.fw_hung = 1;
3423 ha->flags.mbox_int = 1;
3424 DEBUG2(qla_printk(KERN_ERR, ha,
3425 "Need reset, doing premature "
3426 "completion of mbx command\n"));
3427 complete(&ha->mbx_intr_comp);
3428 }
3475 } else { 3429 } else {
3476 qla82xx_check_fw_alive(vha); 3430 qla82xx_check_fw_alive(vha);
3477 } 3431 }
@@ -3527,8 +3481,10 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
3527 qla82xx_clear_rst_ready(ha); 3481 qla82xx_clear_rst_ready(ha);
3528 qla82xx_idc_unlock(ha); 3482 qla82xx_idc_unlock(ha);
3529 3483
3530 if (rval == QLA_SUCCESS) 3484 if (rval == QLA_SUCCESS) {
3485 ha->flags.fw_hung = 0;
3531 qla82xx_restart_isp(vha); 3486 qla82xx_restart_isp(vha);
3487 }
3532 3488
3533 if (rval) { 3489 if (rval) {
3534 vha->flags.online = 1; 3490 vha->flags.online = 1;
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index f8f99a5ea532..569232b45502 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -538,11 +538,10 @@
538/* Driver Coexistence Defines */ 538/* Driver Coexistence Defines */
539#define QLA82XX_CRB_DRV_ACTIVE (QLA82XX_CAM_RAM(0x138)) 539#define QLA82XX_CRB_DRV_ACTIVE (QLA82XX_CAM_RAM(0x138))
540#define QLA82XX_CRB_DEV_STATE (QLA82XX_CAM_RAM(0x140)) 540#define QLA82XX_CRB_DEV_STATE (QLA82XX_CAM_RAM(0x140))
541#define QLA82XX_CRB_DEV_PART_INFO (QLA82XX_CAM_RAM(0x14c))
542#define QLA82XX_CRB_DRV_IDC_VERSION (QLA82XX_CAM_RAM(0x174))
543#define QLA82XX_CRB_DRV_STATE (QLA82XX_CAM_RAM(0x144)) 541#define QLA82XX_CRB_DRV_STATE (QLA82XX_CAM_RAM(0x144))
544#define QLA82XX_CRB_DRV_SCRATCH (QLA82XX_CAM_RAM(0x148)) 542#define QLA82XX_CRB_DRV_SCRATCH (QLA82XX_CAM_RAM(0x148))
545#define QLA82XX_CRB_DEV_PART_INFO (QLA82XX_CAM_RAM(0x14c)) 543#define QLA82XX_CRB_DEV_PART_INFO (QLA82XX_CAM_RAM(0x14c))
544#define QLA82XX_CRB_DRV_IDC_VERSION (QLA82XX_CAM_RAM(0x174))
546 545
547/* Every driver should use these Device State */ 546/* Every driver should use these Device State */
548#define QLA82XX_DEV_COLD 1 547#define QLA82XX_DEV_COLD 1
@@ -774,15 +773,49 @@ struct qla82xx_legacy_intr_set {
774 .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \ 773 .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \
775} 774}
776 775
776#define BRDCFG_START 0x4000
777#define BOOTLD_START 0x10000 777#define BOOTLD_START 0x10000
778#define IMAGE_START 0x100000 778#define IMAGE_START 0x100000
779#define FLASH_ADDR_START 0x43000 779#define FLASH_ADDR_START 0x43000
780 780
781/* Magic number to let user know flash is programmed */ 781/* Magic number to let user know flash is programmed */
782#define QLA82XX_BDINFO_MAGIC 0x12345678 782#define QLA82XX_BDINFO_MAGIC 0x12345678
783#define QLA82XX_FW_MAGIC_OFFSET (BRDCFG_START + 0x128)
783#define FW_SIZE_OFFSET (0x3e840c) 784#define FW_SIZE_OFFSET (0x3e840c)
785#define QLA82XX_FW_MIN_SIZE 0x3fffff
786
787/* UNIFIED ROMIMAGE START */
788#define QLA82XX_URI_FW_MIN_SIZE 0xc8000
789#define QLA82XX_URI_DIR_SECT_PRODUCT_TBL 0x0
790#define QLA82XX_URI_DIR_SECT_BOOTLD 0x6
791#define QLA82XX_URI_DIR_SECT_FW 0x7
792
793/* Offsets */
794#define QLA82XX_URI_CHIP_REV_OFF 10
795#define QLA82XX_URI_FLAGS_OFF 11
796#define QLA82XX_URI_BIOS_VERSION_OFF 12
797#define QLA82XX_URI_BOOTLD_IDX_OFF 27
798#define QLA82XX_URI_FIRMWARE_IDX_OFF 29
799
800struct qla82xx_uri_table_desc{
801 uint32_t findex;
802 uint32_t num_entries;
803 uint32_t entry_size;
804 uint32_t reserved[5];
805};
806
807struct qla82xx_uri_data_desc{
808 uint32_t findex;
809 uint32_t size;
810 uint32_t reserved[5];
811};
812
813/* UNIFIED ROMIMAGE END */
814
815#define QLA82XX_UNIFIED_ROMIMAGE 3
816#define QLA82XX_FLASH_ROMIMAGE 4
817#define QLA82XX_UNKNOWN_ROMIMAGE 0xff
784 818
785#define QLA82XX_IS_REVISION_P3PLUS(_rev_) ((_rev_) >= 0x50)
786#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x0b0) 819#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x0b0)
787#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x0b4) 820#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x0b4)
788 821
@@ -853,7 +886,7 @@ struct ct6_dsd {
853 struct list_head dsd_list; 886 struct list_head dsd_list;
854}; 887};
855 888
856#define MBC_TOGGLE_INTR 0x10 889#define MBC_TOGGLE_INTERRUPT 0x10
857 890
858/* Flash offset */ 891/* Flash offset */
859#define FLT_REG_BOOTLOAD_82XX 0x72 892#define FLT_REG_BOOTLOAD_82XX 0x72
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index be1a8fcbb1fb..ff2172da7c19 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -140,7 +140,7 @@ MODULE_PARM_DESC(ql2xetsenable,
140 "Enables firmware ETS burst." 140 "Enables firmware ETS burst."
141 "Default is 0 - skip ETS enablement."); 141 "Default is 0 - skip ETS enablement.");
142 142
143int ql2xdbwr; 143int ql2xdbwr = 1;
144module_param(ql2xdbwr, int, S_IRUGO|S_IRUSR); 144module_param(ql2xdbwr, int, S_IRUGO|S_IRUSR);
145MODULE_PARM_DESC(ql2xdbwr, 145MODULE_PARM_DESC(ql2xdbwr,
146 "Option to specify scheme for request queue posting\n" 146 "Option to specify scheme for request queue posting\n"
@@ -517,6 +517,7 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
517 if (!sp) 517 if (!sp)
518 return sp; 518 return sp;
519 519
520 atomic_set(&sp->ref_count, 1);
520 sp->fcport = fcport; 521 sp->fcport = fcport;
521 sp->cmd = cmd; 522 sp->cmd = cmd;
522 sp->flags = 0; 523 sp->flags = 0;
@@ -700,7 +701,7 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
700 * Success (Adapter is online/no flash ops) : 0 701 * Success (Adapter is online/no flash ops) : 0
701 * Failed (Adapter is offline/disabled/flash ops in progress) : 1 702 * Failed (Adapter is offline/disabled/flash ops in progress) : 1
702 */ 703 */
703int 704static int
704qla2x00_wait_for_reset_ready(scsi_qla_host_t *vha) 705qla2x00_wait_for_reset_ready(scsi_qla_host_t *vha)
705{ 706{
706 int return_status; 707 int return_status;
@@ -797,6 +798,12 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
797 return (return_status); 798 return (return_status);
798} 799}
799 800
801static void
802sp_get(struct srb *sp)
803{
804 atomic_inc(&sp->ref_count);
805}
806
800/************************************************************************** 807/**************************************************************************
801* qla2xxx_eh_abort 808* qla2xxx_eh_abort
802* 809*
@@ -825,6 +832,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
825 struct qla_hw_data *ha = vha->hw; 832 struct qla_hw_data *ha = vha->hw;
826 struct req_que *req = vha->req; 833 struct req_que *req = vha->req;
827 srb_t *spt; 834 srb_t *spt;
835 int got_ref = 0;
828 836
829 fc_block_scsi_eh(cmd); 837 fc_block_scsi_eh(cmd);
830 838
@@ -856,6 +864,10 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
856 DEBUG2(printk("%s(%ld): aborting sp %p from RISC." 864 DEBUG2(printk("%s(%ld): aborting sp %p from RISC."
857 " pid=%ld.\n", __func__, vha->host_no, sp, serial)); 865 " pid=%ld.\n", __func__, vha->host_no, sp, serial));
858 866
867 /* Get a reference to the sp and drop the lock.*/
868 sp_get(sp);
869 got_ref++;
870
859 spin_unlock_irqrestore(&ha->hardware_lock, flags); 871 spin_unlock_irqrestore(&ha->hardware_lock, flags);
860 if (ha->isp_ops->abort_command(sp)) { 872 if (ha->isp_ops->abort_command(sp)) {
861 DEBUG2(printk("%s(%ld): abort_command " 873 DEBUG2(printk("%s(%ld): abort_command "
@@ -881,6 +893,9 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
881 } 893 }
882 } 894 }
883 895
896 if (got_ref)
897 qla2x00_sp_compl(ha, sp);
898
884 qla_printk(KERN_INFO, ha, 899 qla_printk(KERN_INFO, ha,
885 "scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n", 900 "scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n",
886 vha->host_no, id, lun, wait, serial, ret); 901 vha->host_no, id, lun, wait, serial, ret);
@@ -888,24 +903,17 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
888 return ret; 903 return ret;
889} 904}
890 905
891enum nexus_wait_type { 906int
892 WAIT_HOST = 0,
893 WAIT_TARGET,
894 WAIT_LUN,
895};
896
897static int
898qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t, 907qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
899 unsigned int l, srb_t *sp, enum nexus_wait_type type) 908 unsigned int l, enum nexus_wait_type type)
900{ 909{
901 int cnt, match, status; 910 int cnt, match, status;
902 unsigned long flags; 911 unsigned long flags;
903 struct qla_hw_data *ha = vha->hw; 912 struct qla_hw_data *ha = vha->hw;
904 struct req_que *req; 913 struct req_que *req;
914 srb_t *sp;
905 915
906 status = QLA_SUCCESS; 916 status = QLA_SUCCESS;
907 if (!sp)
908 return status;
909 917
910 spin_lock_irqsave(&ha->hardware_lock, flags); 918 spin_lock_irqsave(&ha->hardware_lock, flags);
911 req = vha->req; 919 req = vha->req;
@@ -943,24 +951,6 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
943 return status; 951 return status;
944} 952}
945 953
946void qla82xx_wait_for_pending_commands(scsi_qla_host_t *vha)
947{
948 int cnt;
949 srb_t *sp;
950 struct req_que *req = vha->req;
951
952 DEBUG2(qla_printk(KERN_INFO, vha->hw,
953 "Waiting for pending commands\n"));
954 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
955 sp = req->outstanding_cmds[cnt];
956 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
957 sp, WAIT_HOST) == QLA_SUCCESS) {
958 DEBUG2(qla_printk(KERN_INFO, vha->hw,
959 "Done wait for pending commands\n"));
960 }
961 }
962}
963
964static char *reset_errors[] = { 954static char *reset_errors[] = {
965 "HBA not online", 955 "HBA not online",
966 "HBA not ready", 956 "HBA not ready",
@@ -996,7 +986,7 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
996 goto eh_reset_failed; 986 goto eh_reset_failed;
997 err = 3; 987 err = 3;
998 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id, 988 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
999 cmd->device->lun, (srb_t *) CMD_SP(cmd), type) != QLA_SUCCESS) 989 cmd->device->lun, type) != QLA_SUCCESS)
1000 goto eh_reset_failed; 990 goto eh_reset_failed;
1001 991
1002 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n", 992 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n",
@@ -1004,7 +994,7 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
1004 994
1005 return SUCCESS; 995 return SUCCESS;
1006 996
1007 eh_reset_failed: 997eh_reset_failed:
1008 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n" 998 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n"
1009 , vha->host_no, cmd->device->id, cmd->device->lun, name, 999 , vha->host_no, cmd->device->id, cmd->device->lun, name,
1010 reset_errors[err]); 1000 reset_errors[err]);
@@ -1054,7 +1044,6 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
1054 int ret = FAILED; 1044 int ret = FAILED;
1055 unsigned int id, lun; 1045 unsigned int id, lun;
1056 unsigned long serial; 1046 unsigned long serial;
1057 srb_t *sp = (srb_t *) CMD_SP(cmd);
1058 1047
1059 fc_block_scsi_eh(cmd); 1048 fc_block_scsi_eh(cmd);
1060 1049
@@ -1081,7 +1070,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
1081 goto eh_bus_reset_done; 1070 goto eh_bus_reset_done;
1082 1071
1083 /* Flush outstanding commands. */ 1072 /* Flush outstanding commands. */
1084 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, sp, WAIT_HOST) != 1073 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) !=
1085 QLA_SUCCESS) 1074 QLA_SUCCESS)
1086 ret = FAILED; 1075 ret = FAILED;
1087 1076
@@ -1116,7 +1105,6 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1116 int ret = FAILED; 1105 int ret = FAILED;
1117 unsigned int id, lun; 1106 unsigned int id, lun;
1118 unsigned long serial; 1107 unsigned long serial;
1119 srb_t *sp = (srb_t *) CMD_SP(cmd);
1120 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 1108 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
1121 1109
1122 fc_block_scsi_eh(cmd); 1110 fc_block_scsi_eh(cmd);
@@ -1171,7 +1159,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1171 } 1159 }
1172 1160
1173 /* Waiting for command to be returned to OS.*/ 1161 /* Waiting for command to be returned to OS.*/
1174 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, sp, WAIT_HOST) == 1162 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) ==
1175 QLA_SUCCESS) 1163 QLA_SUCCESS)
1176 ret = SUCCESS; 1164 ret = SUCCESS;
1177 1165
@@ -1662,7 +1650,7 @@ static struct isp_operations qla81xx_isp_ops = {
1662 .read_optrom = qla25xx_read_optrom_data, 1650 .read_optrom = qla25xx_read_optrom_data,
1663 .write_optrom = qla24xx_write_optrom_data, 1651 .write_optrom = qla24xx_write_optrom_data,
1664 .get_flash_version = qla24xx_get_flash_version, 1652 .get_flash_version = qla24xx_get_flash_version,
1665 .start_scsi = qla24xx_start_scsi, 1653 .start_scsi = qla24xx_dif_start_scsi,
1666 .abort_isp = qla2x00_abort_isp, 1654 .abort_isp = qla2x00_abort_isp,
1667}; 1655};
1668 1656
@@ -2113,6 +2101,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2113 init_completion(&ha->mbx_cmd_comp); 2101 init_completion(&ha->mbx_cmd_comp);
2114 complete(&ha->mbx_cmd_comp); 2102 complete(&ha->mbx_cmd_comp);
2115 init_completion(&ha->mbx_intr_comp); 2103 init_completion(&ha->mbx_intr_comp);
2104 init_completion(&ha->dcbx_comp);
2116 2105
2117 set_bit(0, (unsigned long *) ha->vp_idx_map); 2106 set_bit(0, (unsigned long *) ha->vp_idx_map);
2118 2107
@@ -2158,7 +2147,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2158 host->this_id = 255; 2147 host->this_id = 255;
2159 host->cmd_per_lun = 3; 2148 host->cmd_per_lun = 3;
2160 host->unique_id = host->host_no; 2149 host->unique_id = host->host_no;
2161 host->max_cmd_len = MAX_CMDSZ; 2150 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif)
2151 host->max_cmd_len = 32;
2152 else
2153 host->max_cmd_len = MAX_CMDSZ;
2162 host->max_channel = MAX_BUSES - 1; 2154 host->max_channel = MAX_BUSES - 1;
2163 host->max_lun = MAX_LUNS; 2155 host->max_lun = MAX_LUNS;
2164 host->transportt = qla2xxx_transport_template; 2156 host->transportt = qla2xxx_transport_template;
@@ -2258,7 +2250,7 @@ skip_dpc:
2258 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n", 2250 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
2259 base_vha->host_no, ha)); 2251 base_vha->host_no, ha));
2260 2252
2261 if (IS_QLA25XX(ha) && ql2xenabledif) { 2253 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) {
2262 if (ha->fw_attributes & BIT_4) { 2254 if (ha->fw_attributes & BIT_4) {
2263 base_vha->flags.difdix_supported = 1; 2255 base_vha->flags.difdix_supported = 1;
2264 DEBUG18(qla_printk(KERN_INFO, ha, 2256 DEBUG18(qla_printk(KERN_INFO, ha,
@@ -2266,8 +2258,10 @@ skip_dpc:
2266 " protection.\n")); 2258 " protection.\n"));
2267 scsi_host_set_prot(host, 2259 scsi_host_set_prot(host,
2268 SHOST_DIF_TYPE1_PROTECTION 2260 SHOST_DIF_TYPE1_PROTECTION
2261 | SHOST_DIF_TYPE2_PROTECTION
2269 | SHOST_DIF_TYPE3_PROTECTION 2262 | SHOST_DIF_TYPE3_PROTECTION
2270 | SHOST_DIX_TYPE1_PROTECTION 2263 | SHOST_DIX_TYPE1_PROTECTION
2264 | SHOST_DIX_TYPE2_PROTECTION
2271 | SHOST_DIX_TYPE3_PROTECTION); 2265 | SHOST_DIX_TYPE3_PROTECTION);
2272 scsi_host_set_guard(host, SHOST_DIX_GUARD_CRC); 2266 scsi_host_set_guard(host, SHOST_DIX_GUARD_CRC);
2273 } else 2267 } else
@@ -2402,6 +2396,10 @@ qla2x00_remove_one(struct pci_dev *pdev)
2402 scsi_host_put(base_vha->host); 2396 scsi_host_put(base_vha->host);
2403 2397
2404 if (IS_QLA82XX(ha)) { 2398 if (IS_QLA82XX(ha)) {
2399 qla82xx_idc_lock(ha);
2400 qla82xx_clear_drv_active(ha);
2401 qla82xx_idc_unlock(ha);
2402
2405 iounmap((device_reg_t __iomem *)ha->nx_pcibase); 2403 iounmap((device_reg_t __iomem *)ha->nx_pcibase);
2406 if (!ql2xdbwr) 2404 if (!ql2xdbwr)
2407 iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr); 2405 iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
@@ -2467,11 +2465,24 @@ qla2x00_free_device(scsi_qla_host_t *vha)
2467 2465
2468 qla2x00_free_irqs(vha); 2466 qla2x00_free_irqs(vha);
2469 2467
2468 qla2x00_free_fcports(vha);
2469
2470 qla2x00_mem_free(ha); 2470 qla2x00_mem_free(ha);
2471 2471
2472 qla2x00_free_queues(ha); 2472 qla2x00_free_queues(ha);
2473} 2473}
2474 2474
2475void qla2x00_free_fcports(struct scsi_qla_host *vha)
2476{
2477 fc_port_t *fcport, *tfcport;
2478
2479 list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
2480 list_del(&fcport->list);
2481 kfree(fcport);
2482 fcport = NULL;
2483 }
2484}
2485
2475static inline void 2486static inline void
2476qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport, 2487qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
2477 int defer) 2488 int defer)
@@ -3463,8 +3474,8 @@ qla2x00_sp_free_dma(srb_t *sp)
3463 CMD_SP(cmd) = NULL; 3474 CMD_SP(cmd) = NULL;
3464} 3475}
3465 3476
3466void 3477static void
3467qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp) 3478qla2x00_sp_final_compl(struct qla_hw_data *ha, srb_t *sp)
3468{ 3479{
3469 struct scsi_cmnd *cmd = sp->cmd; 3480 struct scsi_cmnd *cmd = sp->cmd;
3470 3481
@@ -3485,6 +3496,20 @@ qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
3485 cmd->scsi_done(cmd); 3496 cmd->scsi_done(cmd);
3486} 3497}
3487 3498
3499void
3500qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
3501{
3502 if (atomic_read(&sp->ref_count) == 0) {
3503 DEBUG2(qla_printk(KERN_WARNING, ha,
3504 "SP reference-count to ZERO -- sp=%p\n", sp));
3505 DEBUG2(BUG());
3506 return;
3507 }
3508 if (!atomic_dec_and_test(&sp->ref_count))
3509 return;
3510 qla2x00_sp_final_compl(ha, sp);
3511}
3512
3488/************************************************************************** 3513/**************************************************************************
3489* qla2x00_timer 3514* qla2x00_timer
3490* 3515*
diff --git a/drivers/scsi/qla2xxx/qla_settings.h b/drivers/scsi/qla2xxx/qla_settings.h
index 2801c2664b40..f0b2b9986a55 100644
--- a/drivers/scsi/qla2xxx/qla_settings.h
+++ b/drivers/scsi/qla2xxx/qla_settings.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index de92504d7585..76de9574b385 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -664,6 +664,11 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
664 struct qla_hw_data *ha = vha->hw; 664 struct qla_hw_data *ha = vha->hw;
665 struct req_que *req = ha->req_q_map[0]; 665 struct req_que *req = ha->req_q_map[0];
666 666
667 def = 0;
668 if (IS_QLA25XX(ha))
669 def = 1;
670 else if (IS_QLA81XX(ha))
671 def = 2;
667 ha->flt_region_flt = flt_addr; 672 ha->flt_region_flt = flt_addr;
668 wptr = (uint16_t *)req->ring; 673 wptr = (uint16_t *)req->ring;
669 flt = (struct qla_flt_header *)req->ring; 674 flt = (struct qla_flt_header *)req->ring;
@@ -691,6 +696,10 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
691 goto no_flash_data; 696 goto no_flash_data;
692 } 697 }
693 698
699 /* Assign FCP prio region since older FLT's may not have it */
700 ha->flt_region_fcp_prio = ha->flags.port0 ?
701 fcp_prio_cfg0[def] : fcp_prio_cfg1[def];
702
694 loc = locations[1]; 703 loc = locations[1];
695 cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region); 704 cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
696 for ( ; cnt; cnt--, region++) { 705 for ( ; cnt; cnt--, region++) {
@@ -773,13 +782,6 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
773no_flash_data: 782no_flash_data:
774 /* Use hardcoded defaults. */ 783 /* Use hardcoded defaults. */
775 loc = locations[0]; 784 loc = locations[0];
776 def = 0;
777 if (IS_QLA24XX_TYPE(ha))
778 def = 0;
779 else if (IS_QLA25XX(ha))
780 def = 1;
781 else if (IS_QLA81XX(ha))
782 def = 2;
783 ha->flt_region_fw = def_fw[def]; 785 ha->flt_region_fw = def_fw[def];
784 ha->flt_region_boot = def_boot[def]; 786 ha->flt_region_boot = def_boot[def];
785 ha->flt_region_vpd_nvram = def_vpd_nvram[def]; 787 ha->flt_region_vpd_nvram = def_vpd_nvram[def];
@@ -790,14 +792,13 @@ no_flash_data:
790 ha->flt_region_fdt = def_fdt[def]; 792 ha->flt_region_fdt = def_fdt[def];
791 ha->flt_region_npiv_conf = ha->flags.port0 ? 793 ha->flt_region_npiv_conf = ha->flags.port0 ?
792 def_npiv_conf0[def] : def_npiv_conf1[def]; 794 def_npiv_conf0[def] : def_npiv_conf1[def];
793 ha->flt_region_fcp_prio = ha->flags.port0 ?
794 fcp_prio_cfg0[def] : fcp_prio_cfg1[def];
795done: 795done:
796 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x " 796 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x "
797 "vpd_nvram=0x%x vpd=0x%x nvram=0x%x fdt=0x%x flt=0x%x " 797 "vpd_nvram=0x%x vpd=0x%x nvram=0x%x fdt=0x%x flt=0x%x "
798 "npiv=0x%x.\n", loc, ha->flt_region_boot, ha->flt_region_fw, 798 "npiv=0x%x. fcp_prio_cfg=0x%x\n", loc, ha->flt_region_boot,
799 ha->flt_region_vpd_nvram, ha->flt_region_vpd, ha->flt_region_nvram, 799 ha->flt_region_fw, ha->flt_region_vpd_nvram, ha->flt_region_vpd,
800 ha->flt_region_fdt, ha->flt_region_flt, ha->flt_region_npiv_conf)); 800 ha->flt_region_nvram, ha->flt_region_fdt, ha->flt_region_flt,
801 ha->flt_region_npiv_conf, ha->flt_region_fcp_prio));
801} 802}
802 803
803static void 804static void
@@ -2758,6 +2759,28 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2758 ha->fw_revision[3] = dcode[3]; 2759 ha->fw_revision[3] = dcode[3];
2759 } 2760 }
2760 2761
2762 /* Check for golden firmware and get version if available */
2763 if (!IS_QLA81XX(ha)) {
2764 /* Golden firmware is not present in non 81XX adapters */
2765 return ret;
2766 }
2767
2768 memset(ha->gold_fw_version, 0, sizeof(ha->gold_fw_version));
2769 dcode = mbuf;
2770 ha->isp_ops->read_optrom(vha, (uint8_t *)dcode,
2771 ha->flt_region_gold_fw << 2, 32);
2772
2773 if (dcode[4] == 0xFFFFFFFF && dcode[5] == 0xFFFFFFFF &&
2774 dcode[6] == 0xFFFFFFFF && dcode[7] == 0xFFFFFFFF) {
2775 DEBUG2(qla_printk(KERN_INFO, ha,
2776 "%s(%ld): Unrecognized golden fw at 0x%x.\n",
2777 __func__, vha->host_no, ha->flt_region_gold_fw * 4));
2778 return ret;
2779 }
2780
2781 for (i = 4; i < 8; i++)
2782 ha->gold_fw_version[i-4] = be32_to_cpu(dcode[i]);
2783
2761 return ret; 2784 return ret;
2762} 2785}
2763 2786
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 109068df933f..e75ccb91317d 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -1,15 +1,15 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation 3 * Copyright (c) 2003-2010 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.03.02-k2" 10#define QLA2XXX_VERSION "8.03.03-k0"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 3 13#define QLA_DRIVER_MINOR_VER 3
14#define QLA_DRIVER_PATCH_VER 2 14#define QLA_DRIVER_PATCH_VER 3
15#define QLA_DRIVER_BETA_VER 2 15#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/qla4xxx/Kconfig b/drivers/scsi/qla4xxx/Kconfig
index 69cbff3f57cf..2c33ce6eac1e 100644
--- a/drivers/scsi/qla4xxx/Kconfig
+++ b/drivers/scsi/qla4xxx/Kconfig
@@ -1,7 +1,7 @@
1config SCSI_QLA_ISCSI 1config SCSI_QLA_ISCSI
2 tristate "QLogic ISP4XXX host adapter family support" 2 tristate "QLogic ISP4XXX and ISP82XX host adapter family support"
3 depends on PCI && SCSI && NET 3 depends on PCI && SCSI
4 select SCSI_ISCSI_ATTRS 4 select SCSI_ISCSI_ATTRS
5 ---help--- 5 ---help---
6 This driver supports the QLogic 40xx (ISP4XXX) iSCSI host 6 This driver supports the QLogic 40xx (ISP4XXX) and 8022 (ISP82XX)
7 adapter family. 7 iSCSI host adapter family.
diff --git a/drivers/scsi/qla4xxx/Makefile b/drivers/scsi/qla4xxx/Makefile
index 86ea37baa0fc..0339ff03a535 100644
--- a/drivers/scsi/qla4xxx/Makefile
+++ b/drivers/scsi/qla4xxx/Makefile
@@ -1,5 +1,5 @@
1qla4xxx-y := ql4_os.o ql4_init.o ql4_mbx.o ql4_iocb.o ql4_isr.o \ 1qla4xxx-y := ql4_os.o ql4_init.o ql4_mbx.o ql4_iocb.o ql4_isr.o \
2 ql4_nvram.o ql4_dbg.o 2 ql4_nx.o ql4_nvram.o ql4_dbg.o
3 3
4obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx.o 4obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx.o
5 5
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 428802616e33..a79da8dd2064 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -33,6 +33,8 @@
33#include <scsi/scsi_transport.h> 33#include <scsi/scsi_transport.h>
34#include <scsi/scsi_transport_iscsi.h> 34#include <scsi/scsi_transport_iscsi.h>
35 35
36#include "ql4_dbg.h"
37#include "ql4_nx.h"
36 38
37#ifndef PCI_DEVICE_ID_QLOGIC_ISP4010 39#ifndef PCI_DEVICE_ID_QLOGIC_ISP4010
38#define PCI_DEVICE_ID_QLOGIC_ISP4010 0x4010 40#define PCI_DEVICE_ID_QLOGIC_ISP4010 0x4010
@@ -46,6 +48,10 @@
46#define PCI_DEVICE_ID_QLOGIC_ISP4032 0x4032 48#define PCI_DEVICE_ID_QLOGIC_ISP4032 0x4032
47#endif 49#endif
48 50
51#ifndef PCI_DEVICE_ID_QLOGIC_ISP8022
52#define PCI_DEVICE_ID_QLOGIC_ISP8022 0x8022
53#endif
54
49#define QLA_SUCCESS 0 55#define QLA_SUCCESS 0
50#define QLA_ERROR 1 56#define QLA_ERROR 1
51 57
@@ -85,15 +91,22 @@
85#define BIT_30 0x40000000 91#define BIT_30 0x40000000
86#define BIT_31 0x80000000 92#define BIT_31 0x80000000
87 93
94/**
95 * Macros to help code, maintain, etc.
96 **/
97#define ql4_printk(level, ha, format, arg...) \
98 dev_printk(level , &((ha)->pdev->dev) , format , ## arg)
99
100
88/* 101/*
89 * Host adapter default definitions 102 * Host adapter default definitions
90 ***********************************/ 103 ***********************************/
91#define MAX_HBAS 16 104#define MAX_HBAS 16
92#define MAX_BUSES 1 105#define MAX_BUSES 1
93#define MAX_TARGETS (MAX_PRST_DEV_DB_ENTRIES + MAX_DEV_DB_ENTRIES) 106#define MAX_TARGETS MAX_DEV_DB_ENTRIES
94#define MAX_LUNS 0xffff 107#define MAX_LUNS 0xffff
95#define MAX_AEN_ENTRIES 256 /* should be > EXT_DEF_MAX_AEN_QUEUE */ 108#define MAX_AEN_ENTRIES 256 /* should be > EXT_DEF_MAX_AEN_QUEUE */
96#define MAX_DDB_ENTRIES (MAX_PRST_DEV_DB_ENTRIES + MAX_DEV_DB_ENTRIES) 109#define MAX_DDB_ENTRIES MAX_DEV_DB_ENTRIES
97#define MAX_PDU_ENTRIES 32 110#define MAX_PDU_ENTRIES 32
98#define INVALID_ENTRY 0xFFFF 111#define INVALID_ENTRY 0xFFFF
99#define MAX_CMDS_TO_RISC 1024 112#define MAX_CMDS_TO_RISC 1024
@@ -118,7 +131,7 @@
118#define DRIVER_NAME "qla4xxx" 131#define DRIVER_NAME "qla4xxx"
119 132
120#define MAX_LINKED_CMDS_PER_LUN 3 133#define MAX_LINKED_CMDS_PER_LUN 3
121#define MAX_REQS_SERVICED_PER_INTR 16 134#define MAX_REQS_SERVICED_PER_INTR 1
122 135
123#define ISCSI_IPADDR_SIZE 4 /* IP address size */ 136#define ISCSI_IPADDR_SIZE 4 /* IP address size */
124#define ISCSI_ALIAS_SIZE 32 /* ISCSI Alias name size */ 137#define ISCSI_ALIAS_SIZE 32 /* ISCSI Alias name size */
@@ -134,7 +147,7 @@
134#define SOFT_RESET_TOV 30 147#define SOFT_RESET_TOV 30
135#define RESET_INTR_TOV 3 148#define RESET_INTR_TOV 3
136#define SEMAPHORE_TOV 10 149#define SEMAPHORE_TOV 10
137#define ADAPTER_INIT_TOV 120 150#define ADAPTER_INIT_TOV 30
138#define ADAPTER_RESET_TOV 180 151#define ADAPTER_RESET_TOV 180
139#define EXTEND_CMD_TOV 60 152#define EXTEND_CMD_TOV 60
140#define WAIT_CMD_TOV 30 153#define WAIT_CMD_TOV 30
@@ -184,8 +197,6 @@ struct srb {
184 uint16_t iocb_tov; 197 uint16_t iocb_tov;
185 uint16_t iocb_cnt; /* Number of used iocbs */ 198 uint16_t iocb_cnt; /* Number of used iocbs */
186 uint16_t cc_stat; 199 uint16_t cc_stat;
187 u_long r_start; /* Time we recieve a cmd from OS */
188 u_long u_start; /* Time when we handed the cmd to F/W */
189 200
190 /* Used for extended sense / status continuation */ 201 /* Used for extended sense / status continuation */
191 uint8_t *req_sense_ptr; 202 uint8_t *req_sense_ptr;
@@ -221,7 +232,6 @@ struct ddb_entry {
221 unsigned long dev_scan_wait_to_start_relogin; 232 unsigned long dev_scan_wait_to_start_relogin;
222 unsigned long dev_scan_wait_to_complete_relogin; 233 unsigned long dev_scan_wait_to_complete_relogin;
223 234
224 uint16_t os_target_id; /* Target ID */
225 uint16_t fw_ddb_index; /* DDB firmware index */ 235 uint16_t fw_ddb_index; /* DDB firmware index */
226 uint16_t options; 236 uint16_t options;
227 uint32_t fw_ddb_device_state; /* F/W Device State -- see ql4_fw.h */ 237 uint32_t fw_ddb_device_state; /* F/W Device State -- see ql4_fw.h */
@@ -285,6 +295,67 @@ struct ddb_entry {
285#include "ql4_fw.h" 295#include "ql4_fw.h"
286#include "ql4_nvram.h" 296#include "ql4_nvram.h"
287 297
298struct ql82xx_hw_data {
299 /* Offsets for flash/nvram access (set to ~0 if not used). */
300 uint32_t flash_conf_off;
301 uint32_t flash_data_off;
302
303 uint32_t fdt_wrt_disable;
304 uint32_t fdt_erase_cmd;
305 uint32_t fdt_block_size;
306 uint32_t fdt_unprotect_sec_cmd;
307 uint32_t fdt_protect_sec_cmd;
308
309 uint32_t flt_region_flt;
310 uint32_t flt_region_fdt;
311 uint32_t flt_region_boot;
312 uint32_t flt_region_bootload;
313 uint32_t flt_region_fw;
314 uint32_t reserved;
315};
316
317struct qla4_8xxx_legacy_intr_set {
318 uint32_t int_vec_bit;
319 uint32_t tgt_status_reg;
320 uint32_t tgt_mask_reg;
321 uint32_t pci_int_reg;
322};
323
324/* MSI-X Support */
325
326#define QLA_MSIX_DEFAULT 0x00
327#define QLA_MSIX_RSP_Q 0x01
328
329#define QLA_MSIX_ENTRIES 2
330#define QLA_MIDX_DEFAULT 0
331#define QLA_MIDX_RSP_Q 1
332
333struct ql4_msix_entry {
334 int have_irq;
335 uint16_t msix_vector;
336 uint16_t msix_entry;
337};
338
339/*
340 * ISP Operations
341 */
342struct isp_operations {
343 int (*iospace_config) (struct scsi_qla_host *ha);
344 void (*pci_config) (struct scsi_qla_host *);
345 void (*disable_intrs) (struct scsi_qla_host *);
346 void (*enable_intrs) (struct scsi_qla_host *);
347 int (*start_firmware) (struct scsi_qla_host *);
348 irqreturn_t (*intr_handler) (int , void *);
349 void (*interrupt_service_routine) (struct scsi_qla_host *, uint32_t);
350 int (*reset_chip) (struct scsi_qla_host *);
351 int (*reset_firmware) (struct scsi_qla_host *);
352 void (*queue_iocb) (struct scsi_qla_host *);
353 void (*complete_iocb) (struct scsi_qla_host *);
354 uint16_t (*rd_shdw_req_q_out) (struct scsi_qla_host *);
355 uint16_t (*rd_shdw_rsp_q_in) (struct scsi_qla_host *);
356 int (*get_sys_info) (struct scsi_qla_host *);
357};
358
288/* 359/*
289 * Linux Host Adapter structure 360 * Linux Host Adapter structure
290 */ 361 */
@@ -296,28 +367,39 @@ struct scsi_qla_host {
296#define AF_INIT_DONE 1 /* 0x00000002 */ 367#define AF_INIT_DONE 1 /* 0x00000002 */
297#define AF_MBOX_COMMAND 2 /* 0x00000004 */ 368#define AF_MBOX_COMMAND 2 /* 0x00000004 */
298#define AF_MBOX_COMMAND_DONE 3 /* 0x00000008 */ 369#define AF_MBOX_COMMAND_DONE 3 /* 0x00000008 */
370#define AF_DPC_SCHEDULED 5 /* 0x00000020 */
299#define AF_INTERRUPTS_ON 6 /* 0x00000040 */ 371#define AF_INTERRUPTS_ON 6 /* 0x00000040 */
300#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */ 372#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */
301#define AF_LINK_UP 8 /* 0x00000100 */ 373#define AF_LINK_UP 8 /* 0x00000100 */
302#define AF_IRQ_ATTACHED 10 /* 0x00000400 */ 374#define AF_IRQ_ATTACHED 10 /* 0x00000400 */
303#define AF_DISABLE_ACB_COMPLETE 11 /* 0x00000800 */ 375#define AF_DISABLE_ACB_COMPLETE 11 /* 0x00000800 */
376#define AF_HBA_GOING_AWAY 12 /* 0x00001000 */
377#define AF_INTx_ENABLED 15 /* 0x00008000 */
378#define AF_MSI_ENABLED 16 /* 0x00010000 */
379#define AF_MSIX_ENABLED 17 /* 0x00020000 */
380#define AF_MBOX_COMMAND_NOPOLL 18 /* 0x00040000 */
381
304 382
305 unsigned long dpc_flags; 383 unsigned long dpc_flags;
306 384
307#define DPC_RESET_HA 1 /* 0x00000002 */ 385#define DPC_RESET_HA 1 /* 0x00000002 */
308#define DPC_RETRY_RESET_HA 2 /* 0x00000004 */ 386#define DPC_RETRY_RESET_HA 2 /* 0x00000004 */
309#define DPC_RELOGIN_DEVICE 3 /* 0x00000008 */ 387#define DPC_RELOGIN_DEVICE 3 /* 0x00000008 */
310#define DPC_RESET_HA_DESTROY_DDB_LIST 4 /* 0x00000010 */ 388#define DPC_RESET_HA_FW_CONTEXT 4 /* 0x00000010 */
311#define DPC_RESET_HA_INTR 5 /* 0x00000020 */ 389#define DPC_RESET_HA_INTR 5 /* 0x00000020 */
312#define DPC_ISNS_RESTART 7 /* 0x00000080 */ 390#define DPC_ISNS_RESTART 7 /* 0x00000080 */
313#define DPC_AEN 9 /* 0x00000200 */ 391#define DPC_AEN 9 /* 0x00000200 */
314#define DPC_GET_DHCP_IP_ADDR 15 /* 0x00008000 */ 392#define DPC_GET_DHCP_IP_ADDR 15 /* 0x00008000 */
315#define DPC_LINK_CHANGED 18 /* 0x00040000 */ 393#define DPC_LINK_CHANGED 18 /* 0x00040000 */
394#define DPC_RESET_ACTIVE 20 /* 0x00040000 */
395#define DPC_HA_UNRECOVERABLE 21 /* 0x00080000 ISP-82xx only*/
396#define DPC_HA_NEED_QUIESCENT 22 /* 0x00100000 ISP-82xx only*/
397
316 398
317 struct Scsi_Host *host; /* pointer to host data */ 399 struct Scsi_Host *host; /* pointer to host data */
318 uint32_t tot_ddbs; 400 uint32_t tot_ddbs;
319 401
320 uint16_t iocb_cnt; 402 uint16_t iocb_cnt;
321 403
322 /* SRB cache. */ 404 /* SRB cache. */
323#define SRB_MIN_REQ 128 405#define SRB_MIN_REQ 128
@@ -332,14 +414,13 @@ struct scsi_qla_host {
332#define MIN_IOBASE_LEN 0x100 414#define MIN_IOBASE_LEN 0x100
333 415
334 uint16_t req_q_count; 416 uint16_t req_q_count;
335 uint8_t rsvd1[2];
336 417
337 unsigned long host_no; 418 unsigned long host_no;
338 419
339 /* NVRAM registers */ 420 /* NVRAM registers */
340 struct eeprom_data *nvram; 421 struct eeprom_data *nvram;
341 spinlock_t hardware_lock ____cacheline_aligned; 422 spinlock_t hardware_lock ____cacheline_aligned;
342 uint32_t eeprom_cmd_data; 423 uint32_t eeprom_cmd_data;
343 424
344 /* Counters for general statistics */ 425 /* Counters for general statistics */
345 uint64_t isr_count; 426 uint64_t isr_count;
@@ -375,7 +456,6 @@ struct scsi_qla_host {
375 uint8_t alias[32]; 456 uint8_t alias[32];
376 uint8_t name_string[256]; 457 uint8_t name_string[256];
377 uint8_t heartbeat_interval; 458 uint8_t heartbeat_interval;
378 uint8_t rsvd;
379 459
380 /* --- From FlashSysInfo --- */ 460 /* --- From FlashSysInfo --- */
381 uint8_t my_mac[MAC_ADDR_LEN]; 461 uint8_t my_mac[MAC_ADDR_LEN];
@@ -469,6 +549,40 @@ struct scsi_qla_host {
469 struct in6_addr ipv6_addr0; 549 struct in6_addr ipv6_addr0;
470 struct in6_addr ipv6_addr1; 550 struct in6_addr ipv6_addr1;
471 struct in6_addr ipv6_default_router_addr; 551 struct in6_addr ipv6_default_router_addr;
552
553 /* qla82xx specific fields */
554 struct device_reg_82xx __iomem *qla4_8xxx_reg; /* Base I/O address */
555 unsigned long nx_pcibase; /* Base I/O address */
556 uint8_t *nx_db_rd_ptr; /* Doorbell read pointer */
557 unsigned long nx_db_wr_ptr; /* Door bell write pointer */
558 unsigned long first_page_group_start;
559 unsigned long first_page_group_end;
560
561 uint32_t crb_win;
562 uint32_t curr_window;
563 uint32_t ddr_mn_window;
564 unsigned long mn_win_crb;
565 unsigned long ms_win_crb;
566 int qdr_sn_window;
567 rwlock_t hw_lock;
568 uint16_t func_num;
569 int link_width;
570
571 struct qla4_8xxx_legacy_intr_set nx_legacy_intr;
572 u32 nx_crb_mask;
573
574 uint8_t revision_id;
575 uint32_t fw_heartbeat_counter;
576
577 struct isp_operations *isp_ops;
578 struct ql82xx_hw_data hw;
579
580 struct ql4_msix_entry msix_entries[QLA_MSIX_ENTRIES];
581
582 uint32_t nx_dev_init_timeout;
583 uint32_t nx_reset_timeout;
584
585 struct completion mbx_intr_comp;
472}; 586};
473 587
474static inline int is_ipv4_enabled(struct scsi_qla_host *ha) 588static inline int is_ipv4_enabled(struct scsi_qla_host *ha)
@@ -496,6 +610,11 @@ static inline int is_qla4032(struct scsi_qla_host *ha)
496 return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4032; 610 return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4032;
497} 611}
498 612
613static inline int is_qla8022(struct scsi_qla_host *ha)
614{
615 return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022;
616}
617
499static inline int adapter_up(struct scsi_qla_host *ha) 618static inline int adapter_up(struct scsi_qla_host *ha)
500{ 619{
501 return (test_bit(AF_ONLINE, &ha->flags) != 0) && 620 return (test_bit(AF_ONLINE, &ha->flags) != 0) &&
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 855226e08665..c94c9ddfb3a6 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -11,7 +11,7 @@
11 11
12#define MAX_PRST_DEV_DB_ENTRIES 64 12#define MAX_PRST_DEV_DB_ENTRIES 64
13#define MIN_DISC_DEV_DB_ENTRY MAX_PRST_DEV_DB_ENTRIES 13#define MIN_DISC_DEV_DB_ENTRY MAX_PRST_DEV_DB_ENTRIES
14#define MAX_DEV_DB_ENTRIES 512 14#define MAX_DEV_DB_ENTRIES 512
15 15
16/************************************************************************* 16/*************************************************************************
17 * 17 *
@@ -37,6 +37,33 @@ struct host_mem_cfg_regs {
37 __le32 rsrvd1[31]; /* 0x84-0xFF */ 37 __le32 rsrvd1[31]; /* 0x84-0xFF */
38}; 38};
39 39
40/*
41 * ISP 82xx I/O Register Set structure definitions.
42 */
43struct device_reg_82xx {
44 __le32 req_q_out; /* 0x0000 (R): Request Queue out-Pointer. */
45 __le32 reserve1[63]; /* Request Queue out-Pointer. (64 * 4) */
46 __le32 rsp_q_in; /* 0x0100 (R/W): Response Queue In-Pointer. */
47 __le32 reserve2[63]; /* Response Queue In-Pointer. */
48 __le32 rsp_q_out; /* 0x0200 (R/W): Response Queue Out-Pointer. */
49 __le32 reserve3[63]; /* Response Queue Out-Pointer. */
50
51 __le32 mailbox_in[8]; /* 0x0300 (R/W): Mail box In registers */
52 __le32 reserve4[24];
53 __le32 hint; /* 0x0380 (R/W): Host interrupt register */
54#define HINT_MBX_INT_PENDING BIT_0
55 __le32 reserve5[31];
56 __le32 mailbox_out[8]; /* 0x0400 (R): Mail box Out registers */
57 __le32 reserve6[56];
58
59 __le32 host_status; /* Offset 0x500 (R): host status */
60#define HSRX_RISC_MB_INT BIT_0 /* RISC to Host Mailbox interrupt */
61#define HSRX_RISC_IOCB_INT BIT_1 /* RISC to Host IOCB interrupt */
62
63 __le32 host_int; /* Offset 0x0504 (R/W): Interrupt status. */
64#define ISRX_82XX_RISC_INT BIT_0 /* RISC interrupt. */
65};
66
40/* remote register set (access via PCI memory read/write) */ 67/* remote register set (access via PCI memory read/write) */
41struct isp_reg { 68struct isp_reg {
42#define MBOX_REG_COUNT 8 69#define MBOX_REG_COUNT 8
@@ -206,6 +233,79 @@ union external_hw_config_reg {
206 uint32_t Asuint32_t; 233 uint32_t Asuint32_t;
207}; 234};
208 235
236/* 82XX Support start */
237/* 82xx Default FLT Addresses */
238#define FA_FLASH_LAYOUT_ADDR_82 0xFC400
239#define FA_FLASH_DESCR_ADDR_82 0xFC000
240#define FA_BOOT_LOAD_ADDR_82 0x04000
241#define FA_BOOT_CODE_ADDR_82 0x20000
242#define FA_RISC_CODE_ADDR_82 0x40000
243#define FA_GOLD_RISC_CODE_ADDR_82 0x80000
244
245/* Flash Description Table */
246struct qla_fdt_layout {
247 uint8_t sig[4];
248 uint16_t version;
249 uint16_t len;
250 uint16_t checksum;
251 uint8_t unused1[2];
252 uint8_t model[16];
253 uint16_t man_id;
254 uint16_t id;
255 uint8_t flags;
256 uint8_t erase_cmd;
257 uint8_t alt_erase_cmd;
258 uint8_t wrt_enable_cmd;
259 uint8_t wrt_enable_bits;
260 uint8_t wrt_sts_reg_cmd;
261 uint8_t unprotect_sec_cmd;
262 uint8_t read_man_id_cmd;
263 uint32_t block_size;
264 uint32_t alt_block_size;
265 uint32_t flash_size;
266 uint32_t wrt_enable_data;
267 uint8_t read_id_addr_len;
268 uint8_t wrt_disable_bits;
269 uint8_t read_dev_id_len;
270 uint8_t chip_erase_cmd;
271 uint16_t read_timeout;
272 uint8_t protect_sec_cmd;
273 uint8_t unused2[65];
274};
275
276/* Flash Layout Table */
277
278struct qla_flt_location {
279 uint8_t sig[4];
280 uint16_t start_lo;
281 uint16_t start_hi;
282 uint8_t version;
283 uint8_t unused[5];
284 uint16_t checksum;
285};
286
287struct qla_flt_header {
288 uint16_t version;
289 uint16_t length;
290 uint16_t checksum;
291 uint16_t unused;
292};
293
294/* 82xx FLT Regions */
295#define FLT_REG_FDT 0x1a
296#define FLT_REG_FLT 0x1c
297#define FLT_REG_BOOTLOAD_82 0x72
298#define FLT_REG_FW_82 0x74
299#define FLT_REG_GOLD_FW_82 0x75
300#define FLT_REG_BOOT_CODE_82 0x78
301
302struct qla_flt_region {
303 uint32_t code;
304 uint32_t size;
305 uint32_t start;
306 uint32_t end;
307};
308
209/************************************************************************* 309/*************************************************************************
210 * 310 *
211 * Mailbox Commands Structures and Definitions 311 * Mailbox Commands Structures and Definitions
@@ -215,6 +315,10 @@ union external_hw_config_reg {
215/* Mailbox command definitions */ 315/* Mailbox command definitions */
216#define MBOX_CMD_ABOUT_FW 0x0009 316#define MBOX_CMD_ABOUT_FW 0x0009
217#define MBOX_CMD_PING 0x000B 317#define MBOX_CMD_PING 0x000B
318#define MBOX_CMD_ENABLE_INTRS 0x0010
319#define INTR_DISABLE 0
320#define INTR_ENABLE 1
321#define MBOX_CMD_STOP_FW 0x0014
218#define MBOX_CMD_ABORT_TASK 0x0015 322#define MBOX_CMD_ABORT_TASK 0x0015
219#define MBOX_CMD_LUN_RESET 0x0016 323#define MBOX_CMD_LUN_RESET 0x0016
220#define MBOX_CMD_TARGET_WARM_RESET 0x0017 324#define MBOX_CMD_TARGET_WARM_RESET 0x0017
@@ -243,6 +347,7 @@ union external_hw_config_reg {
243#define DDB_DS_LOGIN_IN_PROCESS 0x07 347#define DDB_DS_LOGIN_IN_PROCESS 0x07
244#define MBOX_CMD_GET_FW_STATE 0x0069 348#define MBOX_CMD_GET_FW_STATE 0x0069
245#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK_DEFAULTS 0x006A 349#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK_DEFAULTS 0x006A
350#define MBOX_CMD_GET_SYS_INFO 0x0078
246#define MBOX_CMD_RESTORE_FACTORY_DEFAULTS 0x0087 351#define MBOX_CMD_RESTORE_FACTORY_DEFAULTS 0x0087
247#define MBOX_CMD_SET_ACB 0x0088 352#define MBOX_CMD_SET_ACB 0x0088
248#define MBOX_CMD_GET_ACB 0x0089 353#define MBOX_CMD_GET_ACB 0x0089
@@ -318,6 +423,15 @@ union external_hw_config_reg {
318#define MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR 0x8022 423#define MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR 0x8022
319#define MBOX_ASTS_SUBNET_STATE_CHANGE 0x8027 424#define MBOX_ASTS_SUBNET_STATE_CHANGE 0x8027
320 425
426/* ACB State Defines */
427#define ACB_STATE_UNCONFIGURED 0x00
428#define ACB_STATE_INVALID 0x01
429#define ACB_STATE_ACQUIRING 0x02
430#define ACB_STATE_TENTATIVE 0x03
431#define ACB_STATE_DEPRICATED 0x04
432#define ACB_STATE_VALID 0x05
433#define ACB_STATE_DISABLING 0x06
434
321/*************************************************************************/ 435/*************************************************************************/
322 436
323/* Host Adapter Initialization Control Block (from host) */ 437/* Host Adapter Initialization Control Block (from host) */
@@ -558,6 +672,20 @@ struct flash_sys_info {
558 uint32_t reserved1[39]; /* 170-1ff */ 672 uint32_t reserved1[39]; /* 170-1ff */
559}; /* 200 */ 673}; /* 200 */
560 674
675struct mbx_sys_info {
676 uint8_t board_id_str[16]; /* Keep board ID string first */
677 /* in this structure for GUI. */
678 uint16_t board_id; /* board ID code */
679 uint16_t phys_port_cnt; /* number of physical network ports */
680 uint16_t port_num; /* network port for this PCI function */
681 /* (port 0 is first port) */
682 uint8_t mac_addr[6]; /* MAC address for this PCI function */
683 uint32_t iscsi_pci_func_cnt; /* number of iSCSI PCI functions */
684 uint32_t pci_func; /* this PCI function */
685 unsigned char serial_number[16]; /* serial number string */
686 uint8_t reserved[16];
687};
688
561struct crash_record { 689struct crash_record {
562 uint16_t fw_major_version; /* 00 - 01 */ 690 uint16_t fw_major_version; /* 00 - 01 */
563 uint16_t fw_minor_version; /* 02 - 03 */ 691 uint16_t fw_minor_version; /* 02 - 03 */
@@ -814,4 +942,13 @@ struct passthru_status {
814 uint8_t res4[16]; /* 30-3F */ 942 uint8_t res4[16]; /* 30-3F */
815}; 943};
816 944
945/*
946 * ISP queue - response queue entry definition.
947 */
948struct response {
949 uint8_t data[60];
950 uint32_t signature;
951#define RESPONSE_PROCESSED 0xDEADDEAD /* Signature */
952};
953
817#endif /* _QLA4X_FW_H */ 954#endif /* _QLA4X_FW_H */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index c4636f6cb3cb..c9cd5d6db982 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -10,31 +10,32 @@
10 10
11struct iscsi_cls_conn; 11struct iscsi_cls_conn;
12 12
13void qla4xxx_hw_reset(struct scsi_qla_host *ha); 13int qla4xxx_hw_reset(struct scsi_qla_host *ha);
14int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a); 14int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a);
15int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port); 15int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port);
16int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb); 16int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb);
17int qla4xxx_initialize_adapter(struct scsi_qla_host * ha, 17int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
18 uint8_t renew_ddb_list); 18 uint8_t renew_ddb_list);
19int qla4xxx_soft_reset(struct scsi_qla_host *ha); 19int qla4xxx_soft_reset(struct scsi_qla_host *ha);
20irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id); 20irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id);
21 21
22void qla4xxx_free_ddb_list(struct scsi_qla_host * ha); 22void qla4xxx_free_ddb_list(struct scsi_qla_host *ha);
23void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen); 23void qla4xxx_free_ddb(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry);
24void qla4xxx_process_aen(struct scsi_qla_host *ha, uint8_t process_aen);
24 25
25int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha); 26int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host *ha);
26int qla4xxx_relogin_device(struct scsi_qla_host * ha, 27int qla4xxx_relogin_device(struct scsi_qla_host *ha,
27 struct ddb_entry * ddb_entry); 28 struct ddb_entry *ddb_entry);
28int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb); 29int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb);
29int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry, 30int qla4xxx_reset_lun(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry,
30 int lun); 31 int lun);
31int qla4xxx_reset_target(struct scsi_qla_host * ha, 32int qla4xxx_reset_target(struct scsi_qla_host *ha,
32 struct ddb_entry * ddb_entry); 33 struct ddb_entry *ddb_entry);
33int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr, 34int qla4xxx_get_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr,
34 uint32_t offset, uint32_t len); 35 uint32_t offset, uint32_t len);
35int qla4xxx_get_firmware_status(struct scsi_qla_host * ha); 36int qla4xxx_get_firmware_status(struct scsi_qla_host *ha);
36int qla4xxx_get_firmware_state(struct scsi_qla_host * ha); 37int qla4xxx_get_firmware_state(struct scsi_qla_host *ha);
37int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha); 38int qla4xxx_initialize_fw_cb(struct scsi_qla_host *ha);
38 39
39/* FIXME: Goodness! this really wants a small struct to hold the 40/* FIXME: Goodness! this really wants a small struct to hold the
40 * parameters. On x86 the args will get passed on the stack! */ 41 * parameters. On x86 the args will get passed on the stack! */
@@ -54,20 +55,20 @@ int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
54 55
55void qla4xxx_mark_device_missing(struct scsi_qla_host *ha, 56void qla4xxx_mark_device_missing(struct scsi_qla_host *ha,
56 struct ddb_entry *ddb_entry); 57 struct ddb_entry *ddb_entry);
57u16 rd_nvram_word(struct scsi_qla_host * ha, int offset); 58u16 rd_nvram_word(struct scsi_qla_host *ha, int offset);
58void qla4xxx_get_crash_record(struct scsi_qla_host * ha); 59void qla4xxx_get_crash_record(struct scsi_qla_host *ha);
59struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha); 60struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha);
60int qla4xxx_add_sess(struct ddb_entry *); 61int qla4xxx_add_sess(struct ddb_entry *);
61void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry); 62void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry);
62int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host * ha); 63int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host *ha);
63int qla4xxx_get_fw_version(struct scsi_qla_host * ha); 64int qla4xxx_get_fw_version(struct scsi_qla_host * ha);
64void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha, 65void qla4xxx_interrupt_service_routine(struct scsi_qla_host *ha,
65 uint32_t intr_status); 66 uint32_t intr_status);
66int qla4xxx_init_rings(struct scsi_qla_host * ha); 67int qla4xxx_init_rings(struct scsi_qla_host *ha);
67struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
68 uint32_t index);
69void qla4xxx_srb_compl(struct kref *ref); 68void qla4xxx_srb_compl(struct kref *ref);
70int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host * ha); 69struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
70 uint32_t index);
71int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host *ha);
71int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index, 72int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
72 uint32_t state, uint32_t conn_error); 73 uint32_t state, uint32_t conn_error);
73void qla4xxx_dump_buffer(void *b, uint32_t size); 74void qla4xxx_dump_buffer(void *b, uint32_t size);
@@ -75,8 +76,65 @@ int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
75 struct ddb_entry *ddb_entry, int lun, uint16_t mrkr_mod); 76 struct ddb_entry *ddb_entry, int lun, uint16_t mrkr_mod);
76int qla4_is_relogin_allowed(struct scsi_qla_host *ha, uint32_t conn_err); 77int qla4_is_relogin_allowed(struct scsi_qla_host *ha, uint32_t conn_err);
77 78
79int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
80 uint8_t outCount, uint32_t *mbx_cmd, uint32_t *mbx_sts);
81
82void qla4xxx_queue_iocb(struct scsi_qla_host *ha);
83void qla4xxx_complete_iocb(struct scsi_qla_host *ha);
84int qla4xxx_get_sys_info(struct scsi_qla_host *ha);
85int qla4xxx_iospace_config(struct scsi_qla_host *ha);
86void qla4xxx_pci_config(struct scsi_qla_host *ha);
87int qla4xxx_start_firmware(struct scsi_qla_host *ha);
88irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id);
89uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha);
90uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha);
91int qla4xxx_request_irqs(struct scsi_qla_host *ha);
92void qla4xxx_free_irqs(struct scsi_qla_host *ha);
93void qla4xxx_process_response_queue(struct scsi_qla_host *ha);
94void qla4xxx_wake_dpc(struct scsi_qla_host *ha);
95void qla4xxx_get_conn_event_log(struct scsi_qla_host *ha);
96
97void qla4_8xxx_pci_config(struct scsi_qla_host *);
98int qla4_8xxx_iospace_config(struct scsi_qla_host *ha);
99int qla4_8xxx_load_risc(struct scsi_qla_host *);
100irqreturn_t qla4_8xxx_intr_handler(int irq, void *dev_id);
101void qla4_8xxx_queue_iocb(struct scsi_qla_host *ha);
102void qla4_8xxx_complete_iocb(struct scsi_qla_host *ha);
103
104int qla4_8xxx_crb_win_lock(struct scsi_qla_host *);
105void qla4_8xxx_crb_win_unlock(struct scsi_qla_host *);
106int qla4_8xxx_pci_get_crb_addr_2M(struct scsi_qla_host *, ulong *);
107void qla4_8xxx_wr_32(struct scsi_qla_host *, ulong, u32);
108int qla4_8xxx_rd_32(struct scsi_qla_host *, ulong);
109int qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *, u64, void *, int);
110int qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha, u64, void *, int);
111int qla4_8xxx_isp_reset(struct scsi_qla_host *ha);
112void qla4_8xxx_interrupt_service_routine(struct scsi_qla_host *ha,
113 uint32_t intr_status);
114uint16_t qla4_8xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha);
115uint16_t qla4_8xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha);
116int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha);
117void qla4_8xxx_watchdog(struct scsi_qla_host *ha);
118int qla4_8xxx_stop_firmware(struct scsi_qla_host *ha);
119int qla4_8xxx_get_flash_info(struct scsi_qla_host *ha);
120void qla4_8xxx_enable_intrs(struct scsi_qla_host *ha);
121void qla4_8xxx_disable_intrs(struct scsi_qla_host *ha);
122int qla4_8xxx_enable_msix(struct scsi_qla_host *ha);
123void qla4_8xxx_disable_msix(struct scsi_qla_host *ha);
124irqreturn_t qla4_8xxx_msi_handler(int irq, void *dev_id);
125irqreturn_t qla4_8xxx_default_intr_handler(int irq, void *dev_id);
126irqreturn_t qla4_8xxx_msix_rsp_q(int irq, void *dev_id);
127void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha);
128void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha);
129int qla4_8xxx_idc_lock(struct scsi_qla_host *ha);
130void qla4_8xxx_idc_unlock(struct scsi_qla_host *ha);
131int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha);
132void qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha);
133void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha);
134
78extern int ql4xextended_error_logging; 135extern int ql4xextended_error_logging;
79extern int ql4xdiscoverywait; 136extern int ql4xdiscoverywait;
80extern int ql4xdontresethba; 137extern int ql4xdontresethba;
81extern int ql4_mod_unload; 138extern int ql4xenablemsix;
139
82#endif /* _QLA4x_GBL_H */ 140#endif /* _QLA4x_GBL_H */
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 5510df8a7fa6..266ebd45396d 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -11,8 +11,8 @@
11#include "ql4_dbg.h" 11#include "ql4_dbg.h"
12#include "ql4_inline.h" 12#include "ql4_inline.h"
13 13
14static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha, 14static struct ddb_entry *qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
15 uint32_t fw_ddb_index); 15 uint32_t fw_ddb_index);
16 16
17static void ql4xxx_set_mac_number(struct scsi_qla_host *ha) 17static void ql4xxx_set_mac_number(struct scsi_qla_host *ha)
18{ 18{
@@ -51,8 +51,8 @@ static void ql4xxx_set_mac_number(struct scsi_qla_host *ha)
51 * This routine deallocates and unlinks the specified ddb_entry from the 51 * This routine deallocates and unlinks the specified ddb_entry from the
52 * adapter's 52 * adapter's
53 **/ 53 **/
54static void qla4xxx_free_ddb(struct scsi_qla_host *ha, 54void qla4xxx_free_ddb(struct scsi_qla_host *ha,
55 struct ddb_entry *ddb_entry) 55 struct ddb_entry *ddb_entry)
56{ 56{
57 /* Remove device entry from list */ 57 /* Remove device entry from list */
58 list_del_init(&ddb_entry->list); 58 list_del_init(&ddb_entry->list);
@@ -86,6 +86,25 @@ void qla4xxx_free_ddb_list(struct scsi_qla_host *ha)
86} 86}
87 87
88/** 88/**
89 * qla4xxx_init_response_q_entries() - Initializes response queue entries.
90 * @ha: HA context
91 *
92 * Beginning of request ring has initialization control block already built
93 * by nvram config routine.
94 **/
95static void qla4xxx_init_response_q_entries(struct scsi_qla_host *ha)
96{
97 uint16_t cnt;
98 struct response *pkt;
99
100 pkt = (struct response *)ha->response_ptr;
101 for (cnt = 0; cnt < RESPONSE_QUEUE_DEPTH; cnt++) {
102 pkt->signature = RESPONSE_PROCESSED;
103 pkt++;
104 }
105}
106
107/**
89 * qla4xxx_init_rings - initialize hw queues 108 * qla4xxx_init_rings - initialize hw queues
90 * @ha: pointer to host adapter structure. 109 * @ha: pointer to host adapter structure.
91 * 110 *
@@ -109,19 +128,31 @@ int qla4xxx_init_rings(struct scsi_qla_host *ha)
109 ha->response_out = 0; 128 ha->response_out = 0;
110 ha->response_ptr = &ha->response_ring[ha->response_out]; 129 ha->response_ptr = &ha->response_ring[ha->response_out];
111 130
112 /* 131 if (is_qla8022(ha)) {
113 * Initialize DMA Shadow registers. The firmware is really supposed to 132 writel(0,
114 * take care of this, but on some uniprocessor systems, the shadow 133 (unsigned long __iomem *)&ha->qla4_8xxx_reg->req_q_out);
115 * registers aren't cleared-- causing the interrupt_handler to think 134 writel(0,
116 * there are responses to be processed when there aren't. 135 (unsigned long __iomem *)&ha->qla4_8xxx_reg->rsp_q_in);
117 */ 136 writel(0,
118 ha->shadow_regs->req_q_out = __constant_cpu_to_le32(0); 137 (unsigned long __iomem *)&ha->qla4_8xxx_reg->rsp_q_out);
119 ha->shadow_regs->rsp_q_in = __constant_cpu_to_le32(0); 138 } else {
120 wmb(); 139 /*
140 * Initialize DMA Shadow registers. The firmware is really
141 * supposed to take care of this, but on some uniprocessor
142 * systems, the shadow registers aren't cleared-- causing
143 * the interrupt_handler to think there are responses to be
144 * processed when there aren't.
145 */
146 ha->shadow_regs->req_q_out = __constant_cpu_to_le32(0);
147 ha->shadow_regs->rsp_q_in = __constant_cpu_to_le32(0);
148 wmb();
121 149
122 writel(0, &ha->reg->req_q_in); 150 writel(0, &ha->reg->req_q_in);
123 writel(0, &ha->reg->rsp_q_out); 151 writel(0, &ha->reg->rsp_q_out);
124 readl(&ha->reg->rsp_q_out); 152 readl(&ha->reg->rsp_q_out);
153 }
154
155 qla4xxx_init_response_q_entries(ha);
125 156
126 spin_unlock_irqrestore(&ha->hardware_lock, flags); 157 spin_unlock_irqrestore(&ha->hardware_lock, flags);
127 158
@@ -129,11 +160,11 @@ int qla4xxx_init_rings(struct scsi_qla_host *ha)
129} 160}
130 161
131/** 162/**
132 * qla4xxx_validate_mac_address - validate adapter MAC address(es) 163 * qla4xxx_get_sys_info - validate adapter MAC address(es)
133 * @ha: pointer to host adapter structure. 164 * @ha: pointer to host adapter structure.
134 * 165 *
135 **/ 166 **/
136static int qla4xxx_validate_mac_address(struct scsi_qla_host *ha) 167int qla4xxx_get_sys_info(struct scsi_qla_host *ha)
137{ 168{
138 struct flash_sys_info *sys_info; 169 struct flash_sys_info *sys_info;
139 dma_addr_t sys_info_dma; 170 dma_addr_t sys_info_dma;
@@ -145,7 +176,7 @@ static int qla4xxx_validate_mac_address(struct scsi_qla_host *ha)
145 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n", 176 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
146 ha->host_no, __func__)); 177 ha->host_no, __func__));
147 178
148 goto exit_validate_mac_no_free; 179 goto exit_get_sys_info_no_free;
149 } 180 }
150 memset(sys_info, 0, sizeof(*sys_info)); 181 memset(sys_info, 0, sizeof(*sys_info));
151 182
@@ -155,7 +186,7 @@ static int qla4xxx_validate_mac_address(struct scsi_qla_host *ha)
155 DEBUG2(printk("scsi%ld: %s: get_flash FLASH_OFFSET_SYS_INFO " 186 DEBUG2(printk("scsi%ld: %s: get_flash FLASH_OFFSET_SYS_INFO "
156 "failed\n", ha->host_no, __func__)); 187 "failed\n", ha->host_no, __func__));
157 188
158 goto exit_validate_mac; 189 goto exit_get_sys_info;
159 } 190 }
160 191
161 /* Save M.A.C. address & serial_number */ 192 /* Save M.A.C. address & serial_number */
@@ -168,11 +199,11 @@ static int qla4xxx_validate_mac_address(struct scsi_qla_host *ha)
168 199
169 status = QLA_SUCCESS; 200 status = QLA_SUCCESS;
170 201
171 exit_validate_mac: 202exit_get_sys_info:
172 dma_free_coherent(&ha->pdev->dev, sizeof(*sys_info), sys_info, 203 dma_free_coherent(&ha->pdev->dev, sizeof(*sys_info), sys_info,
173 sys_info_dma); 204 sys_info_dma);
174 205
175 exit_validate_mac_no_free: 206exit_get_sys_info_no_free:
176 return status; 207 return status;
177} 208}
178 209
@@ -266,7 +297,7 @@ static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
266 uint32_t timeout_count; 297 uint32_t timeout_count;
267 int ready = 0; 298 int ready = 0;
268 299
269 DEBUG2(dev_info(&ha->pdev->dev, "Waiting for Firmware Ready..\n")); 300 DEBUG2(ql4_printk(KERN_INFO, ha, "Waiting for Firmware Ready..\n"));
270 for (timeout_count = ADAPTER_INIT_TOV; timeout_count > 0; 301 for (timeout_count = ADAPTER_INIT_TOV; timeout_count > 0;
271 timeout_count--) { 302 timeout_count--) {
272 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags)) 303 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
@@ -339,29 +370,29 @@ static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
339 370
340 if (!qla4xxx_wait_for_ip_config(ha) || 371 if (!qla4xxx_wait_for_ip_config(ha) ||
341 timeout_count == 1) { 372 timeout_count == 1) {
342 DEBUG2(dev_info(&ha->pdev->dev, 373 DEBUG2(ql4_printk(KERN_INFO, ha,
343 "Firmware Ready..\n")); 374 "Firmware Ready..\n"));
344 /* The firmware is ready to process SCSI 375 /* The firmware is ready to process SCSI
345 commands. */ 376 commands. */
346 DEBUG2(dev_info(&ha->pdev->dev, 377 DEBUG2(ql4_printk(KERN_INFO, ha,
347 "scsi%ld: %s: MEDIA TYPE" 378 "scsi%ld: %s: MEDIA TYPE"
348 " - %s\n", ha->host_no, 379 " - %s\n", ha->host_no,
349 __func__, (ha->addl_fw_state & 380 __func__, (ha->addl_fw_state &
350 FW_ADDSTATE_OPTICAL_MEDIA) 381 FW_ADDSTATE_OPTICAL_MEDIA)
351 != 0 ? "OPTICAL" : "COPPER")); 382 != 0 ? "OPTICAL" : "COPPER"));
352 DEBUG2(dev_info(&ha->pdev->dev, 383 DEBUG2(ql4_printk(KERN_INFO, ha,
353 "scsi%ld: %s: DHCPv4 STATE" 384 "scsi%ld: %s: DHCPv4 STATE"
354 " Enabled %s\n", ha->host_no, 385 " Enabled %s\n", ha->host_no,
355 __func__, (ha->addl_fw_state & 386 __func__, (ha->addl_fw_state &
356 FW_ADDSTATE_DHCPv4_ENABLED) != 0 ? 387 FW_ADDSTATE_DHCPv4_ENABLED) != 0 ?
357 "YES" : "NO")); 388 "YES" : "NO"));
358 DEBUG2(dev_info(&ha->pdev->dev, 389 DEBUG2(ql4_printk(KERN_INFO, ha,
359 "scsi%ld: %s: LINK %s\n", 390 "scsi%ld: %s: LINK %s\n",
360 ha->host_no, __func__, 391 ha->host_no, __func__,
361 (ha->addl_fw_state & 392 (ha->addl_fw_state &
362 FW_ADDSTATE_LINK_UP) != 0 ? 393 FW_ADDSTATE_LINK_UP) != 0 ?
363 "UP" : "DOWN")); 394 "UP" : "DOWN"));
364 DEBUG2(dev_info(&ha->pdev->dev, 395 DEBUG2(ql4_printk(KERN_INFO, ha,
365 "scsi%ld: %s: iSNS Service " 396 "scsi%ld: %s: iSNS Service "
366 "Started %s\n", 397 "Started %s\n",
367 ha->host_no, __func__, 398 ha->host_no, __func__,
@@ -399,6 +430,7 @@ static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
399 DEBUG2(printk("scsi%ld: %s: FW initialized, but " 430 DEBUG2(printk("scsi%ld: %s: FW initialized, but "
400 "auto-discovery still in process\n", 431 "auto-discovery still in process\n",
401 ha->host_no, __func__)); 432 ha->host_no, __func__));
433 ready = 1;
402 } 434 }
403 435
404 return ready; 436 return ready;
@@ -413,7 +445,7 @@ static int qla4xxx_init_firmware(struct scsi_qla_host *ha)
413{ 445{
414 int status = QLA_ERROR; 446 int status = QLA_ERROR;
415 447
416 dev_info(&ha->pdev->dev, "Initializing firmware..\n"); 448 ql4_printk(KERN_INFO, ha, "Initializing firmware..\n");
417 if (qla4xxx_initialize_fw_cb(ha) == QLA_ERROR) { 449 if (qla4xxx_initialize_fw_cb(ha) == QLA_ERROR) {
418 DEBUG2(printk("scsi%ld: %s: Failed to initialize firmware " 450 DEBUG2(printk("scsi%ld: %s: Failed to initialize firmware "
419 "control block\n", ha->host_no, __func__)); 451 "control block\n", ha->host_no, __func__));
@@ -443,17 +475,17 @@ static struct ddb_entry* qla4xxx_get_ddb_entry(struct scsi_qla_host *ha,
443 if (fw_ddb_entry == NULL) { 475 if (fw_ddb_entry == NULL) {
444 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n", 476 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
445 ha->host_no, __func__)); 477 ha->host_no, __func__));
446 return NULL; 478 goto exit_get_ddb_entry_no_free;
447 } 479 }
448 480
449 if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry, 481 if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry,
450 fw_ddb_entry_dma, NULL, NULL, 482 fw_ddb_entry_dma, NULL, NULL,
451 &device_state, NULL, NULL, NULL) == 483 &device_state, NULL, NULL, NULL) ==
452 QLA_ERROR) { 484 QLA_ERROR) {
453 DEBUG2(printk("scsi%ld: %s: failed get_ddb_entry for " 485 DEBUG2(printk("scsi%ld: %s: failed get_ddb_entry for "
454 "fw_ddb_index %d\n", ha->host_no, __func__, 486 "fw_ddb_index %d\n", ha->host_no, __func__,
455 fw_ddb_index)); 487 fw_ddb_index));
456 return NULL; 488 goto exit_get_ddb_entry;
457 } 489 }
458 490
459 /* Allocate DDB if not already allocated. */ 491 /* Allocate DDB if not already allocated. */
@@ -471,6 +503,7 @@ static struct ddb_entry* qla4xxx_get_ddb_entry(struct scsi_qla_host *ha,
471 } 503 }
472 } 504 }
473 505
506 /* if not found allocate new ddb */
474 if (!found) { 507 if (!found) {
475 DEBUG2(printk("scsi%ld: %s: ddb[%d] not found - allocating " 508 DEBUG2(printk("scsi%ld: %s: ddb[%d] not found - allocating "
476 "new ddb\n", ha->host_no, __func__, 509 "new ddb\n", ha->host_no, __func__,
@@ -479,10 +512,11 @@ static struct ddb_entry* qla4xxx_get_ddb_entry(struct scsi_qla_host *ha,
479 ddb_entry = qla4xxx_alloc_ddb(ha, fw_ddb_index); 512 ddb_entry = qla4xxx_alloc_ddb(ha, fw_ddb_index);
480 } 513 }
481 514
482 /* if not found allocate new ddb */ 515exit_get_ddb_entry:
483 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry, 516 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry,
484 fw_ddb_entry_dma); 517 fw_ddb_entry_dma);
485 518
519exit_get_ddb_entry_no_free:
486 return ddb_entry; 520 return ddb_entry;
487} 521}
488 522
@@ -510,7 +544,8 @@ static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
510 if (ddb_entry == NULL) { 544 if (ddb_entry == NULL) {
511 DEBUG2(printk("scsi%ld: %s: ddb_entry is NULL\n", ha->host_no, 545 DEBUG2(printk("scsi%ld: %s: ddb_entry is NULL\n", ha->host_no,
512 __func__)); 546 __func__));
513 goto exit_update_ddb; 547
548 goto exit_update_ddb_no_free;
514 } 549 }
515 550
516 /* Make sure the dma buffer is valid */ 551 /* Make sure the dma buffer is valid */
@@ -521,7 +556,7 @@ static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
521 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n", 556 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
522 ha->host_no, __func__)); 557 ha->host_no, __func__));
523 558
524 goto exit_update_ddb; 559 goto exit_update_ddb_no_free;
525 } 560 }
526 561
527 if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry, 562 if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry,
@@ -529,7 +564,7 @@ static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
529 &ddb_entry->fw_ddb_device_state, &conn_err, 564 &ddb_entry->fw_ddb_device_state, &conn_err,
530 &ddb_entry->tcp_source_port_num, 565 &ddb_entry->tcp_source_port_num,
531 &ddb_entry->connection_id) == 566 &ddb_entry->connection_id) ==
532 QLA_ERROR) { 567 QLA_ERROR) {
533 DEBUG2(printk("scsi%ld: %s: failed get_ddb_entry for " 568 DEBUG2(printk("scsi%ld: %s: failed get_ddb_entry for "
534 "fw_ddb_index %d\n", ha->host_no, __func__, 569 "fw_ddb_index %d\n", ha->host_no, __func__,
535 fw_ddb_index)); 570 fw_ddb_index));
@@ -559,6 +594,9 @@ static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
559 memcpy(&ddb_entry->iscsi_name[0], &fw_ddb_entry->iscsi_name[0], 594 memcpy(&ddb_entry->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
560 min(sizeof(ddb_entry->iscsi_name), 595 min(sizeof(ddb_entry->iscsi_name),
561 sizeof(fw_ddb_entry->iscsi_name))); 596 sizeof(fw_ddb_entry->iscsi_name)));
597 memcpy(&ddb_entry->iscsi_alias[0], &fw_ddb_entry->iscsi_alias[0],
598 min(sizeof(ddb_entry->iscsi_alias),
599 sizeof(fw_ddb_entry->iscsi_alias)));
562 memcpy(&ddb_entry->ip_addr[0], &fw_ddb_entry->ip_addr[0], 600 memcpy(&ddb_entry->ip_addr[0], &fw_ddb_entry->ip_addr[0],
563 min(sizeof(ddb_entry->ip_addr), sizeof(fw_ddb_entry->ip_addr))); 601 min(sizeof(ddb_entry->ip_addr), sizeof(fw_ddb_entry->ip_addr)));
564 602
@@ -580,21 +618,19 @@ static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
580 min(sizeof(ddb_entry->link_local_ipv6_addr), 618 min(sizeof(ddb_entry->link_local_ipv6_addr),
581 sizeof(fw_ddb_entry->link_local_ipv6_addr))); 619 sizeof(fw_ddb_entry->link_local_ipv6_addr)));
582 620
583 DEBUG2(dev_info(&ha->pdev->dev, "%s: DDB[%d] osIdx = %d " 621 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: DDB[%d] State %04x"
584 "State %04x ConnErr %08x IP %pI6 " 622 " ConnErr %08x IP %pI6 "
585 ":%04d \"%s\"\n", 623 ":%04d \"%s\"\n",
586 __func__, fw_ddb_index, 624 __func__, fw_ddb_index,
587 ddb_entry->os_target_id,
588 ddb_entry->fw_ddb_device_state, 625 ddb_entry->fw_ddb_device_state,
589 conn_err, fw_ddb_entry->ip_addr, 626 conn_err, fw_ddb_entry->ip_addr,
590 le16_to_cpu(fw_ddb_entry->port), 627 le16_to_cpu(fw_ddb_entry->port),
591 fw_ddb_entry->iscsi_name)); 628 fw_ddb_entry->iscsi_name));
592 } else 629 } else
593 DEBUG2(dev_info(&ha->pdev->dev, "%s: DDB[%d] osIdx = %d " 630 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: DDB[%d] State %04x"
594 "State %04x ConnErr %08x IP %pI4 " 631 " ConnErr %08x IP %pI4 "
595 ":%04d \"%s\"\n", 632 ":%04d \"%s\"\n",
596 __func__, fw_ddb_index, 633 __func__, fw_ddb_index,
597 ddb_entry->os_target_id,
598 ddb_entry->fw_ddb_device_state, 634 ddb_entry->fw_ddb_device_state,
599 conn_err, fw_ddb_entry->ip_addr, 635 conn_err, fw_ddb_entry->ip_addr,
600 le16_to_cpu(fw_ddb_entry->port), 636 le16_to_cpu(fw_ddb_entry->port),
@@ -604,6 +640,7 @@ exit_update_ddb:
604 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 640 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
605 fw_ddb_entry, fw_ddb_entry_dma); 641 fw_ddb_entry, fw_ddb_entry_dma);
606 642
643exit_update_ddb_no_free:
607 return status; 644 return status;
608} 645}
609 646
@@ -660,18 +697,18 @@ int qla4_is_relogin_allowed(struct scsi_qla_host *ha, uint32_t conn_err)
660 err_code = ((conn_err & 0x00ff0000) >> 16); 697 err_code = ((conn_err & 0x00ff0000) >> 16);
661 login_rsp_sts_class = ((conn_err & 0x0000ff00) >> 8); 698 login_rsp_sts_class = ((conn_err & 0x0000ff00) >> 8);
662 if (err_code == 0x1c || err_code == 0x06) { 699 if (err_code == 0x1c || err_code == 0x06) {
663 DEBUG2(dev_info(&ha->pdev->dev, 700 DEBUG2(ql4_printk(KERN_INFO, ha,
664 ": conn_err=0x%08x, send target completed" 701 ": conn_err=0x%08x, send target completed"
665 " or access denied failure\n", conn_err)); 702 " or access denied failure\n", conn_err));
666 relogin = 0; 703 relogin = 0;
667 } 704 }
668 if ((err_code == 0x08) && (login_rsp_sts_class == 0x02)) { 705 if ((err_code == 0x08) && (login_rsp_sts_class == 0x02)) {
669 /* Login Response PDU returned an error. 706 /* Login Response PDU returned an error.
670 Login Response Status in Error Code Detail 707 Login Response Status in Error Code Detail
671 indicates login should not be retried.*/ 708 indicates login should not be retried.*/
672 DEBUG2(dev_info(&ha->pdev->dev, 709 DEBUG2(ql4_printk(KERN_INFO, ha,
673 ": conn_err=0x%08x, do not retry relogin\n", 710 ": conn_err=0x%08x, do not retry relogin\n",
674 conn_err)); 711 conn_err));
675 relogin = 0; 712 relogin = 0;
676 } 713 }
677 714
@@ -688,7 +725,7 @@ int qla4_is_relogin_allowed(struct scsi_qla_host *ha, uint32_t conn_err)
688 **/ 725 **/
689static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha) 726static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
690{ 727{
691 int status = QLA_SUCCESS; 728 int status = QLA_ERROR;
692 uint32_t fw_ddb_index = 0; 729 uint32_t fw_ddb_index = 0;
693 uint32_t next_fw_ddb_index = 0; 730 uint32_t next_fw_ddb_index = 0;
694 uint32_t ddb_state; 731 uint32_t ddb_state;
@@ -702,12 +739,13 @@ static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
702 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 739 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
703 &fw_ddb_entry_dma, GFP_KERNEL); 740 &fw_ddb_entry_dma, GFP_KERNEL);
704 if (fw_ddb_entry == NULL) { 741 if (fw_ddb_entry == NULL) {
705 DEBUG2(dev_info(&ha->pdev->dev, "%s: DMA alloc failed\n", 742 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: DMA alloc failed\n",
706 __func__)); 743 __func__));
707 return QLA_ERROR; 744
745 goto exit_build_ddb_list_no_free;
708 } 746 }
709 747
710 dev_info(&ha->pdev->dev, "Initializing DDBs ...\n"); 748 ql4_printk(KERN_INFO, ha, "Initializing DDBs ...\n");
711 for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES; 749 for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES;
712 fw_ddb_index = next_fw_ddb_index) { 750 fw_ddb_index = next_fw_ddb_index) {
713 /* First, let's see if a device exists here */ 751 /* First, let's see if a device exists here */
@@ -719,7 +757,7 @@ static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
719 DEBUG2(printk("scsi%ld: %s: get_ddb_entry, " 757 DEBUG2(printk("scsi%ld: %s: get_ddb_entry, "
720 "fw_ddb_index %d failed", ha->host_no, 758 "fw_ddb_index %d failed", ha->host_no,
721 __func__, fw_ddb_index)); 759 __func__, fw_ddb_index));
722 return QLA_ERROR; 760 goto exit_build_ddb_list;
723 } 761 }
724 762
725 DEBUG2(printk("scsi%ld: %s: Getting DDB[%d] ddbstate=0x%x, " 763 DEBUG2(printk("scsi%ld: %s: Getting DDB[%d] ddbstate=0x%x, "
@@ -749,7 +787,7 @@ static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
749 "get_ddb_entry %d failed\n", 787 "get_ddb_entry %d failed\n",
750 ha->host_no, 788 ha->host_no,
751 __func__, fw_ddb_index)); 789 __func__, fw_ddb_index));
752 return QLA_ERROR; 790 goto exit_build_ddb_list;
753 } 791 }
754 } 792 }
755 } 793 }
@@ -769,7 +807,7 @@ static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
769 DEBUG2(printk("scsi%ld: %s: Unable to allocate memory " 807 DEBUG2(printk("scsi%ld: %s: Unable to allocate memory "
770 "for device at fw_ddb_index %d\n", 808 "for device at fw_ddb_index %d\n",
771 ha->host_no, __func__, fw_ddb_index)); 809 ha->host_no, __func__, fw_ddb_index));
772 return QLA_ERROR; 810 goto exit_build_ddb_list;
773 } 811 }
774 /* Fill in the device structure */ 812 /* Fill in the device structure */
775 if (qla4xxx_update_ddb_entry(ha, ddb_entry, fw_ddb_index) == 813 if (qla4xxx_update_ddb_entry(ha, ddb_entry, fw_ddb_index) ==
@@ -777,11 +815,10 @@ static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
777 ha->fw_ddb_index_map[fw_ddb_index] = 815 ha->fw_ddb_index_map[fw_ddb_index] =
778 (struct ddb_entry *)INVALID_ENTRY; 816 (struct ddb_entry *)INVALID_ENTRY;
779 817
780
781 DEBUG2(printk("scsi%ld: %s: update_ddb_entry failed " 818 DEBUG2(printk("scsi%ld: %s: update_ddb_entry failed "
782 "for fw_ddb_index %d.\n", 819 "for fw_ddb_index %d.\n",
783 ha->host_no, __func__, fw_ddb_index)); 820 ha->host_no, __func__, fw_ddb_index));
784 return QLA_ERROR; 821 goto exit_build_ddb_list;
785 } 822 }
786 823
787next_one: 824next_one:
@@ -791,8 +828,14 @@ next_one:
791 break; 828 break;
792 } 829 }
793 830
794 dev_info(&ha->pdev->dev, "DDB list done..\n"); 831 status = QLA_SUCCESS;
832 ql4_printk(KERN_INFO, ha, "DDB list done..\n");
833
834exit_build_ddb_list:
835 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry,
836 fw_ddb_entry_dma);
795 837
838exit_build_ddb_list_no_free:
796 return status; 839 return status;
797} 840}
798 841
@@ -951,6 +994,9 @@ static int qla4xxx_initialize_ddb_list(struct scsi_qla_host *ha)
951 994
952 qla4xxx_flush_AENS(ha); 995 qla4xxx_flush_AENS(ha);
953 996
997 /* Wait for an AEN */
998 qla4xxx_devices_ready(ha);
999
954 /* 1000 /*
955 * First perform device discovery for active 1001 * First perform device discovery for active
956 * fw ddb indexes and build 1002 * fw ddb indexes and build
@@ -959,9 +1005,6 @@ static int qla4xxx_initialize_ddb_list(struct scsi_qla_host *ha)
959 if ((status = qla4xxx_build_ddb_list(ha)) == QLA_ERROR) 1005 if ((status = qla4xxx_build_ddb_list(ha)) == QLA_ERROR)
960 return status; 1006 return status;
961 1007
962 /* Wait for an AEN */
963 qla4xxx_devices_ready(ha);
964
965 /* 1008 /*
966 * Targets can come online after the inital discovery, so processing 1009 * Targets can come online after the inital discovery, so processing
967 * the aens here will catch them. 1010 * the aens here will catch them.
@@ -973,7 +1016,7 @@ static int qla4xxx_initialize_ddb_list(struct scsi_qla_host *ha)
973} 1016}
974 1017
975/** 1018/**
976 * qla4xxx_update_ddb_list - update the driver ddb list 1019 * qla4xxx_reinitialize_ddb_list - update the driver ddb list
977 * @ha: pointer to host adapter structure. 1020 * @ha: pointer to host adapter structure.
978 * 1021 *
979 * This routine obtains device information from the F/W database after 1022 * This routine obtains device information from the F/W database after
@@ -993,6 +1036,7 @@ int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host *ha)
993 DEBUG2(printk ("scsi%ld: %s: ddb index [%d] marked " 1036 DEBUG2(printk ("scsi%ld: %s: ddb index [%d] marked "
994 "ONLINE\n", ha->host_no, __func__, 1037 "ONLINE\n", ha->host_no, __func__,
995 ddb_entry->fw_ddb_index)); 1038 ddb_entry->fw_ddb_index));
1039 iscsi_unblock_session(ddb_entry->sess);
996 } else if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) 1040 } else if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
997 qla4xxx_mark_device_missing(ha, ddb_entry); 1041 qla4xxx_mark_device_missing(ha, ddb_entry);
998 } 1042 }
@@ -1016,7 +1060,7 @@ int qla4xxx_relogin_device(struct scsi_qla_host *ha,
1016 (uint16_t)RELOGIN_TOV); 1060 (uint16_t)RELOGIN_TOV);
1017 atomic_set(&ddb_entry->relogin_timer, relogin_timer); 1061 atomic_set(&ddb_entry->relogin_timer, relogin_timer);
1018 1062
1019 DEBUG2(printk("scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no, 1063 DEBUG2(printk("scsi%ld: Relogin ddb [%d]. TOV=%d\n", ha->host_no,
1020 ddb_entry->fw_ddb_index, relogin_timer)); 1064 ddb_entry->fw_ddb_index, relogin_timer));
1021 1065
1022 qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index, 0); 1066 qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index, 0);
@@ -1039,17 +1083,17 @@ static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
1039 } 1083 }
1040 1084
1041 /* Get EEPRom Parameters from NVRAM and validate */ 1085 /* Get EEPRom Parameters from NVRAM and validate */
1042 dev_info(&ha->pdev->dev, "Configuring NVRAM ...\n"); 1086 ql4_printk(KERN_INFO, ha, "Configuring NVRAM ...\n");
1043 if (qla4xxx_is_nvram_configuration_valid(ha) == QLA_SUCCESS) { 1087 if (qla4xxx_is_nvram_configuration_valid(ha) == QLA_SUCCESS) {
1044 spin_lock_irqsave(&ha->hardware_lock, flags); 1088 spin_lock_irqsave(&ha->hardware_lock, flags);
1045 extHwConfig.Asuint32_t = 1089 extHwConfig.Asuint32_t =
1046 rd_nvram_word(ha, eeprom_ext_hw_conf_offset(ha)); 1090 rd_nvram_word(ha, eeprom_ext_hw_conf_offset(ha));
1047 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1091 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1048 } else { 1092 } else {
1049 dev_warn(&ha->pdev->dev, 1093 ql4_printk(KERN_WARNING, ha,
1050 "scsi%ld: %s: EEProm checksum invalid. " 1094 "scsi%ld: %s: EEProm checksum invalid. "
1051 "Please update your EEPROM\n", ha->host_no, 1095 "Please update your EEPROM\n", ha->host_no,
1052 __func__); 1096 __func__);
1053 1097
1054 /* Attempt to set defaults */ 1098 /* Attempt to set defaults */
1055 if (is_qla4010(ha)) 1099 if (is_qla4010(ha))
@@ -1073,12 +1117,21 @@ static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
1073 return QLA_SUCCESS; 1117 return QLA_SUCCESS;
1074} 1118}
1075 1119
1076static void qla4x00_pci_config(struct scsi_qla_host *ha) 1120/**
1121 * qla4_8xxx_pci_config() - Setup ISP82xx PCI configuration registers.
1122 * @ha: HA context
1123 */
1124void qla4_8xxx_pci_config(struct scsi_qla_host *ha)
1125{
1126 pci_set_master(ha->pdev);
1127}
1128
1129void qla4xxx_pci_config(struct scsi_qla_host *ha)
1077{ 1130{
1078 uint16_t w; 1131 uint16_t w;
1079 int status; 1132 int status;
1080 1133
1081 dev_info(&ha->pdev->dev, "Configuring PCI space...\n"); 1134 ql4_printk(KERN_INFO, ha, "Configuring PCI space...\n");
1082 1135
1083 pci_set_master(ha->pdev); 1136 pci_set_master(ha->pdev);
1084 status = pci_set_mwi(ha->pdev); 1137 status = pci_set_mwi(ha->pdev);
@@ -1100,7 +1153,7 @@ static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
1100 unsigned long flags; 1153 unsigned long flags;
1101 uint32_t mbox_status; 1154 uint32_t mbox_status;
1102 1155
1103 dev_info(&ha->pdev->dev, "Starting firmware ...\n"); 1156 ql4_printk(KERN_INFO, ha, "Starting firmware ...\n");
1104 1157
1105 /* 1158 /*
1106 * Start firmware from flash ROM 1159 * Start firmware from flash ROM
@@ -1204,7 +1257,7 @@ int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a)
1204 * This routine performs the necessary steps to start the firmware for 1257 * This routine performs the necessary steps to start the firmware for
1205 * the QLA4010 adapter. 1258 * the QLA4010 adapter.
1206 **/ 1259 **/
1207static int qla4xxx_start_firmware(struct scsi_qla_host *ha) 1260int qla4xxx_start_firmware(struct scsi_qla_host *ha)
1208{ 1261{
1209 unsigned long flags = 0; 1262 unsigned long flags = 0;
1210 uint32_t mbox_status; 1263 uint32_t mbox_status;
@@ -1283,7 +1336,8 @@ static int qla4xxx_start_firmware(struct scsi_qla_host *ha)
1283 if (soft_reset) { 1336 if (soft_reset) {
1284 DEBUG(printk("scsi%ld: %s: Issue Soft Reset\n", ha->host_no, 1337 DEBUG(printk("scsi%ld: %s: Issue Soft Reset\n", ha->host_no,
1285 __func__)); 1338 __func__));
1286 status = qla4xxx_soft_reset(ha); 1339 status = qla4xxx_soft_reset(ha); /* NOTE: acquires drvr
1340 * lock again, but ok */
1287 if (status == QLA_ERROR) { 1341 if (status == QLA_ERROR) {
1288 DEBUG(printk("scsi%d: %s: Soft Reset failed!\n", 1342 DEBUG(printk("scsi%d: %s: Soft Reset failed!\n",
1289 ha->host_no, __func__)); 1343 ha->host_no, __func__));
@@ -1304,7 +1358,6 @@ static int qla4xxx_start_firmware(struct scsi_qla_host *ha)
1304 1358
1305 ql4xxx_unlock_drvr(ha); 1359 ql4xxx_unlock_drvr(ha);
1306 if (status == QLA_SUCCESS) { 1360 if (status == QLA_SUCCESS) {
1307 qla4xxx_get_fw_version(ha);
1308 if (test_and_clear_bit(AF_GET_CRASH_RECORD, &ha->flags)) 1361 if (test_and_clear_bit(AF_GET_CRASH_RECORD, &ha->flags))
1309 qla4xxx_get_crash_record(ha); 1362 qla4xxx_get_crash_record(ha);
1310 } else { 1363 } else {
@@ -1331,18 +1384,21 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
1331 int status = QLA_ERROR; 1384 int status = QLA_ERROR;
1332 int8_t ip_address[IP_ADDR_LEN] = {0} ; 1385 int8_t ip_address[IP_ADDR_LEN] = {0} ;
1333 1386
1334 clear_bit(AF_ONLINE, &ha->flags);
1335 ha->eeprom_cmd_data = 0; 1387 ha->eeprom_cmd_data = 0;
1336 1388
1337 qla4x00_pci_config(ha); 1389 ql4_printk(KERN_INFO, ha, "Configuring PCI space...\n");
1390 ha->isp_ops->pci_config(ha);
1338 1391
1339 qla4xxx_disable_intrs(ha); 1392 ha->isp_ops->disable_intrs(ha);
1340 1393
1341 /* Initialize the Host adapter request/response queues and firmware */ 1394 /* Initialize the Host adapter request/response queues and firmware */
1342 if (qla4xxx_start_firmware(ha) == QLA_ERROR) 1395 if (ha->isp_ops->start_firmware(ha) == QLA_ERROR)
1343 goto exit_init_hba; 1396 goto exit_init_hba;
1344 1397
1345 if (qla4xxx_validate_mac_address(ha) == QLA_ERROR) 1398 if (qla4xxx_get_fw_version(ha) == QLA_ERROR)
1399 goto exit_init_hba;
1400
1401 if (ha->isp_ops->get_sys_info(ha) == QLA_ERROR)
1346 goto exit_init_hba; 1402 goto exit_init_hba;
1347 1403
1348 if (qla4xxx_init_local_data(ha) == QLA_ERROR) 1404 if (qla4xxx_init_local_data(ha) == QLA_ERROR)
@@ -1395,6 +1451,8 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
1395exit_init_online: 1451exit_init_online:
1396 set_bit(AF_ONLINE, &ha->flags); 1452 set_bit(AF_ONLINE, &ha->flags);
1397exit_init_hba: 1453exit_init_hba:
1454 DEBUG2(printk("scsi%ld: initialize adapter: %s\n", ha->host_no,
1455 status == QLA_ERROR ? "FAILED" : "SUCCEDED"));
1398 return status; 1456 return status;
1399} 1457}
1400 1458
@@ -1487,7 +1545,10 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
1487 ddb_entry->fw_ddb_device_state, state, fw_ddb_index)); 1545 ddb_entry->fw_ddb_device_state, state, fw_ddb_index));
1488 if (old_fw_ddb_device_state == state && 1546 if (old_fw_ddb_device_state == state &&
1489 state == DDB_DS_SESSION_ACTIVE) { 1547 state == DDB_DS_SESSION_ACTIVE) {
1490 /* Do nothing, state not changed. */ 1548 if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) {
1549 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
1550 iscsi_unblock_session(ddb_entry->sess);
1551 }
1491 return QLA_SUCCESS; 1552 return QLA_SUCCESS;
1492 } 1553 }
1493 1554
@@ -1511,7 +1572,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
1511 } else { 1572 } else {
1512 /* Device went away, mark device missing */ 1573 /* Device went away, mark device missing */
1513 if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) { 1574 if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) {
1514 DEBUG2(dev_info(&ha->pdev->dev, "%s mark missing " 1575 DEBUG2(ql4_printk(KERN_INFO, ha, "%s mark missing "
1515 "ddb_entry 0x%p sess 0x%p conn 0x%p\n", 1576 "ddb_entry 0x%p sess 0x%p conn 0x%p\n",
1516 __func__, ddb_entry, 1577 __func__, ddb_entry,
1517 ddb_entry->sess, ddb_entry->conn)); 1578 ddb_entry->sess, ddb_entry->conn));
@@ -1543,9 +1604,20 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
1543 atomic_set(&ddb_entry->relogin_timer, 0); 1604 atomic_set(&ddb_entry->relogin_timer, 0);
1544 atomic_set(&ddb_entry->retry_relogin_timer, 1605 atomic_set(&ddb_entry->retry_relogin_timer,
1545 ddb_entry->default_time2wait + 4); 1606 ddb_entry->default_time2wait + 4);
1607 DEBUG(printk("scsi%ld: %s: ddb[%d] "
1608 "initiate relogin after %d seconds\n",
1609 ha->host_no, __func__,
1610 ddb_entry->fw_ddb_index,
1611 ddb_entry->default_time2wait + 4));
1612 } else {
1613 DEBUG(printk("scsi%ld: %s: ddb[%d] "
1614 "relogin not initiated, state = %d, "
1615 "ddb_entry->flags = 0x%lx\n",
1616 ha->host_no, __func__,
1617 ddb_entry->fw_ddb_index,
1618 ddb_entry->fw_ddb_device_state,
1619 ddb_entry->flags));
1546 } 1620 }
1547 } 1621 }
1548
1549 return QLA_SUCCESS; 1622 return QLA_SUCCESS;
1550} 1623}
1551
diff --git a/drivers/scsi/qla4xxx/ql4_inline.h b/drivers/scsi/qla4xxx/ql4_inline.h
index 6375eb017dd3..9471ac755000 100644
--- a/drivers/scsi/qla4xxx/ql4_inline.h
+++ b/drivers/scsi/qla4xxx/ql4_inline.h
@@ -29,7 +29,7 @@ qla4xxx_lookup_ddb_by_fw_index(struct scsi_qla_host *ha, uint32_t fw_ddb_index)
29 ddb_entry = ha->fw_ddb_index_map[fw_ddb_index]; 29 ddb_entry = ha->fw_ddb_index_map[fw_ddb_index];
30 } 30 }
31 31
32 DEBUG3(printk("scsi%d: %s: index [%d], ddb_entry = %p\n", 32 DEBUG3(printk("scsi%d: %s: ddb [%d], ddb_entry = %p\n",
33 ha->host_no, __func__, fw_ddb_index, ddb_entry)); 33 ha->host_no, __func__, fw_ddb_index, ddb_entry));
34 34
35 return ddb_entry; 35 return ddb_entry;
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index e66f3f263f49..f89973deac5b 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -108,8 +108,7 @@ int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
108 wmb(); 108 wmb();
109 109
110 /* Tell ISP it's got a new I/O request */ 110 /* Tell ISP it's got a new I/O request */
111 writel(ha->request_in, &ha->reg->req_q_in); 111 ha->isp_ops->queue_iocb(ha);
112 readl(&ha->reg->req_q_in);
113 112
114exit_send_marker: 113exit_send_marker:
115 spin_unlock_irqrestore(&ha->hardware_lock, flags); 114 spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -194,6 +193,72 @@ static void qla4xxx_build_scsi_iocbs(struct srb *srb,
194} 193}
195 194
196/** 195/**
196 * qla4_8xxx_queue_iocb - Tell ISP it's got new request(s)
197 * @ha: pointer to host adapter structure.
198 *
199 * This routine notifies the ISP that one or more new request
200 * queue entries have been placed on the request queue.
201 **/
202void qla4_8xxx_queue_iocb(struct scsi_qla_host *ha)
203{
204 uint32_t dbval = 0;
205 unsigned long wtime;
206
207 dbval = 0x14 | (ha->func_num << 5);
208 dbval = dbval | (0 << 8) | (ha->request_in << 16);
209 writel(dbval, (unsigned long __iomem *)ha->nx_db_wr_ptr);
210 wmb();
211
212 wtime = jiffies + (2 * HZ);
213 while (readl((void __iomem *)ha->nx_db_rd_ptr) != dbval &&
214 !time_after_eq(jiffies, wtime)) {
215 writel(dbval, (unsigned long __iomem *)ha->nx_db_wr_ptr);
216 wmb();
217 }
218}
219
220/**
221 * qla4_8xxx_complete_iocb - Tell ISP we're done with response(s)
222 * @ha: pointer to host adapter structure.
223 *
224 * This routine notifies the ISP that one or more response/completion
225 * queue entries have been processed by the driver.
226 * This also clears the interrupt.
227 **/
228void qla4_8xxx_complete_iocb(struct scsi_qla_host *ha)
229{
230 writel(ha->response_out, &ha->qla4_8xxx_reg->rsp_q_out);
231 readl(&ha->qla4_8xxx_reg->rsp_q_out);
232}
233
234/**
235 * qla4xxx_queue_iocb - Tell ISP it's got new request(s)
236 * @ha: pointer to host adapter structure.
237 *
238 * This routine is notifies the ISP that one or more new request
239 * queue entries have been placed on the request queue.
240 **/
241void qla4xxx_queue_iocb(struct scsi_qla_host *ha)
242{
243 writel(ha->request_in, &ha->reg->req_q_in);
244 readl(&ha->reg->req_q_in);
245}
246
247/**
248 * qla4xxx_complete_iocb - Tell ISP we're done with response(s)
249 * @ha: pointer to host adapter structure.
250 *
251 * This routine is notifies the ISP that one or more response/completion
252 * queue entries have been processed by the driver.
253 * This also clears the interrupt.
254 **/
255void qla4xxx_complete_iocb(struct scsi_qla_host *ha)
256{
257 writel(ha->response_out, &ha->reg->rsp_q_out);
258 readl(&ha->reg->rsp_q_out);
259}
260
261/**
197 * qla4xxx_send_command_to_isp - issues command to HBA 262 * qla4xxx_send_command_to_isp - issues command to HBA
198 * @ha: pointer to host adapter structure. 263 * @ha: pointer to host adapter structure.
199 * @srb: pointer to SCSI Request Block to be sent to ISP 264 * @srb: pointer to SCSI Request Block to be sent to ISP
@@ -310,9 +375,7 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
310 srb->iocb_cnt = req_cnt; 375 srb->iocb_cnt = req_cnt;
311 ha->req_q_count -= req_cnt; 376 ha->req_q_count -= req_cnt;
312 377
313 /* Debug print statements */ 378 ha->isp_ops->queue_iocb(ha);
314 writel(ha->request_in, &ha->reg->req_q_in);
315 readl(&ha->reg->req_q_in);
316 spin_unlock_irqrestore(&ha->hardware_lock, flags); 379 spin_unlock_irqrestore(&ha->hardware_lock, flags);
317 380
318 return QLA_SUCCESS; 381 return QLA_SUCCESS;
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 596c3031483c..aa65697a86b4 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -118,13 +118,12 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
118 118
119 srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle)); 119 srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
120 if (!srb) { 120 if (!srb) {
121 /* FIXMEdg: Don't we need to reset ISP in this case??? */
122 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Status Entry invalid " 121 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Status Entry invalid "
123 "handle 0x%x, sp=%p. This cmd may have already " 122 "handle 0x%x, sp=%p. This cmd may have already "
124 "been completed.\n", ha->host_no, __func__, 123 "been completed.\n", ha->host_no, __func__,
125 le32_to_cpu(sts_entry->handle), srb)); 124 le32_to_cpu(sts_entry->handle), srb));
126 dev_warn(&ha->pdev->dev, "%s invalid status entry:" 125 ql4_printk(KERN_WARNING, ha, "%s invalid status entry:"
127 " handle=0x%0x\n", __func__, sts_entry->handle); 126 " handle=0x%0x\n", __func__, sts_entry->handle);
128 set_bit(DPC_RESET_HA, &ha->dpc_flags); 127 set_bit(DPC_RESET_HA, &ha->dpc_flags);
129 return; 128 return;
130 } 129 }
@@ -135,8 +134,8 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
135 "OS pkt->handle=%d srb=%p srb->state:%d\n", 134 "OS pkt->handle=%d srb=%p srb->state:%d\n",
136 ha->host_no, __func__, sts_entry->handle, 135 ha->host_no, __func__, sts_entry->handle,
137 srb, srb->state)); 136 srb, srb->state));
138 dev_warn(&ha->pdev->dev, "Command is NULL:" 137 ql4_printk(KERN_WARNING, ha, "Command is NULL:"
139 " already returned to OS (srb=%p)\n", srb); 138 " already returned to OS (srb=%p)\n", srb);
140 return; 139 return;
141 } 140 }
142 141
@@ -293,6 +292,10 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
293 292
294 case SCS_DEVICE_LOGGED_OUT: 293 case SCS_DEVICE_LOGGED_OUT:
295 case SCS_DEVICE_UNAVAILABLE: 294 case SCS_DEVICE_UNAVAILABLE:
295 DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: SCS_DEVICE "
296 "state: 0x%x\n", ha->host_no,
297 cmd->device->channel, cmd->device->id,
298 cmd->device->lun, sts_entry->completionStatus));
296 /* 299 /*
297 * Mark device missing so that we won't continue to 300 * Mark device missing so that we won't continue to
298 * send I/O to this device. We should get a ddb 301 * send I/O to this device. We should get a ddb
@@ -339,16 +342,14 @@ status_entry_exit:
339 * This routine process response queue completions in interrupt context. 342 * This routine process response queue completions in interrupt context.
340 * Hardware_lock locked upon entry 343 * Hardware_lock locked upon entry
341 **/ 344 **/
342static void qla4xxx_process_response_queue(struct scsi_qla_host * ha) 345void qla4xxx_process_response_queue(struct scsi_qla_host *ha)
343{ 346{
344 uint32_t count = 0; 347 uint32_t count = 0;
345 struct srb *srb = NULL; 348 struct srb *srb = NULL;
346 struct status_entry *sts_entry; 349 struct status_entry *sts_entry;
347 350
348 /* Process all responses from response queue */ 351 /* Process all responses from response queue */
349 while ((ha->response_in = 352 while ((ha->response_ptr->signature != RESPONSE_PROCESSED)) {
350 (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in)) !=
351 ha->response_out) {
352 sts_entry = (struct status_entry *) ha->response_ptr; 353 sts_entry = (struct status_entry *) ha->response_ptr;
353 count++; 354 count++;
354 355
@@ -413,14 +414,14 @@ static void qla4xxx_process_response_queue(struct scsi_qla_host * ha)
413 sts_entry->hdr.entryType)); 414 sts_entry->hdr.entryType));
414 goto exit_prq_error; 415 goto exit_prq_error;
415 } 416 }
417 ((struct response *)sts_entry)->signature = RESPONSE_PROCESSED;
418 wmb();
416 } 419 }
417 420
418 /* 421 /*
419 * Done with responses, update the ISP For QLA4010, this also clears 422 * Tell ISP we're done with response(s). This also clears the interrupt.
420 * the interrupt.
421 */ 423 */
422 writel(ha->response_out, &ha->reg->rsp_q_out); 424 ha->isp_ops->complete_iocb(ha);
423 readl(&ha->reg->rsp_q_out);
424 425
425 return; 426 return;
426 427
@@ -430,9 +431,7 @@ exit_prq_invalid_handle:
430 sts_entry->completionStatus)); 431 sts_entry->completionStatus));
431 432
432exit_prq_error: 433exit_prq_error:
433 writel(ha->response_out, &ha->reg->rsp_q_out); 434 ha->isp_ops->complete_iocb(ha);
434 readl(&ha->reg->rsp_q_out);
435
436 set_bit(DPC_RESET_HA, &ha->dpc_flags); 435 set_bit(DPC_RESET_HA, &ha->dpc_flags);
437} 436}
438 437
@@ -448,7 +447,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
448 uint32_t mbox_status) 447 uint32_t mbox_status)
449{ 448{
450 int i; 449 int i;
451 uint32_t mbox_stat2, mbox_stat3; 450 uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
452 451
453 if ((mbox_status == MBOX_STS_BUSY) || 452 if ((mbox_status == MBOX_STS_BUSY) ||
454 (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) || 453 (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
@@ -460,27 +459,37 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
460 * Copy all mailbox registers to a temporary 459 * Copy all mailbox registers to a temporary
461 * location and set mailbox command done flag 460 * location and set mailbox command done flag
462 */ 461 */
463 for (i = 1; i < ha->mbox_status_count; i++) 462 for (i = 0; i < ha->mbox_status_count; i++)
464 ha->mbox_status[i] = 463 ha->mbox_status[i] = is_qla8022(ha)
465 readl(&ha->reg->mailbox[i]); 464 ? readl(&ha->qla4_8xxx_reg->mailbox_out[i])
465 : readl(&ha->reg->mailbox[i]);
466 466
467 set_bit(AF_MBOX_COMMAND_DONE, &ha->flags); 467 set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
468
469 if (test_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags))
470 complete(&ha->mbx_intr_comp);
468 } 471 }
469 } else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) { 472 } else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
473 for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
474 mbox_sts[i] = is_qla8022(ha)
475 ? readl(&ha->qla4_8xxx_reg->mailbox_out[i])
476 : readl(&ha->reg->mailbox[i]);
477
470 /* Immediately process the AENs that don't require much work. 478 /* Immediately process the AENs that don't require much work.
471 * Only queue the database_changed AENs */ 479 * Only queue the database_changed AENs */
472 if (ha->aen_log.count < MAX_AEN_ENTRIES) { 480 if (ha->aen_log.count < MAX_AEN_ENTRIES) {
473 for (i = 0; i < MBOX_AEN_REG_COUNT; i++) 481 for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
474 ha->aen_log.entry[ha->aen_log.count].mbox_sts[i] = 482 ha->aen_log.entry[ha->aen_log.count].mbox_sts[i] =
475 readl(&ha->reg->mailbox[i]); 483 mbox_sts[i];
476 ha->aen_log.count++; 484 ha->aen_log.count++;
477 } 485 }
478 switch (mbox_status) { 486 switch (mbox_status) {
479 case MBOX_ASTS_SYSTEM_ERROR: 487 case MBOX_ASTS_SYSTEM_ERROR:
480 /* Log Mailbox registers */ 488 /* Log Mailbox registers */
489 ql4_printk(KERN_INFO, ha, "%s: System Err\n", __func__);
481 if (ql4xdontresethba) { 490 if (ql4xdontresethba) {
482 DEBUG2(printk("%s:Dont Reset HBA\n", 491 DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n",
483 __func__)); 492 ha->host_no, __func__));
484 } else { 493 } else {
485 set_bit(AF_GET_CRASH_RECORD, &ha->flags); 494 set_bit(AF_GET_CRASH_RECORD, &ha->flags);
486 set_bit(DPC_RESET_HA, &ha->dpc_flags); 495 set_bit(DPC_RESET_HA, &ha->dpc_flags);
@@ -502,18 +511,15 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
502 if (test_bit(AF_INIT_DONE, &ha->flags)) 511 if (test_bit(AF_INIT_DONE, &ha->flags))
503 set_bit(DPC_LINK_CHANGED, &ha->dpc_flags); 512 set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
504 513
505 DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x Adapter" 514 ql4_printk(KERN_INFO, ha, "%s: LINK UP\n", __func__);
506 " LINK UP\n", ha->host_no,
507 mbox_status));
508 break; 515 break;
509 516
510 case MBOX_ASTS_LINK_DOWN: 517 case MBOX_ASTS_LINK_DOWN:
511 clear_bit(AF_LINK_UP, &ha->flags); 518 clear_bit(AF_LINK_UP, &ha->flags);
512 set_bit(DPC_LINK_CHANGED, &ha->dpc_flags); 519 if (test_bit(AF_INIT_DONE, &ha->flags))
520 set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
513 521
514 DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x Adapter" 522 ql4_printk(KERN_INFO, ha, "%s: LINK DOWN\n", __func__);
515 " LINK DOWN\n", ha->host_no,
516 mbox_status));
517 break; 523 break;
518 524
519 case MBOX_ASTS_HEARTBEAT: 525 case MBOX_ASTS_HEARTBEAT:
@@ -539,12 +545,17 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
539 break; 545 break;
540 546
541 case MBOX_ASTS_IP_ADDR_STATE_CHANGED: 547 case MBOX_ASTS_IP_ADDR_STATE_CHANGED:
542 mbox_stat2 = readl(&ha->reg->mailbox[2]); 548 printk("scsi%ld: AEN %04x, mbox_sts[2]=%04x, "
543 mbox_stat3 = readl(&ha->reg->mailbox[3]); 549 "mbox_sts[3]=%04x\n", ha->host_no, mbox_sts[0],
544 550 mbox_sts[2], mbox_sts[3]);
545 if ((mbox_stat3 == 5) && (mbox_stat2 == 3)) 551
552 /* mbox_sts[2] = Old ACB state
553 * mbox_sts[3] = new ACB state */
554 if ((mbox_sts[3] == ACB_STATE_VALID) &&
555 (mbox_sts[2] == ACB_STATE_TENTATIVE))
546 set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags); 556 set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
547 else if ((mbox_stat3 == 2) && (mbox_stat2 == 5)) 557 else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) &&
558 (mbox_sts[2] == ACB_STATE_VALID))
548 set_bit(DPC_RESET_HA, &ha->dpc_flags); 559 set_bit(DPC_RESET_HA, &ha->dpc_flags);
549 break; 560 break;
550 561
@@ -553,9 +564,8 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
553 /* No action */ 564 /* No action */
554 DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x, " 565 DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x, "
555 "mbox_sts[1]=%04x, mbox_sts[2]=%04x\n", 566 "mbox_sts[1]=%04x, mbox_sts[2]=%04x\n",
556 ha->host_no, mbox_status, 567 ha->host_no, mbox_sts[0],
557 readl(&ha->reg->mailbox[1]), 568 mbox_sts[1], mbox_sts[2]));
558 readl(&ha->reg->mailbox[2])));
559 break; 569 break;
560 570
561 case MBOX_ASTS_SELF_TEST_FAILED: 571 case MBOX_ASTS_SELF_TEST_FAILED:
@@ -563,10 +573,8 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
563 /* No action */ 573 /* No action */
564 DEBUG2(printk("scsi%ld: AEN %04x, mbox_sts[1]=%04x, " 574 DEBUG2(printk("scsi%ld: AEN %04x, mbox_sts[1]=%04x, "
565 "mbox_sts[2]=%04x, mbox_sts[3]=%04x\n", 575 "mbox_sts[2]=%04x, mbox_sts[3]=%04x\n",
566 ha->host_no, mbox_status, 576 ha->host_no, mbox_sts[0], mbox_sts[1],
567 readl(&ha->reg->mailbox[1]), 577 mbox_sts[2], mbox_sts[3]));
568 readl(&ha->reg->mailbox[2]),
569 readl(&ha->reg->mailbox[3])));
570 break; 578 break;
571 579
572 case MBOX_ASTS_DATABASE_CHANGED: 580 case MBOX_ASTS_DATABASE_CHANGED:
@@ -577,21 +585,17 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
577 /* decrement available counter */ 585 /* decrement available counter */
578 ha->aen_q_count--; 586 ha->aen_q_count--;
579 587
580 for (i = 1; i < MBOX_AEN_REG_COUNT; i++) 588 for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
581 ha->aen_q[ha->aen_in].mbox_sts[i] = 589 ha->aen_q[ha->aen_in].mbox_sts[i] =
582 readl(&ha->reg->mailbox[i]); 590 mbox_sts[i];
583
584 ha->aen_q[ha->aen_in].mbox_sts[0] = mbox_status;
585 591
586 /* print debug message */ 592 /* print debug message */
587 DEBUG2(printk("scsi%ld: AEN[%d] %04x queued" 593 DEBUG2(printk("scsi%ld: AEN[%d] %04x queued"
588 " mb1:0x%x mb2:0x%x mb3:0x%x mb4:0x%x\n", 594 " mb1:0x%x mb2:0x%x mb3:0x%x mb4:0x%x\n",
589 ha->host_no, ha->aen_in, 595 ha->host_no, ha->aen_in, mbox_sts[0],
590 mbox_status, 596 mbox_sts[1], mbox_sts[2], mbox_sts[3],
591 ha->aen_q[ha->aen_in].mbox_sts[1], 597 mbox_sts[4]));
592 ha->aen_q[ha->aen_in].mbox_sts[2], 598
593 ha->aen_q[ha->aen_in].mbox_sts[3],
594 ha->aen_q[ha->aen_in]. mbox_sts[4]));
595 /* advance pointer */ 599 /* advance pointer */
596 ha->aen_in++; 600 ha->aen_in++;
597 if (ha->aen_in == MAX_AEN_ENTRIES) 601 if (ha->aen_in == MAX_AEN_ENTRIES)
@@ -603,18 +607,16 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
603 DEBUG2(printk("scsi%ld: %s: aen %04x, queue " 607 DEBUG2(printk("scsi%ld: %s: aen %04x, queue "
604 "overflowed! AEN LOST!!\n", 608 "overflowed! AEN LOST!!\n",
605 ha->host_no, __func__, 609 ha->host_no, __func__,
606 mbox_status)); 610 mbox_sts[0]));
607 611
608 DEBUG2(printk("scsi%ld: DUMP AEN QUEUE\n", 612 DEBUG2(printk("scsi%ld: DUMP AEN QUEUE\n",
609 ha->host_no)); 613 ha->host_no));
610 614
611 for (i = 0; i < MAX_AEN_ENTRIES; i++) { 615 for (i = 0; i < MAX_AEN_ENTRIES; i++) {
612 DEBUG2(printk("AEN[%d] %04x %04x %04x " 616 DEBUG2(printk("AEN[%d] %04x %04x %04x "
613 "%04x\n", i, 617 "%04x\n", i, mbox_sts[0],
614 ha->aen_q[i].mbox_sts[0], 618 mbox_sts[1], mbox_sts[2],
615 ha->aen_q[i].mbox_sts[1], 619 mbox_sts[3]));
616 ha->aen_q[i].mbox_sts[2],
617 ha->aen_q[i].mbox_sts[3]));
618 } 620 }
619 } 621 }
620 break; 622 break;
@@ -622,7 +624,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
622 default: 624 default:
623 DEBUG2(printk(KERN_WARNING 625 DEBUG2(printk(KERN_WARNING
624 "scsi%ld: AEN %04x UNKNOWN\n", 626 "scsi%ld: AEN %04x UNKNOWN\n",
625 ha->host_no, mbox_status)); 627 ha->host_no, mbox_sts[0]));
626 break; 628 break;
627 } 629 }
628 } else { 630 } else {
@@ -634,6 +636,30 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
634} 636}
635 637
636/** 638/**
639 * qla4_8xxx_interrupt_service_routine - isr
640 * @ha: pointer to host adapter structure.
641 *
642 * This is the main interrupt service routine.
643 * hardware_lock locked upon entry. runs in interrupt context.
644 **/
645void qla4_8xxx_interrupt_service_routine(struct scsi_qla_host *ha,
646 uint32_t intr_status)
647{
648 /* Process response queue interrupt. */
649 if (intr_status & HSRX_RISC_IOCB_INT)
650 qla4xxx_process_response_queue(ha);
651
652 /* Process mailbox/asynch event interrupt.*/
653 if (intr_status & HSRX_RISC_MB_INT)
654 qla4xxx_isr_decode_mailbox(ha,
655 readl(&ha->qla4_8xxx_reg->mailbox_out[0]));
656
657 /* clear the interrupt */
658 writel(0, &ha->qla4_8xxx_reg->host_int);
659 readl(&ha->qla4_8xxx_reg->host_int);
660}
661
662/**
637 * qla4xxx_interrupt_service_routine - isr 663 * qla4xxx_interrupt_service_routine - isr
638 * @ha: pointer to host adapter structure. 664 * @ha: pointer to host adapter structure.
639 * 665 *
@@ -660,6 +686,28 @@ void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
660} 686}
661 687
662/** 688/**
689 * qla4_8xxx_spurious_interrupt - processes spurious interrupt
690 * @ha: pointer to host adapter structure.
691 * @reqs_count: .
692 *
693 **/
694static void qla4_8xxx_spurious_interrupt(struct scsi_qla_host *ha,
695 uint8_t reqs_count)
696{
697 if (reqs_count)
698 return;
699
700 DEBUG2(ql4_printk(KERN_INFO, ha, "Spurious Interrupt\n"));
701 if (is_qla8022(ha)) {
702 writel(0, &ha->qla4_8xxx_reg->host_int);
703 if (test_bit(AF_INTx_ENABLED, &ha->flags))
704 qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
705 0xfbff);
706 }
707 ha->spurious_int_count++;
708}
709
710/**
663 * qla4xxx_intr_handler - hardware interrupt handler. 711 * qla4xxx_intr_handler - hardware interrupt handler.
664 * @irq: Unused 712 * @irq: Unused
665 * @dev_id: Pointer to host adapter structure 713 * @dev_id: Pointer to host adapter structure
@@ -689,15 +737,14 @@ irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
689 /* 737 /*
690 * Read interrupt status 738 * Read interrupt status
691 */ 739 */
692 if (le32_to_cpu(ha->shadow_regs->rsp_q_in) != 740 if (ha->isp_ops->rd_shdw_rsp_q_in(ha) !=
693 ha->response_out) 741 ha->response_out)
694 intr_status = CSR_SCSI_COMPLETION_INTR; 742 intr_status = CSR_SCSI_COMPLETION_INTR;
695 else 743 else
696 intr_status = readl(&ha->reg->ctrl_status); 744 intr_status = readl(&ha->reg->ctrl_status);
697 745
698 if ((intr_status & 746 if ((intr_status &
699 (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) == 747 (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) == 0) {
700 0) {
701 if (reqs_count == 0) 748 if (reqs_count == 0)
702 ha->spurious_int_count++; 749 ha->spurious_int_count++;
703 break; 750 break;
@@ -739,22 +786,159 @@ irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
739 &ha->reg->ctrl_status); 786 &ha->reg->ctrl_status);
740 readl(&ha->reg->ctrl_status); 787 readl(&ha->reg->ctrl_status);
741 788
742 if (!ql4_mod_unload) 789 if (!test_bit(AF_HBA_GOING_AWAY, &ha->flags))
743 set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); 790 set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
744 791
745 break; 792 break;
746 } else if (intr_status & INTR_PENDING) { 793 } else if (intr_status & INTR_PENDING) {
747 qla4xxx_interrupt_service_routine(ha, intr_status); 794 ha->isp_ops->interrupt_service_routine(ha, intr_status);
748 ha->total_io_count++; 795 ha->total_io_count++;
749 if (++reqs_count == MAX_REQS_SERVICED_PER_INTR) 796 if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
750 break; 797 break;
798 }
799 }
800
801 spin_unlock_irqrestore(&ha->hardware_lock, flags);
802
803 return IRQ_HANDLED;
804}
805
806/**
807 * qla4_8xxx_intr_handler - hardware interrupt handler.
808 * @irq: Unused
809 * @dev_id: Pointer to host adapter structure
810 **/
811irqreturn_t qla4_8xxx_intr_handler(int irq, void *dev_id)
812{
813 struct scsi_qla_host *ha = dev_id;
814 uint32_t intr_status;
815 uint32_t status;
816 unsigned long flags = 0;
817 uint8_t reqs_count = 0;
818
819 ha->isr_count++;
820 status = qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
821 if (!(status & ha->nx_legacy_intr.int_vec_bit))
822 return IRQ_NONE;
823
824 status = qla4_8xxx_rd_32(ha, ISR_INT_STATE_REG);
825 if (!ISR_IS_LEGACY_INTR_TRIGGERED(status)) {
826 DEBUG2(ql4_printk(KERN_INFO, ha,
827 "%s legacy Int not triggered\n", __func__));
828 return IRQ_NONE;
829 }
830
831 /* clear the interrupt */
832 qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
833
834 /* read twice to ensure write is flushed */
835 qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
836 qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
837
838 spin_lock_irqsave(&ha->hardware_lock, flags);
839 while (1) {
840 if (!(readl(&ha->qla4_8xxx_reg->host_int) &
841 ISRX_82XX_RISC_INT)) {
842 qla4_8xxx_spurious_interrupt(ha, reqs_count);
843 break;
844 }
845 intr_status = readl(&ha->qla4_8xxx_reg->host_status);
846 if ((intr_status &
847 (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) {
848 qla4_8xxx_spurious_interrupt(ha, reqs_count);
849 break;
850 }
851
852 ha->isp_ops->interrupt_service_routine(ha, intr_status);
853
854 /* Enable Interrupt */
855 qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
751 856
752 intr_status = 0; 857 if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
858 break;
859 }
860
861 spin_unlock_irqrestore(&ha->hardware_lock, flags);
862 return IRQ_HANDLED;
863}
864
865irqreturn_t
866qla4_8xxx_msi_handler(int irq, void *dev_id)
867{
868 struct scsi_qla_host *ha;
869
870 ha = (struct scsi_qla_host *) dev_id;
871 if (!ha) {
872 DEBUG2(printk(KERN_INFO
873 "qla4xxx: MSIX: Interrupt with NULL host ptr\n"));
874 return IRQ_NONE;
875 }
876
877 ha->isr_count++;
878 /* clear the interrupt */
879 qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
880
881 /* read twice to ensure write is flushed */
882 qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
883 qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
884
885 return qla4_8xxx_default_intr_handler(irq, dev_id);
886}
887
888/**
889 * qla4_8xxx_default_intr_handler - hardware interrupt handler.
890 * @irq: Unused
891 * @dev_id: Pointer to host adapter structure
892 *
893 * This interrupt handler is called directly for MSI-X, and
894 * called indirectly for MSI.
895 **/
896irqreturn_t
897qla4_8xxx_default_intr_handler(int irq, void *dev_id)
898{
899 struct scsi_qla_host *ha = dev_id;
900 unsigned long flags;
901 uint32_t intr_status;
902 uint8_t reqs_count = 0;
903
904 spin_lock_irqsave(&ha->hardware_lock, flags);
905 while (1) {
906 if (!(readl(&ha->qla4_8xxx_reg->host_int) &
907 ISRX_82XX_RISC_INT)) {
908 qla4_8xxx_spurious_interrupt(ha, reqs_count);
909 break;
910 }
911
912 intr_status = readl(&ha->qla4_8xxx_reg->host_status);
913 if ((intr_status &
914 (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) {
915 qla4_8xxx_spurious_interrupt(ha, reqs_count);
916 break;
753 } 917 }
918
919 ha->isp_ops->interrupt_service_routine(ha, intr_status);
920
921 if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
922 break;
754 } 923 }
755 924
925 ha->isr_count++;
756 spin_unlock_irqrestore(&ha->hardware_lock, flags); 926 spin_unlock_irqrestore(&ha->hardware_lock, flags);
927 return IRQ_HANDLED;
928}
757 929
930irqreturn_t
931qla4_8xxx_msix_rsp_q(int irq, void *dev_id)
932{
933 struct scsi_qla_host *ha = dev_id;
934 unsigned long flags;
935
936 spin_lock_irqsave(&ha->hardware_lock, flags);
937 qla4xxx_process_response_queue(ha);
938 writel(0, &ha->qla4_8xxx_reg->host_int);
939 spin_unlock_irqrestore(&ha->hardware_lock, flags);
940
941 ha->isr_count++;
758 return IRQ_HANDLED; 942 return IRQ_HANDLED;
759} 943}
760 944
@@ -825,7 +1009,7 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
825 ((ddb_entry->default_time2wait + 1009 ((ddb_entry->default_time2wait +
826 4) * HZ); 1010 4) * HZ);
827 1011
828 DEBUG2(printk("scsi%ld: ddb index [%d] initate" 1012 DEBUG2(printk("scsi%ld: ddb [%d] initate"
829 " RELOGIN after %d seconds\n", 1013 " RELOGIN after %d seconds\n",
830 ha->host_no, 1014 ha->host_no,
831 ddb_entry->fw_ddb_index, 1015 ddb_entry->fw_ddb_index,
@@ -847,3 +1031,81 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
847 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1031 spin_unlock_irqrestore(&ha->hardware_lock, flags);
848} 1032}
849 1033
1034int qla4xxx_request_irqs(struct scsi_qla_host *ha)
1035{
1036 int ret;
1037
1038 if (!is_qla8022(ha))
1039 goto try_intx;
1040
1041 if (ql4xenablemsix == 2)
1042 goto try_msi;
1043
1044 if (ql4xenablemsix == 0 || ql4xenablemsix != 1)
1045 goto try_intx;
1046
1047 /* Trying MSI-X */
1048 ret = qla4_8xxx_enable_msix(ha);
1049 if (!ret) {
1050 DEBUG2(ql4_printk(KERN_INFO, ha,
1051 "MSI-X: Enabled (0x%X).\n", ha->revision_id));
1052 goto irq_attached;
1053 }
1054
1055 ql4_printk(KERN_WARNING, ha,
1056 "MSI-X: Falling back-to MSI mode -- %d.\n", ret);
1057
1058try_msi:
1059 /* Trying MSI */
1060 ret = pci_enable_msi(ha->pdev);
1061 if (!ret) {
1062 ret = request_irq(ha->pdev->irq, qla4_8xxx_msi_handler,
1063 IRQF_DISABLED|IRQF_SHARED, DRIVER_NAME, ha);
1064 if (!ret) {
1065 DEBUG2(ql4_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
1066 set_bit(AF_MSI_ENABLED, &ha->flags);
1067 goto irq_attached;
1068 } else {
1069 ql4_printk(KERN_WARNING, ha,
1070 "MSI: Failed to reserve interrupt %d "
1071 "already in use.\n", ha->pdev->irq);
1072 pci_disable_msi(ha->pdev);
1073 }
1074 }
1075 ql4_printk(KERN_WARNING, ha,
1076 "MSI: Falling back-to INTx mode -- %d.\n", ret);
1077
1078try_intx:
1079 /* Trying INTx */
1080 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
1081 IRQF_DISABLED|IRQF_SHARED, DRIVER_NAME, ha);
1082 if (!ret) {
1083 DEBUG2(ql4_printk(KERN_INFO, ha, "INTx: Enabled.\n"));
1084 set_bit(AF_INTx_ENABLED, &ha->flags);
1085 goto irq_attached;
1086
1087 } else {
1088 ql4_printk(KERN_WARNING, ha,
1089 "INTx: Failed to reserve interrupt %d already in"
1090 " use.\n", ha->pdev->irq);
1091 return ret;
1092 }
1093
1094irq_attached:
1095 set_bit(AF_IRQ_ATTACHED, &ha->flags);
1096 ha->host->irq = ha->pdev->irq;
1097 ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n",
1098 __func__, ha->pdev->irq);
1099 return ret;
1100}
1101
1102void qla4xxx_free_irqs(struct scsi_qla_host *ha)
1103{
1104 if (test_bit(AF_MSIX_ENABLED, &ha->flags))
1105 qla4_8xxx_disable_msix(ha);
1106 else if (test_and_clear_bit(AF_MSI_ENABLED, &ha->flags)) {
1107 free_irq(ha->pdev->irq, ha);
1108 pci_disable_msi(ha->pdev);
1109 } else if (test_and_clear_bit(AF_INTx_ENABLED, &ha->flags))
1110 free_irq(ha->pdev->irq, ha);
1111}
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 75496fb0ae75..940ee561ee0a 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -19,13 +19,13 @@
19 * @mbx_cmd: data pointer for mailbox in registers. 19 * @mbx_cmd: data pointer for mailbox in registers.
20 * @mbx_sts: data pointer for mailbox out registers. 20 * @mbx_sts: data pointer for mailbox out registers.
21 * 21 *
22 * This routine sssue mailbox commands and waits for completion. 22 * This routine isssue mailbox commands and waits for completion.
23 * If outCount is 0, this routine completes successfully WITHOUT waiting 23 * If outCount is 0, this routine completes successfully WITHOUT waiting
24 * for the mailbox command to complete. 24 * for the mailbox command to complete.
25 **/ 25 **/
26static int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, 26int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
27 uint8_t outCount, uint32_t *mbx_cmd, 27 uint8_t outCount, uint32_t *mbx_cmd,
28 uint32_t *mbx_sts) 28 uint32_t *mbx_sts)
29{ 29{
30 int status = QLA_ERROR; 30 int status = QLA_ERROR;
31 uint8_t i; 31 uint8_t i;
@@ -59,32 +59,66 @@ static int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
59 } 59 }
60 60
61 /* To prevent overwriting mailbox registers for a command that has 61 /* To prevent overwriting mailbox registers for a command that has
62 * not yet been serviced, check to see if a previously issued 62 * not yet been serviced, check to see if an active command
63 * mailbox command is interrupting. 63 * (AEN, IOCB, etc.) is interrupting, then service it.
64 * ----------------------------------------------------------------- 64 * -----------------------------------------------------------------
65 */ 65 */
66 spin_lock_irqsave(&ha->hardware_lock, flags); 66 spin_lock_irqsave(&ha->hardware_lock, flags);
67 intr_status = readl(&ha->reg->ctrl_status); 67
68 if (intr_status & CSR_SCSI_PROCESSOR_INTR) { 68 if (is_qla8022(ha)) {
69 /* Service existing interrupt */ 69 intr_status = readl(&ha->qla4_8xxx_reg->host_int);
70 qla4xxx_interrupt_service_routine(ha, intr_status); 70 if (intr_status & ISRX_82XX_RISC_INT) {
71 clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags); 71 /* Service existing interrupt */
72 DEBUG2(printk("scsi%ld: %s: "
73 "servicing existing interrupt\n",
74 ha->host_no, __func__));
75 intr_status = readl(&ha->qla4_8xxx_reg->host_status);
76 ha->isp_ops->interrupt_service_routine(ha, intr_status);
77 clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
78 if (test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
79 test_bit(AF_INTx_ENABLED, &ha->flags))
80 qla4_8xxx_wr_32(ha,
81 ha->nx_legacy_intr.tgt_mask_reg,
82 0xfbff);
83 }
84 } else {
85 intr_status = readl(&ha->reg->ctrl_status);
86 if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
87 /* Service existing interrupt */
88 ha->isp_ops->interrupt_service_routine(ha, intr_status);
89 clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
90 }
72 } 91 }
73 92
74 /* Send the mailbox command to the firmware */
75 ha->mbox_status_count = outCount; 93 ha->mbox_status_count = outCount;
76 for (i = 0; i < outCount; i++) 94 for (i = 0; i < outCount; i++)
77 ha->mbox_status[i] = 0; 95 ha->mbox_status[i] = 0;
78 96
79 /* Load all mailbox registers, except mailbox 0. */ 97 if (is_qla8022(ha)) {
80 for (i = 1; i < inCount; i++) 98 /* Load all mailbox registers, except mailbox 0. */
81 writel(mbx_cmd[i], &ha->reg->mailbox[i]); 99 DEBUG5(
100 printk("scsi%ld: %s: Cmd ", ha->host_no, __func__);
101 for (i = 0; i < inCount; i++)
102 printk("mb%d=%04x ", i, mbx_cmd[i]);
103 printk("\n"));
104
105 for (i = 1; i < inCount; i++)
106 writel(mbx_cmd[i], &ha->qla4_8xxx_reg->mailbox_in[i]);
107 writel(mbx_cmd[0], &ha->qla4_8xxx_reg->mailbox_in[0]);
108 readl(&ha->qla4_8xxx_reg->mailbox_in[0]);
109 writel(HINT_MBX_INT_PENDING, &ha->qla4_8xxx_reg->hint);
110 } else {
111 /* Load all mailbox registers, except mailbox 0. */
112 for (i = 1; i < inCount; i++)
113 writel(mbx_cmd[i], &ha->reg->mailbox[i]);
114
115 /* Wakeup firmware */
116 writel(mbx_cmd[0], &ha->reg->mailbox[0]);
117 readl(&ha->reg->mailbox[0]);
118 writel(set_rmask(CSR_INTR_RISC), &ha->reg->ctrl_status);
119 readl(&ha->reg->ctrl_status);
120 }
82 121
83 /* Wakeup firmware */
84 writel(mbx_cmd[0], &ha->reg->mailbox[0]);
85 readl(&ha->reg->mailbox[0]);
86 writel(set_rmask(CSR_INTR_RISC), &ha->reg->ctrl_status);
87 readl(&ha->reg->ctrl_status);
88 spin_unlock_irqrestore(&ha->hardware_lock, flags); 122 spin_unlock_irqrestore(&ha->hardware_lock, flags);
89 123
90 /* Wait for completion */ 124 /* Wait for completion */
@@ -98,26 +132,66 @@ static int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
98 status = QLA_SUCCESS; 132 status = QLA_SUCCESS;
99 goto mbox_exit; 133 goto mbox_exit;
100 } 134 }
101 /* Wait for command to complete */
102 wait_count = jiffies + MBOX_TOV * HZ;
103 while (test_bit(AF_MBOX_COMMAND_DONE, &ha->flags) == 0) {
104 if (time_after_eq(jiffies, wait_count))
105 break;
106 135
107 spin_lock_irqsave(&ha->hardware_lock, flags); 136 /*
108 intr_status = readl(&ha->reg->ctrl_status); 137 * Wait for completion: Poll or completion queue
109 if (intr_status & INTR_PENDING) { 138 */
139 if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
140 test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
141 test_bit(AF_ONLINE, &ha->flags) &&
142 !test_bit(AF_HBA_GOING_AWAY, &ha->flags)) {
143 /* Do not poll for completion. Use completion queue */
144 set_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
145 wait_for_completion_timeout(&ha->mbx_intr_comp, MBOX_TOV * HZ);
146 clear_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
147 } else {
148 /* Poll for command to complete */
149 wait_count = jiffies + MBOX_TOV * HZ;
150 while (test_bit(AF_MBOX_COMMAND_DONE, &ha->flags) == 0) {
151 if (time_after_eq(jiffies, wait_count))
152 break;
110 /* 153 /*
111 * Service the interrupt. 154 * Service the interrupt.
112 * The ISR will save the mailbox status registers 155 * The ISR will save the mailbox status registers
113 * to a temporary storage location in the adapter 156 * to a temporary storage location in the adapter
114 * structure. 157 * structure.
115 */ 158 */
116 ha->mbox_status_count = outCount; 159
117 qla4xxx_interrupt_service_routine(ha, intr_status); 160 spin_lock_irqsave(&ha->hardware_lock, flags);
161 if (is_qla8022(ha)) {
162 intr_status =
163 readl(&ha->qla4_8xxx_reg->host_int);
164 if (intr_status & ISRX_82XX_RISC_INT) {
165 ha->mbox_status_count = outCount;
166 intr_status =
167 readl(&ha->qla4_8xxx_reg->host_status);
168 ha->isp_ops->interrupt_service_routine(
169 ha, intr_status);
170 if (test_bit(AF_INTERRUPTS_ON,
171 &ha->flags) &&
172 test_bit(AF_INTx_ENABLED,
173 &ha->flags))
174 qla4_8xxx_wr_32(ha,
175 ha->nx_legacy_intr.tgt_mask_reg,
176 0xfbff);
177 }
178 } else {
179 intr_status = readl(&ha->reg->ctrl_status);
180 if (intr_status & INTR_PENDING) {
181 /*
182 * Service the interrupt.
183 * The ISR will save the mailbox status
184 * registers to a temporary storage
185 * location in the adapter structure.
186 */
187 ha->mbox_status_count = outCount;
188 ha->isp_ops->interrupt_service_routine(
189 ha, intr_status);
190 }
191 }
192 spin_unlock_irqrestore(&ha->hardware_lock, flags);
193 msleep(10);
118 } 194 }
119 spin_unlock_irqrestore(&ha->hardware_lock, flags);
120 msleep(10);
121 } 195 }
122 196
123 /* Check for mailbox timeout. */ 197 /* Check for mailbox timeout. */
@@ -172,7 +246,7 @@ mbox_exit:
172 return status; 246 return status;
173} 247}
174 248
175uint8_t 249static uint8_t
176qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd, 250qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
177 uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma) 251 uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma)
178{ 252{
@@ -196,7 +270,7 @@ qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
196 return QLA_SUCCESS; 270 return QLA_SUCCESS;
197} 271}
198 272
199uint8_t 273static uint8_t
200qla4xxx_get_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd, 274qla4xxx_get_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
201 uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma) 275 uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma)
202{ 276{
@@ -218,7 +292,7 @@ qla4xxx_get_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
218 return QLA_SUCCESS; 292 return QLA_SUCCESS;
219} 293}
220 294
221void 295static void
222qla4xxx_update_local_ip(struct scsi_qla_host *ha, 296qla4xxx_update_local_ip(struct scsi_qla_host *ha,
223 struct addr_ctrl_blk *init_fw_cb) 297 struct addr_ctrl_blk *init_fw_cb)
224{ 298{
@@ -256,7 +330,7 @@ qla4xxx_update_local_ip(struct scsi_qla_host *ha,
256 } 330 }
257} 331}
258 332
259uint8_t 333static uint8_t
260qla4xxx_update_local_ifcb(struct scsi_qla_host *ha, 334qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
261 uint32_t *mbox_cmd, 335 uint32_t *mbox_cmd,
262 uint32_t *mbox_sts, 336 uint32_t *mbox_sts,
@@ -317,7 +391,7 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
317 if (init_fw_cb == NULL) { 391 if (init_fw_cb == NULL) {
318 DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n", 392 DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n",
319 ha->host_no, __func__)); 393 ha->host_no, __func__));
320 return 10; 394 goto exit_init_fw_cb_no_free;
321 } 395 }
322 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk)); 396 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
323 397
@@ -373,7 +447,7 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
373exit_init_fw_cb: 447exit_init_fw_cb:
374 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), 448 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
375 init_fw_cb, init_fw_cb_dma); 449 init_fw_cb, init_fw_cb_dma);
376 450exit_init_fw_cb_no_free:
377 return status; 451 return status;
378} 452}
379 453
@@ -394,7 +468,7 @@ int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
394 if (init_fw_cb == NULL) { 468 if (init_fw_cb == NULL) {
395 printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no, 469 printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no,
396 __func__); 470 __func__);
397 return 10; 471 return QLA_ERROR;
398 } 472 }
399 473
400 /* Get Initialize Firmware Control Block. */ 474 /* Get Initialize Firmware Control Block. */
@@ -445,7 +519,7 @@ int qla4xxx_get_firmware_state(struct scsi_qla_host * ha)
445 DEBUG2(printk("scsi%ld: %s firmware_state=0x%x\n", 519 DEBUG2(printk("scsi%ld: %s firmware_state=0x%x\n",
446 ha->host_no, __func__, ha->firmware_state);) 520 ha->host_no, __func__, ha->firmware_state);)
447 521
448 return QLA_SUCCESS; 522 return QLA_SUCCESS;
449} 523}
450 524
451/** 525/**
@@ -470,6 +544,10 @@ int qla4xxx_get_firmware_status(struct scsi_qla_host * ha)
470 mbox_sts[0])); 544 mbox_sts[0]));
471 return QLA_ERROR; 545 return QLA_ERROR;
472 } 546 }
547
548 ql4_printk(KERN_INFO, ha, "%ld firmare IOCBs available (%d).\n",
549 ha->host_no, mbox_cmd[2]);
550
473 return QLA_SUCCESS; 551 return QLA_SUCCESS;
474} 552}
475 553
@@ -500,7 +578,7 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
500 578
501 /* Make sure the device index is valid */ 579 /* Make sure the device index is valid */
502 if (fw_ddb_index >= MAX_DDB_ENTRIES) { 580 if (fw_ddb_index >= MAX_DDB_ENTRIES) {
503 DEBUG2(printk("scsi%ld: %s: index [%d] out of range.\n", 581 DEBUG2(printk("scsi%ld: %s: ddb [%d] out of range.\n",
504 ha->host_no, __func__, fw_ddb_index)); 582 ha->host_no, __func__, fw_ddb_index));
505 goto exit_get_fwddb; 583 goto exit_get_fwddb;
506 } 584 }
@@ -521,7 +599,7 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
521 goto exit_get_fwddb; 599 goto exit_get_fwddb;
522 } 600 }
523 if (fw_ddb_index != mbox_sts[1]) { 601 if (fw_ddb_index != mbox_sts[1]) {
524 DEBUG2(printk("scsi%ld: %s: index mismatch [%d] != [%d].\n", 602 DEBUG2(printk("scsi%ld: %s: ddb mismatch [%d] != [%d].\n",
525 ha->host_no, __func__, fw_ddb_index, 603 ha->host_no, __func__, fw_ddb_index,
526 mbox_sts[1])); 604 mbox_sts[1]));
527 goto exit_get_fwddb; 605 goto exit_get_fwddb;
@@ -529,7 +607,7 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
529 if (fw_ddb_entry) { 607 if (fw_ddb_entry) {
530 options = le16_to_cpu(fw_ddb_entry->options); 608 options = le16_to_cpu(fw_ddb_entry->options);
531 if (options & DDB_OPT_IPV6_DEVICE) { 609 if (options & DDB_OPT_IPV6_DEVICE) {
532 dev_info(&ha->pdev->dev, "%s: DDB[%d] MB0 %04x Tot %d " 610 ql4_printk(KERN_INFO, ha, "%s: DDB[%d] MB0 %04x Tot %d "
533 "Next %d State %04x ConnErr %08x %pI6 " 611 "Next %d State %04x ConnErr %08x %pI6 "
534 ":%04d \"%s\"\n", __func__, fw_ddb_index, 612 ":%04d \"%s\"\n", __func__, fw_ddb_index,
535 mbox_sts[0], mbox_sts[2], mbox_sts[3], 613 mbox_sts[0], mbox_sts[2], mbox_sts[3],
@@ -538,7 +616,7 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
538 le16_to_cpu(fw_ddb_entry->port), 616 le16_to_cpu(fw_ddb_entry->port),
539 fw_ddb_entry->iscsi_name); 617 fw_ddb_entry->iscsi_name);
540 } else { 618 } else {
541 dev_info(&ha->pdev->dev, "%s: DDB[%d] MB0 %04x Tot %d " 619 ql4_printk(KERN_INFO, ha, "%s: DDB[%d] MB0 %04x Tot %d "
542 "Next %d State %04x ConnErr %08x %pI4 " 620 "Next %d State %04x ConnErr %08x %pI4 "
543 ":%04d \"%s\"\n", __func__, fw_ddb_index, 621 ":%04d \"%s\"\n", __func__, fw_ddb_index,
544 mbox_sts[0], mbox_sts[2], mbox_sts[3], 622 mbox_sts[0], mbox_sts[2], mbox_sts[3],
@@ -590,6 +668,7 @@ int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
590{ 668{
591 uint32_t mbox_cmd[MBOX_REG_COUNT]; 669 uint32_t mbox_cmd[MBOX_REG_COUNT];
592 uint32_t mbox_sts[MBOX_REG_COUNT]; 670 uint32_t mbox_sts[MBOX_REG_COUNT];
671 int status;
593 672
594 /* Do not wait for completion. The firmware will send us an 673 /* Do not wait for completion. The firmware will send us an
595 * ASTS_DATABASE_CHANGED (0x8014) to notify us of the login status. 674 * ASTS_DATABASE_CHANGED (0x8014) to notify us of the login status.
@@ -603,7 +682,12 @@ int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
603 mbox_cmd[3] = MSDW(fw_ddb_entry_dma); 682 mbox_cmd[3] = MSDW(fw_ddb_entry_dma);
604 mbox_cmd[4] = sizeof(struct dev_db_entry); 683 mbox_cmd[4] = sizeof(struct dev_db_entry);
605 684
606 return qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]); 685 status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0],
686 &mbox_sts[0]);
687 DEBUG2(printk("scsi%ld: %s: status=%d mbx0=0x%x mbx4=0x%x\n",
688 ha->host_no, __func__, status, mbox_sts[0], mbox_sts[4]);)
689
690 return status;
607} 691}
608 692
609/** 693/**
@@ -817,8 +901,8 @@ int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb)
817/** 901/**
818 * qla4xxx_reset_lun - issues LUN Reset 902 * qla4xxx_reset_lun - issues LUN Reset
819 * @ha: Pointer to host adapter structure. 903 * @ha: Pointer to host adapter structure.
820 * @db_entry: Pointer to device database entry 904 * @ddb_entry: Pointer to device database entry
821 * @un_entry: Pointer to lun entry structure 905 * @lun: lun number
822 * 906 *
823 * This routine performs a LUN RESET on the specified target/lun. 907 * This routine performs a LUN RESET on the specified target/lun.
824 * The caller must ensure that the ddb_entry and lun_entry pointers 908 * The caller must ensure that the ddb_entry and lun_entry pointers
@@ -832,7 +916,7 @@ int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry,
832 int status = QLA_SUCCESS; 916 int status = QLA_SUCCESS;
833 917
834 DEBUG2(printk("scsi%ld:%d:%d: lun reset issued\n", ha->host_no, 918 DEBUG2(printk("scsi%ld:%d:%d: lun reset issued\n", ha->host_no,
835 ddb_entry->os_target_id, lun)); 919 ddb_entry->fw_ddb_index, lun));
836 920
837 /* 921 /*
838 * Send lun reset command to ISP, so that the ISP will return all 922 * Send lun reset command to ISP, so that the ISP will return all
@@ -872,7 +956,7 @@ int qla4xxx_reset_target(struct scsi_qla_host *ha,
872 int status = QLA_SUCCESS; 956 int status = QLA_SUCCESS;
873 957
874 DEBUG2(printk("scsi%ld:%d: target reset issued\n", ha->host_no, 958 DEBUG2(printk("scsi%ld:%d: target reset issued\n", ha->host_no,
875 ddb_entry->os_target_id)); 959 ddb_entry->fw_ddb_index));
876 960
877 /* 961 /*
878 * Send target reset command to ISP, so that the ISP will return all 962 * Send target reset command to ISP, so that the ISP will return all
@@ -1019,16 +1103,16 @@ int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port)
1019 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n", 1103 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
1020 ha->host_no, __func__)); 1104 ha->host_no, __func__));
1021 ret_val = QLA_ERROR; 1105 ret_val = QLA_ERROR;
1022 goto qla4xxx_send_tgts_exit; 1106 goto exit_send_tgts_no_free;
1023 } 1107 }
1024 1108
1025 ret_val = qla4xxx_get_default_ddb(ha, fw_ddb_entry_dma); 1109 ret_val = qla4xxx_get_default_ddb(ha, fw_ddb_entry_dma);
1026 if (ret_val != QLA_SUCCESS) 1110 if (ret_val != QLA_SUCCESS)
1027 goto qla4xxx_send_tgts_exit; 1111 goto exit_send_tgts;
1028 1112
1029 ret_val = qla4xxx_req_ddb_entry(ha, &ddb_index); 1113 ret_val = qla4xxx_req_ddb_entry(ha, &ddb_index);
1030 if (ret_val != QLA_SUCCESS) 1114 if (ret_val != QLA_SUCCESS)
1031 goto qla4xxx_send_tgts_exit; 1115 goto exit_send_tgts;
1032 1116
1033 memset(fw_ddb_entry->iscsi_alias, 0, 1117 memset(fw_ddb_entry->iscsi_alias, 0,
1034 sizeof(fw_ddb_entry->iscsi_alias)); 1118 sizeof(fw_ddb_entry->iscsi_alias));
@@ -1050,9 +1134,10 @@ int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port)
1050 1134
1051 ret_val = qla4xxx_set_ddb_entry(ha, ddb_index, fw_ddb_entry_dma); 1135 ret_val = qla4xxx_set_ddb_entry(ha, ddb_index, fw_ddb_entry_dma);
1052 1136
1053qla4xxx_send_tgts_exit: 1137exit_send_tgts:
1054 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 1138 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1055 fw_ddb_entry, fw_ddb_entry_dma); 1139 fw_ddb_entry, fw_ddb_entry_dma);
1140exit_send_tgts_no_free:
1056 return ret_val; 1141 return ret_val;
1057} 1142}
1058 1143
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.c b/drivers/scsi/qla4xxx/ql4_nvram.c
index 7fe0482ecf03..f0d0fbf88aa2 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.c
+++ b/drivers/scsi/qla4xxx/ql4_nvram.c
@@ -149,7 +149,7 @@ static int eeprom_readword(int eepromAddr, u16 * value,
149/* Hardware_lock must be set before calling */ 149/* Hardware_lock must be set before calling */
150u16 rd_nvram_word(struct scsi_qla_host * ha, int offset) 150u16 rd_nvram_word(struct scsi_qla_host * ha, int offset)
151{ 151{
152 u16 val; 152 u16 val = 0;
153 153
154 /* NOTE: NVRAM uses half-word addresses */ 154 /* NOTE: NVRAM uses half-word addresses */
155 eeprom_readword(offset, &val, ha); 155 eeprom_readword(offset, &val, ha);
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.h b/drivers/scsi/qla4xxx/ql4_nvram.h
index b47b4fc59d83..7a8fc66a760d 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.h
+++ b/drivers/scsi/qla4xxx/ql4_nvram.h
@@ -8,9 +8,9 @@
8#ifndef _QL4XNVRM_H_ 8#ifndef _QL4XNVRM_H_
9#define _QL4XNVRM_H_ 9#define _QL4XNVRM_H_
10 10
11/* 11/**
12 * AM29LV Flash definitions 12 * AM29LV Flash definitions
13 */ 13 **/
14#define FM93C56A_SIZE_8 0x100 14#define FM93C56A_SIZE_8 0x100
15#define FM93C56A_SIZE_16 0x80 15#define FM93C56A_SIZE_16 0x80
16#define FM93C66A_SIZE_8 0x200 16#define FM93C66A_SIZE_8 0x200
@@ -19,7 +19,7 @@
19 19
20#define FM93C56A_START 0x1 20#define FM93C56A_START 0x1
21 21
22// Commands 22/* Commands */
23#define FM93C56A_READ 0x2 23#define FM93C56A_READ 0x2
24#define FM93C56A_WEN 0x0 24#define FM93C56A_WEN 0x0
25#define FM93C56A_WRITE 0x1 25#define FM93C56A_WRITE 0x1
@@ -62,9 +62,9 @@
62#define AUBURN_EEPROM_CLK_RISE 0x1 62#define AUBURN_EEPROM_CLK_RISE 0x1
63#define AUBURN_EEPROM_CLK_FALL 0x0 63#define AUBURN_EEPROM_CLK_FALL 0x0
64 64
65/* */ 65/**/
66/* EEPROM format */ 66/* EEPROM format */
67/* */ 67/**/
68struct bios_params { 68struct bios_params {
69 uint16_t SpinUpDelay:1; 69 uint16_t SpinUpDelay:1;
70 uint16_t BIOSDisable:1; 70 uint16_t BIOSDisable:1;
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
new file mode 100644
index 000000000000..3e119ae78397
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -0,0 +1,2321 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2009 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7#include <linux/delay.h>
8#include <linux/pci.h>
9#include "ql4_def.h"
10#include "ql4_glbl.h"
11
12#define MASK(n) DMA_BIT_MASK(n)
13#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff))
14#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff))
15#define MS_WIN(addr) (addr & 0x0ffc0000)
16#define QLA82XX_PCI_MN_2M (0)
17#define QLA82XX_PCI_MS_2M (0x80000)
18#define QLA82XX_PCI_OCM0_2M (0xc0000)
19#define VALID_OCM_ADDR(addr) (((addr) & 0x3f800) != 0x3f800)
20#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
21
22/* CRB window related */
23#define CRB_BLK(off) ((off >> 20) & 0x3f)
24#define CRB_SUBBLK(off) ((off >> 16) & 0xf)
25#define CRB_WINDOW_2M (0x130060)
26#define CRB_HI(off) ((qla4_8xxx_crb_hub_agt[CRB_BLK(off)] << 20) | \
27 ((off) & 0xf0000))
28#define QLA82XX_PCI_CAMQM_2M_END (0x04800800UL)
29#define QLA82XX_PCI_CAMQM_2M_BASE (0x000ff800UL)
30#define CRB_INDIRECT_2M (0x1e0000UL)
31
32static inline void __iomem *
33qla4_8xxx_pci_base_offsetfset(struct scsi_qla_host *ha, unsigned long off)
34{
35 if ((off < ha->first_page_group_end) &&
36 (off >= ha->first_page_group_start))
37 return (void __iomem *)(ha->nx_pcibase + off);
38
39 return NULL;
40}
41
42#define MAX_CRB_XFORM 60
43static unsigned long crb_addr_xform[MAX_CRB_XFORM];
44static int qla4_8xxx_crb_table_initialized;
45
46#define qla4_8xxx_crb_addr_transform(name) \
47 (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
48 QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
49static void
50qla4_8xxx_crb_addr_transform_setup(void)
51{
52 qla4_8xxx_crb_addr_transform(XDMA);
53 qla4_8xxx_crb_addr_transform(TIMR);
54 qla4_8xxx_crb_addr_transform(SRE);
55 qla4_8xxx_crb_addr_transform(SQN3);
56 qla4_8xxx_crb_addr_transform(SQN2);
57 qla4_8xxx_crb_addr_transform(SQN1);
58 qla4_8xxx_crb_addr_transform(SQN0);
59 qla4_8xxx_crb_addr_transform(SQS3);
60 qla4_8xxx_crb_addr_transform(SQS2);
61 qla4_8xxx_crb_addr_transform(SQS1);
62 qla4_8xxx_crb_addr_transform(SQS0);
63 qla4_8xxx_crb_addr_transform(RPMX7);
64 qla4_8xxx_crb_addr_transform(RPMX6);
65 qla4_8xxx_crb_addr_transform(RPMX5);
66 qla4_8xxx_crb_addr_transform(RPMX4);
67 qla4_8xxx_crb_addr_transform(RPMX3);
68 qla4_8xxx_crb_addr_transform(RPMX2);
69 qla4_8xxx_crb_addr_transform(RPMX1);
70 qla4_8xxx_crb_addr_transform(RPMX0);
71 qla4_8xxx_crb_addr_transform(ROMUSB);
72 qla4_8xxx_crb_addr_transform(SN);
73 qla4_8xxx_crb_addr_transform(QMN);
74 qla4_8xxx_crb_addr_transform(QMS);
75 qla4_8xxx_crb_addr_transform(PGNI);
76 qla4_8xxx_crb_addr_transform(PGND);
77 qla4_8xxx_crb_addr_transform(PGN3);
78 qla4_8xxx_crb_addr_transform(PGN2);
79 qla4_8xxx_crb_addr_transform(PGN1);
80 qla4_8xxx_crb_addr_transform(PGN0);
81 qla4_8xxx_crb_addr_transform(PGSI);
82 qla4_8xxx_crb_addr_transform(PGSD);
83 qla4_8xxx_crb_addr_transform(PGS3);
84 qla4_8xxx_crb_addr_transform(PGS2);
85 qla4_8xxx_crb_addr_transform(PGS1);
86 qla4_8xxx_crb_addr_transform(PGS0);
87 qla4_8xxx_crb_addr_transform(PS);
88 qla4_8xxx_crb_addr_transform(PH);
89 qla4_8xxx_crb_addr_transform(NIU);
90 qla4_8xxx_crb_addr_transform(I2Q);
91 qla4_8xxx_crb_addr_transform(EG);
92 qla4_8xxx_crb_addr_transform(MN);
93 qla4_8xxx_crb_addr_transform(MS);
94 qla4_8xxx_crb_addr_transform(CAS2);
95 qla4_8xxx_crb_addr_transform(CAS1);
96 qla4_8xxx_crb_addr_transform(CAS0);
97 qla4_8xxx_crb_addr_transform(CAM);
98 qla4_8xxx_crb_addr_transform(C2C1);
99 qla4_8xxx_crb_addr_transform(C2C0);
100 qla4_8xxx_crb_addr_transform(SMB);
101 qla4_8xxx_crb_addr_transform(OCM0);
102 qla4_8xxx_crb_addr_transform(I2C0);
103
104 qla4_8xxx_crb_table_initialized = 1;
105}
106
107static struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
108 {{{0, 0, 0, 0} } }, /* 0: PCI */
109 {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */
110 {1, 0x0110000, 0x0120000, 0x130000},
111 {1, 0x0120000, 0x0122000, 0x124000},
112 {1, 0x0130000, 0x0132000, 0x126000},
113 {1, 0x0140000, 0x0142000, 0x128000},
114 {1, 0x0150000, 0x0152000, 0x12a000},
115 {1, 0x0160000, 0x0170000, 0x110000},
116 {1, 0x0170000, 0x0172000, 0x12e000},
117 {0, 0x0000000, 0x0000000, 0x000000},
118 {0, 0x0000000, 0x0000000, 0x000000},
119 {0, 0x0000000, 0x0000000, 0x000000},
120 {0, 0x0000000, 0x0000000, 0x000000},
121 {0, 0x0000000, 0x0000000, 0x000000},
122 {0, 0x0000000, 0x0000000, 0x000000},
123 {1, 0x01e0000, 0x01e0800, 0x122000},
124 {0, 0x0000000, 0x0000000, 0x000000} } },
125 {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
126 {{{0, 0, 0, 0} } }, /* 3: */
127 {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
128 {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */
129 {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */
130 {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */
131 {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */
132 {0, 0x0000000, 0x0000000, 0x000000},
133 {0, 0x0000000, 0x0000000, 0x000000},
134 {0, 0x0000000, 0x0000000, 0x000000},
135 {0, 0x0000000, 0x0000000, 0x000000},
136 {0, 0x0000000, 0x0000000, 0x000000},
137 {0, 0x0000000, 0x0000000, 0x000000},
138 {0, 0x0000000, 0x0000000, 0x000000},
139 {0, 0x0000000, 0x0000000, 0x000000},
140 {0, 0x0000000, 0x0000000, 0x000000},
141 {0, 0x0000000, 0x0000000, 0x000000},
142 {0, 0x0000000, 0x0000000, 0x000000},
143 {0, 0x0000000, 0x0000000, 0x000000},
144 {0, 0x0000000, 0x0000000, 0x000000},
145 {0, 0x0000000, 0x0000000, 0x000000},
146 {1, 0x08f0000, 0x08f2000, 0x172000} } },
147 {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/
148 {0, 0x0000000, 0x0000000, 0x000000},
149 {0, 0x0000000, 0x0000000, 0x000000},
150 {0, 0x0000000, 0x0000000, 0x000000},
151 {0, 0x0000000, 0x0000000, 0x000000},
152 {0, 0x0000000, 0x0000000, 0x000000},
153 {0, 0x0000000, 0x0000000, 0x000000},
154 {0, 0x0000000, 0x0000000, 0x000000},
155 {0, 0x0000000, 0x0000000, 0x000000},
156 {0, 0x0000000, 0x0000000, 0x000000},
157 {0, 0x0000000, 0x0000000, 0x000000},
158 {0, 0x0000000, 0x0000000, 0x000000},
159 {0, 0x0000000, 0x0000000, 0x000000},
160 {0, 0x0000000, 0x0000000, 0x000000},
161 {0, 0x0000000, 0x0000000, 0x000000},
162 {1, 0x09f0000, 0x09f2000, 0x176000} } },
163 {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/
164 {0, 0x0000000, 0x0000000, 0x000000},
165 {0, 0x0000000, 0x0000000, 0x000000},
166 {0, 0x0000000, 0x0000000, 0x000000},
167 {0, 0x0000000, 0x0000000, 0x000000},
168 {0, 0x0000000, 0x0000000, 0x000000},
169 {0, 0x0000000, 0x0000000, 0x000000},
170 {0, 0x0000000, 0x0000000, 0x000000},
171 {0, 0x0000000, 0x0000000, 0x000000},
172 {0, 0x0000000, 0x0000000, 0x000000},
173 {0, 0x0000000, 0x0000000, 0x000000},
174 {0, 0x0000000, 0x0000000, 0x000000},
175 {0, 0x0000000, 0x0000000, 0x000000},
176 {0, 0x0000000, 0x0000000, 0x000000},
177 {0, 0x0000000, 0x0000000, 0x000000},
178 {1, 0x0af0000, 0x0af2000, 0x17a000} } },
179 {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/
180 {0, 0x0000000, 0x0000000, 0x000000},
181 {0, 0x0000000, 0x0000000, 0x000000},
182 {0, 0x0000000, 0x0000000, 0x000000},
183 {0, 0x0000000, 0x0000000, 0x000000},
184 {0, 0x0000000, 0x0000000, 0x000000},
185 {0, 0x0000000, 0x0000000, 0x000000},
186 {0, 0x0000000, 0x0000000, 0x000000},
187 {0, 0x0000000, 0x0000000, 0x000000},
188 {0, 0x0000000, 0x0000000, 0x000000},
189 {0, 0x0000000, 0x0000000, 0x000000},
190 {0, 0x0000000, 0x0000000, 0x000000},
191 {0, 0x0000000, 0x0000000, 0x000000},
192 {0, 0x0000000, 0x0000000, 0x000000},
193 {0, 0x0000000, 0x0000000, 0x000000},
194 {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
195 {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
196 {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
197 {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
198 {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
199 {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
200 {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
201 {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
202 {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
203 {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
204 {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
205 {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
206 {{{0, 0, 0, 0} } }, /* 23: */
207 {{{0, 0, 0, 0} } }, /* 24: */
208 {{{0, 0, 0, 0} } }, /* 25: */
209 {{{0, 0, 0, 0} } }, /* 26: */
210 {{{0, 0, 0, 0} } }, /* 27: */
211 {{{0, 0, 0, 0} } }, /* 28: */
212 {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
213 {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
214 {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
215 {{{0} } }, /* 32: PCI */
216 {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */
217 {1, 0x2110000, 0x2120000, 0x130000},
218 {1, 0x2120000, 0x2122000, 0x124000},
219 {1, 0x2130000, 0x2132000, 0x126000},
220 {1, 0x2140000, 0x2142000, 0x128000},
221 {1, 0x2150000, 0x2152000, 0x12a000},
222 {1, 0x2160000, 0x2170000, 0x110000},
223 {1, 0x2170000, 0x2172000, 0x12e000},
224 {0, 0x0000000, 0x0000000, 0x000000},
225 {0, 0x0000000, 0x0000000, 0x000000},
226 {0, 0x0000000, 0x0000000, 0x000000},
227 {0, 0x0000000, 0x0000000, 0x000000},
228 {0, 0x0000000, 0x0000000, 0x000000},
229 {0, 0x0000000, 0x0000000, 0x000000},
230 {0, 0x0000000, 0x0000000, 0x000000},
231 {0, 0x0000000, 0x0000000, 0x000000} } },
232 {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
233 {{{0} } }, /* 35: */
234 {{{0} } }, /* 36: */
235 {{{0} } }, /* 37: */
236 {{{0} } }, /* 38: */
237 {{{0} } }, /* 39: */
238 {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
239 {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
240 {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
241 {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
242 {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
243 {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
244 {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
245 {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
246 {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
247 {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
248 {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
249 {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
250 {{{0} } }, /* 52: */
251 {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
252 {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
253 {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
254 {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
255 {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
256 {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
257 {{{0} } }, /* 59: I2C0 */
258 {{{0} } }, /* 60: I2C1 */
259 {{{1, 0x3d00000, 0x3d04000, 0x1dc000} } },/* 61: LPC */
260 {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
261 {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */
262};
263
264/*
265 * top 12 bits of crb internal address (hub, agent)
266 */
267static unsigned qla4_8xxx_crb_hub_agt[64] = {
268 0,
269 QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
270 QLA82XX_HW_CRB_HUB_AGT_ADR_MN,
271 QLA82XX_HW_CRB_HUB_AGT_ADR_MS,
272 0,
273 QLA82XX_HW_CRB_HUB_AGT_ADR_SRE,
274 QLA82XX_HW_CRB_HUB_AGT_ADR_NIU,
275 QLA82XX_HW_CRB_HUB_AGT_ADR_QMN,
276 QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0,
277 QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1,
278 QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2,
279 QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3,
280 QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
281 QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
282 QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
283 QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4,
284 QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
285 QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0,
286 QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1,
287 QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2,
288 QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3,
289 QLA82XX_HW_CRB_HUB_AGT_ADR_PGND,
290 QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI,
291 QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0,
292 QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1,
293 QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2,
294 QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3,
295 0,
296 QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI,
297 QLA82XX_HW_CRB_HUB_AGT_ADR_SN,
298 0,
299 QLA82XX_HW_CRB_HUB_AGT_ADR_EG,
300 0,
301 QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
302 QLA82XX_HW_CRB_HUB_AGT_ADR_CAM,
303 0,
304 0,
305 0,
306 0,
307 0,
308 QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
309 0,
310 QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1,
311 QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2,
312 QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3,
313 QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4,
314 QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5,
315 QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6,
316 QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7,
317 QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
318 QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
319 QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
320 0,
321 QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0,
322 QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8,
323 QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9,
324 QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0,
325 0,
326 QLA82XX_HW_CRB_HUB_AGT_ADR_SMB,
327 QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0,
328 QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1,
329 0,
330 QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC,
331 0,
332};
333
334/* Device states */
335static char *qdev_state[] = {
336 "Unknown",
337 "Cold",
338 "Initializing",
339 "Ready",
340 "Need Reset",
341 "Need Quiescent",
342 "Failed",
343 "Quiescent",
344};
345
346/*
347 * In: 'off' is offset from CRB space in 128M pci map
348 * Out: 'off' is 2M pci map addr
349 * side effect: lock crb window
350 */
351static void
352qla4_8xxx_pci_set_crbwindow_2M(struct scsi_qla_host *ha, ulong *off)
353{
354 u32 win_read;
355
356 ha->crb_win = CRB_HI(*off);
357 writel(ha->crb_win,
358 (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
359
360 /* Read back value to make sure write has gone through before trying
361 * to use it. */
362 win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
363 if (win_read != ha->crb_win) {
364 DEBUG2(ql4_printk(KERN_INFO, ha,
365 "%s: Written crbwin (0x%x) != Read crbwin (0x%x),"
366 " off=0x%lx\n", __func__, ha->crb_win, win_read, *off));
367 }
368 *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
369}
370
371void
372qla4_8xxx_wr_32(struct scsi_qla_host *ha, ulong off, u32 data)
373{
374 unsigned long flags = 0;
375 int rv;
376
377 rv = qla4_8xxx_pci_get_crb_addr_2M(ha, &off);
378
379 BUG_ON(rv == -1);
380
381 if (rv == 1) {
382 write_lock_irqsave(&ha->hw_lock, flags);
383 qla4_8xxx_crb_win_lock(ha);
384 qla4_8xxx_pci_set_crbwindow_2M(ha, &off);
385 }
386
387 writel(data, (void __iomem *)off);
388
389 if (rv == 1) {
390 qla4_8xxx_crb_win_unlock(ha);
391 write_unlock_irqrestore(&ha->hw_lock, flags);
392 }
393}
394
395int
396qla4_8xxx_rd_32(struct scsi_qla_host *ha, ulong off)
397{
398 unsigned long flags = 0;
399 int rv;
400 u32 data;
401
402 rv = qla4_8xxx_pci_get_crb_addr_2M(ha, &off);
403
404 BUG_ON(rv == -1);
405
406 if (rv == 1) {
407 write_lock_irqsave(&ha->hw_lock, flags);
408 qla4_8xxx_crb_win_lock(ha);
409 qla4_8xxx_pci_set_crbwindow_2M(ha, &off);
410 }
411 data = readl((void __iomem *)off);
412
413 if (rv == 1) {
414 qla4_8xxx_crb_win_unlock(ha);
415 write_unlock_irqrestore(&ha->hw_lock, flags);
416 }
417 return data;
418}
419
420#define CRB_WIN_LOCK_TIMEOUT 100000000
421
422int qla4_8xxx_crb_win_lock(struct scsi_qla_host *ha)
423{
424 int i;
425 int done = 0, timeout = 0;
426
427 while (!done) {
428 /* acquire semaphore3 from PCI HW block */
429 done = qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
430 if (done == 1)
431 break;
432 if (timeout >= CRB_WIN_LOCK_TIMEOUT)
433 return -1;
434
435 timeout++;
436
437 /* Yield CPU */
438 if (!in_interrupt())
439 schedule();
440 else {
441 for (i = 0; i < 20; i++)
442 cpu_relax(); /*This a nop instr on i386*/
443 }
444 }
445 qla4_8xxx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->func_num);
446 return 0;
447}
448
449void qla4_8xxx_crb_win_unlock(struct scsi_qla_host *ha)
450{
451 qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
452}
453
454#define IDC_LOCK_TIMEOUT 100000000
455
456/**
457 * qla4_8xxx_idc_lock - hw_lock
458 * @ha: pointer to adapter structure
459 *
460 * General purpose lock used to synchronize access to
461 * CRB_DEV_STATE, CRB_DEV_REF_COUNT, etc.
462 **/
463int qla4_8xxx_idc_lock(struct scsi_qla_host *ha)
464{
465 int i;
466 int done = 0, timeout = 0;
467
468 while (!done) {
469 /* acquire semaphore5 from PCI HW block */
470 done = qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK));
471 if (done == 1)
472 break;
473 if (timeout >= IDC_LOCK_TIMEOUT)
474 return -1;
475
476 timeout++;
477
478 /* Yield CPU */
479 if (!in_interrupt())
480 schedule();
481 else {
482 for (i = 0; i < 20; i++)
483 cpu_relax(); /*This a nop instr on i386*/
484 }
485 }
486 return 0;
487}
488
489void qla4_8xxx_idc_unlock(struct scsi_qla_host *ha)
490{
491 qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
492}
493
494int
495qla4_8xxx_pci_get_crb_addr_2M(struct scsi_qla_host *ha, ulong *off)
496{
497 struct crb_128M_2M_sub_block_map *m;
498
499 if (*off >= QLA82XX_CRB_MAX)
500 return -1;
501
502 if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) {
503 *off = (*off - QLA82XX_PCI_CAMQM) +
504 QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase;
505 return 0;
506 }
507
508 if (*off < QLA82XX_PCI_CRBSPACE)
509 return -1;
510
511 *off -= QLA82XX_PCI_CRBSPACE;
512 /*
513 * Try direct map
514 */
515
516 m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];
517
518 if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) {
519 *off = *off + m->start_2M - m->start_128M + ha->nx_pcibase;
520 return 0;
521 }
522
523 /*
524 * Not in direct map, use crb window
525 */
526 return 1;
527}
528
529/* PCI Windowing for DDR regions. */
530#define QLA82XX_ADDR_IN_RANGE(addr, low, high) \
531 (((addr) <= (high)) && ((addr) >= (low)))
532
533/*
534* check memory access boundary.
535* used by test agent. support ddr access only for now
536*/
537static unsigned long
538qla4_8xxx_pci_mem_bound_check(struct scsi_qla_host *ha,
539 unsigned long long addr, int size)
540{
541 if (!QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
542 QLA82XX_ADDR_DDR_NET_MAX) ||
543 !QLA82XX_ADDR_IN_RANGE(addr + size - 1,
544 QLA82XX_ADDR_DDR_NET, QLA82XX_ADDR_DDR_NET_MAX) ||
545 ((size != 1) && (size != 2) && (size != 4) && (size != 8))) {
546 return 0;
547 }
548 return 1;
549}
550
551static int qla4_8xxx_pci_set_window_warning_count;
552
553static unsigned long
554qla4_8xxx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr)
555{
556 int window;
557 u32 win_read;
558
559 if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
560 QLA82XX_ADDR_DDR_NET_MAX)) {
561 /* DDR network side */
562 window = MN_WIN(addr);
563 ha->ddr_mn_window = window;
564 qla4_8xxx_wr_32(ha, ha->mn_win_crb |
565 QLA82XX_PCI_CRBSPACE, window);
566 win_read = qla4_8xxx_rd_32(ha, ha->mn_win_crb |
567 QLA82XX_PCI_CRBSPACE);
568 if ((win_read << 17) != window) {
569 ql4_printk(KERN_WARNING, ha,
570 "%s: Written MNwin (0x%x) != Read MNwin (0x%x)\n",
571 __func__, window, win_read);
572 }
573 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
574 } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
575 QLA82XX_ADDR_OCM0_MAX)) {
576 unsigned int temp1;
577 /* if bits 19:18&17:11 are on */
578 if ((addr & 0x00ff800) == 0xff800) {
579 printk("%s: QM access not handled.\n", __func__);
580 addr = -1UL;
581 }
582
583 window = OCM_WIN(addr);
584 ha->ddr_mn_window = window;
585 qla4_8xxx_wr_32(ha, ha->mn_win_crb |
586 QLA82XX_PCI_CRBSPACE, window);
587 win_read = qla4_8xxx_rd_32(ha, ha->mn_win_crb |
588 QLA82XX_PCI_CRBSPACE);
589 temp1 = ((window & 0x1FF) << 7) |
590 ((window & 0x0FFFE0000) >> 17);
591 if (win_read != temp1) {
592 printk("%s: Written OCMwin (0x%x) != Read"
593 " OCMwin (0x%x)\n", __func__, temp1, win_read);
594 }
595 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;
596
597 } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET,
598 QLA82XX_P3_ADDR_QDR_NET_MAX)) {
599 /* QDR network side */
600 window = MS_WIN(addr);
601 ha->qdr_sn_window = window;
602 qla4_8xxx_wr_32(ha, ha->ms_win_crb |
603 QLA82XX_PCI_CRBSPACE, window);
604 win_read = qla4_8xxx_rd_32(ha,
605 ha->ms_win_crb | QLA82XX_PCI_CRBSPACE);
606 if (win_read != window) {
607 printk("%s: Written MSwin (0x%x) != Read "
608 "MSwin (0x%x)\n", __func__, window, win_read);
609 }
610 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET;
611
612 } else {
613 /*
614 * peg gdb frequently accesses memory that doesn't exist,
615 * this limits the chit chat so debugging isn't slowed down.
616 */
617 if ((qla4_8xxx_pci_set_window_warning_count++ < 8) ||
618 (qla4_8xxx_pci_set_window_warning_count%64 == 0)) {
619 printk("%s: Warning:%s Unknown address range!\n",
620 __func__, DRIVER_NAME);
621 }
622 addr = -1UL;
623 }
624 return addr;
625}
626
627/* check if address is in the same windows as the previous access */
628static int qla4_8xxx_pci_is_same_window(struct scsi_qla_host *ha,
629 unsigned long long addr)
630{
631 int window;
632 unsigned long long qdr_max;
633
634 qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX;
635
636 if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
637 QLA82XX_ADDR_DDR_NET_MAX)) {
638 /* DDR network side */
639 BUG(); /* MN access can not come here */
640 } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
641 QLA82XX_ADDR_OCM0_MAX)) {
642 return 1;
643 } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM1,
644 QLA82XX_ADDR_OCM1_MAX)) {
645 return 1;
646 } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET,
647 qdr_max)) {
648 /* QDR network side */
649 window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f;
650 if (ha->qdr_sn_window == window)
651 return 1;
652 }
653
654 return 0;
655}
656
/*
 * Read @size bytes (1/2/4/8) of ISP memory at @off through a direct
 * BAR0 mapping and store the result in @data.
 *
 * Returns 0 on success, -1 on a bad/straddling address, a failed
 * ioremap, or an unsupported @size.  Must not be called from a context
 * that cannot take ha->hw_lock as a writer.
 */
static int qla4_8xxx_pci_mem_read_direct(struct scsi_qla_host *ha,
		u64 off, void *data, int size)
{
	unsigned long flags;
	void __iomem *addr;
	int ret = 0;
	u64 start;
	void __iomem *mem_ptr = NULL;
	unsigned long mem_base;
	unsigned long mem_page;

	write_lock_irqsave(&ha->hw_lock, flags);

	/*
	 * If attempting to access unknown address or straddle hw windows,
	 * do not access.
	 */
	start = qla4_8xxx_pci_set_window(ha, off);
	if ((start == -1UL) ||
	    (qla4_8xxx_pci_is_same_window(ha, off + size - 1) == 0)) {
		write_unlock_irqrestore(&ha->hw_lock, flags);
		printk(KERN_ERR"%s out of bound pci memory access. "
				"offset is 0x%llx\n", DRIVER_NAME, off);
		return -1;
	}

	addr = qla4_8xxx_pci_base_offsetfset(ha, start);
	if (!addr) {
		/* Address not covered by the preset BAR0 mapping; build a
		 * temporary mapping.  Drop the lock first: ioremap may sleep.
		 */
		write_unlock_irqrestore(&ha->hw_lock, flags);
		mem_base = pci_resource_start(ha->pdev, 0);
		mem_page = start & PAGE_MASK;
		/* Map two pages whenever user tries to access addresses in two
		   consecutive pages.
		 */
		if (mem_page != ((start + size - 1) & PAGE_MASK))
			mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
		else
			mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);

		if (mem_ptr == NULL) {
			/* Zero the first byte so callers never consume
			 * uninitialized data on failure. */
			*(u8 *)data = 0;
			return -1;
		}
		addr = mem_ptr;
		addr += start & (PAGE_SIZE - 1);
		/* Re-acquire the lock for the actual MMIO access. */
		write_lock_irqsave(&ha->hw_lock, flags);
	}

	switch (size) {
	case 1:
		*(u8 *)data = readb(addr);
		break;
	case 2:
		*(u16 *)data = readw(addr);
		break;
	case 4:
		*(u32 *)data = readl(addr);
		break;
	case 8:
		*(u64 *)data = readq(addr);
		break;
	default:
		ret = -1;
		break;
	}
	write_unlock_irqrestore(&ha->hw_lock, flags);

	if (mem_ptr)
		iounmap(mem_ptr);
	return ret;
}
728
/*
 * Write @size bytes (1/2/4/8) from @data to ISP memory at @off through
 * a direct BAR0 mapping.  Mirror image of
 * qla4_8xxx_pci_mem_read_direct().
 *
 * Returns 0 on success, -1 on a bad/straddling address, a failed
 * ioremap, or an unsupported @size.
 */
static int
qla4_8xxx_pci_mem_write_direct(struct scsi_qla_host *ha, u64 off,
		void *data, int size)
{
	unsigned long flags;
	void __iomem *addr;
	int ret = 0;
	u64 start;
	void __iomem *mem_ptr = NULL;
	unsigned long mem_base;
	unsigned long mem_page;

	write_lock_irqsave(&ha->hw_lock, flags);

	/*
	 * If attempting to access unknown address or straddle hw windows,
	 * do not access.
	 */
	start = qla4_8xxx_pci_set_window(ha, off);
	if ((start == -1UL) ||
	    (qla4_8xxx_pci_is_same_window(ha, off + size - 1) == 0)) {
		write_unlock_irqrestore(&ha->hw_lock, flags);
		printk(KERN_ERR"%s out of bound pci memory access. "
				"offset is 0x%llx\n", DRIVER_NAME, off);
		return -1;
	}

	addr = qla4_8xxx_pci_base_offsetfset(ha, start);
	if (!addr) {
		/* Address not covered by the preset BAR0 mapping; build a
		 * temporary mapping.  Drop the lock first: ioremap may sleep.
		 */
		write_unlock_irqrestore(&ha->hw_lock, flags);
		mem_base = pci_resource_start(ha->pdev, 0);
		mem_page = start & PAGE_MASK;
		/* Map two pages whenever user tries to access addresses in two
		   consecutive pages.
		 */
		if (mem_page != ((start + size - 1) & PAGE_MASK))
			mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2);
		else
			mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
		if (mem_ptr == NULL)
			return -1;

		addr = mem_ptr;
		addr += start & (PAGE_SIZE - 1);
		/* Re-acquire the lock for the actual MMIO access. */
		write_lock_irqsave(&ha->hw_lock, flags);
	}

	switch (size) {
	case 1:
		writeb(*(u8 *)data, addr);
		break;
	case 2:
		writew(*(u16 *)data, addr);
		break;
	case 4:
		writel(*(u32 *)data, addr);
		break;
	case 8:
		writeq(*(u64 *)data, addr);
		break;
	default:
		ret = -1;
		break;
	}
	write_unlock_irqrestore(&ha->hw_lock, flags);
	if (mem_ptr)
		iounmap(mem_ptr);
	return ret;
}
798
799#define MTU_FUDGE_FACTOR 100
800
801static unsigned long
802qla4_8xxx_decode_crb_addr(unsigned long addr)
803{
804 int i;
805 unsigned long base_addr, offset, pci_base;
806
807 if (!qla4_8xxx_crb_table_initialized)
808 qla4_8xxx_crb_addr_transform_setup();
809
810 pci_base = ADDR_ERROR;
811 base_addr = addr & 0xfff00000;
812 offset = addr & 0x000fffff;
813
814 for (i = 0; i < MAX_CRB_XFORM; i++) {
815 if (crb_addr_xform[i] == base_addr) {
816 pci_base = i << 20;
817 break;
818 }
819 }
820 if (pci_base == ADDR_ERROR)
821 return pci_base;
822 else
823 return pci_base + offset;
824}
825
/* Poll budgets (loop iterations, not time units) for ROM status
 * completion and for acquiring the PCIe semaphore-2 ROM lock. */
static long rom_max_timeout = 100;
static long qla4_8xxx_rom_lock_timeout = 100;
828
/*
 * Acquire the ROM lock (PCIe hardware semaphore 2) shared between all
 * functions/drivers, then record this driver as the holder.
 *
 * Returns 0 on success, -1 if the semaphore could not be taken within
 * qla4_8xxx_rom_lock_timeout attempts.  Safe in interrupt context: it
 * only yields the CPU when not in interrupt.
 */
static int
qla4_8xxx_rom_lock(struct scsi_qla_host *ha)
{
	int i;
	int done = 0, timeout = 0;

	while (!done) {
		/* acquire semaphore2 from PCI HW block */

		/* Reading the LOCK register returns 1 iff the semaphore
		 * was free and is now owned by us. */
		done = qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
		if (done == 1)
			break;
		if (timeout >= qla4_8xxx_rom_lock_timeout)
			return -1;

		timeout++;

		/* Yield CPU */
		if (!in_interrupt())
			schedule();
		else {
			for (i = 0; i < 20; i++)
				cpu_relax(); /*This a nop instr on i386*/
		}
	}
	/* Advertise ownership so other agents can identify the holder. */
	qla4_8xxx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
	return 0;
}
857
/* Release the ROM lock: reading the UNLOCK register frees hardware
 * semaphore 2 taken in qla4_8xxx_rom_lock(). */
static void
qla4_8xxx_rom_unlock(struct scsi_qla_host *ha)
{
	qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
}
863
864static int
865qla4_8xxx_wait_rom_done(struct scsi_qla_host *ha)
866{
867 long timeout = 0;
868 long done = 0 ;
869
870 while (done == 0) {
871 done = qla4_8xxx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
872 done &= 2;
873 timeout++;
874 if (timeout >= rom_max_timeout) {
875 printk("%s: Timeout reached waiting for rom done",
876 DRIVER_NAME);
877 return -1;
878 }
879 }
880 return 0;
881}
882
883static int
884qla4_8xxx_do_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp)
885{
886 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
887 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
888 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
889 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
890 if (qla4_8xxx_wait_rom_done(ha)) {
891 printk("%s: Error waiting for rom done\n", DRIVER_NAME);
892 return -1;
893 }
894 /* reset abyte_cnt and dummy_byte_cnt */
895 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
896 udelay(10);
897 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
898
899 *valp = qla4_8xxx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
900 return 0;
901}
902
903static int
904qla4_8xxx_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp)
905{
906 int ret, loops = 0;
907
908 while ((qla4_8xxx_rom_lock(ha) != 0) && (loops < 50000)) {
909 udelay(100);
910 loops++;
911 }
912 if (loops >= 50000) {
913 printk("%s: qla4_8xxx_rom_lock failed\n", DRIVER_NAME);
914 return -1;
915 }
916 ret = qla4_8xxx_do_rom_fast_read(ha, addr, valp);
917 qla4_8xxx_rom_unlock(ha);
918 return ret;
919}
920
/**
 * This routine does CRB initialize sequence
 * to put the ISP into operational state:
 * it replays the addr/value pairs stored in flash, skipping registers
 * that must not be touched (PCI setup, clocks, SMB, DDR, cold-reboot
 * magic), then resets the caches and protocol engines.
 *
 * Returns 0 on success, -1 on any flash read / validation / allocation
 * failure.  @verbose is currently unused.
 **/
static int
qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
{
	int addr, val;
	int i ;
	struct crb_addr_pair *buf;
	unsigned long off;
	unsigned offset, n;

	struct crb_addr_pair {
		long addr;
		long data;
	};

	/* Halt all the individual PEGs and other blocks of the ISP */
	qla4_8xxx_rom_lock(ha);
	if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
		/* don't reset CAM block on reset */
		qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
	else
		qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);

	qla4_8xxx_rom_unlock(ha);

	/* Read the signature value from the flash.
	 * Offset 0: Contains signature (0xcafecafe)
	 * Offset 4: Offset and number of addr/value pairs
	 * that present in CRB initialize sequence
	 */
	if (qla4_8xxx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
	    qla4_8xxx_rom_fast_read(ha, 4, &n) != 0) {
		ql4_printk(KERN_WARNING, ha,
			"[ERROR] Reading crb_init area: n: %08x\n", n);
		return -1;
	}

	/* Offset in flash = lower 16 bits
	 * Number of entries = upper 16 bits
	 */
	offset = n & 0xffffU;
	n = (n >> 16) & 0xffffU;

	/* number of addr/value pair should not exceed 1024 entries */
	if (n >= 1024) {
		ql4_printk(KERN_WARNING, ha,
		    "%s: %s:n=0x%x [ERROR] Card flash not initialized.\n",
		    DRIVER_NAME, __func__, n);
		return -1;
	}

	ql4_printk(KERN_INFO, ha,
		"%s: %d CRB init values found in ROM.\n", DRIVER_NAME, n);

	/* n < 1024 (checked above), so this multiply cannot overflow. */
	buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL);
	if (buf == NULL) {
		ql4_printk(KERN_WARNING, ha,
		    "%s: [ERROR] Unable to malloc memory.\n", DRIVER_NAME);
		return -1;
	}

	/* First pass: read the whole table out of flash before writing
	 * anything, so a read failure leaves the hardware untouched. */
	for (i = 0; i < n; i++) {
		if (qla4_8xxx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 ||
		    qla4_8xxx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) !=
		    0) {
			kfree(buf);
			return -1;
		}

		buf[i].addr = addr;
		buf[i].data = val;
	}

	for (i = 0; i < n; i++) {
		/* Translate internal CRB initialization
		 * address to PCI bus address
		 */
		off = qla4_8xxx_decode_crb_addr((unsigned long)buf[i].addr) +
			QLA82XX_PCI_CRBSPACE;
		/* Not all CRB addr/value pair to be written,
		 * some of them are skipped
		 */

		/* skip if LS bit is set*/
		if (off & 0x1) {
			DEBUG2(ql4_printk(KERN_WARNING, ha,
			    "Skip CRB init replay for offset = 0x%lx\n", off));
			continue;
		}

		/* skipping cold reboot MAGIC */
		if (off == QLA82XX_CAM_RAM(0x1fc))
			continue;

		/* do not reset PCI */
		if (off == (ROMUSB_GLB + 0xbc))
			continue;

		/* skip core clock, so that firmware can increase the clock */
		if (off == (ROMUSB_GLB + 0xc8))
			continue;

		/* skip the function enable register */
		if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION))
			continue;

		if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION2))
			continue;

		if ((off & 0x0ff00000) == QLA82XX_CRB_SMB)
			continue;

		if ((off & 0x0ff00000) == QLA82XX_CRB_DDR_NET)
			continue;

		if (off == ADDR_ERROR) {
			ql4_printk(KERN_WARNING, ha,
			    "%s: [ERROR] Unknown addr: 0x%08lx\n",
			    DRIVER_NAME, buf[i].addr);
			continue;
		}

		qla4_8xxx_wr_32(ha, off, buf[i].data);

		/* ISP requires much bigger delay to settle down,
		 * else crb_window returns 0xffffffff
		 */
		if (off == QLA82XX_ROMUSB_GLB_SW_RESET)
			msleep(1000);

		/* ISP requires millisec delay between
		 * successive CRB register updation
		 */
		msleep(1);
	}

	kfree(buf);

	/* Resetting the data and instruction cache */
	qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e);
	qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8);
	qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8);

	/* Clear all protocol processing engines */
	qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0);
	qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0);
	qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0);
	qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0);
	qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0);
	qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0);
	qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0);
	qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0);

	return 0;
}
1079
1080static int qla4_8xxx_check_for_bad_spd(struct scsi_qla_host *ha)
1081{
1082 u32 val = 0;
1083 val = qla4_8xxx_rd_32(ha, BOOT_LOADER_DIMM_STATUS) ;
1084 val &= QLA82XX_BOOT_LOADER_MN_ISSUE;
1085 if (val & QLA82XX_PEG_TUNE_MN_SPD_ZEROED) {
1086 printk("Memory DIMM SPD not programmed. Assumed valid.\n");
1087 return 1;
1088 } else if (val) {
1089 printk("Memory DIMM type incorrect. Info:%08X.\n", val);
1090 return 2;
1091 }
1092 return 0;
1093}
1094
/*
 * Copy the boot loader from flash into ISP memory, 8 bytes at a time,
 * from the bootload flash region up to @image_start, then kick PEG 0.
 *
 * Returns 0 on success, -1 on any flash read failure.
 */
static int
qla4_8xxx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start)
{
	int i;
	long size = 0;
	long flashaddr, memaddr;
	u64 data;
	u32 high, low;

	/* Source (flash) and destination (ISP memory) start at the same
	 * offset; size is in 8-byte units. */
	flashaddr = memaddr = ha->hw.flt_region_bootload;
	size = (image_start - flashaddr)/8;

	DEBUG2(printk("scsi%ld: %s: bootldr=0x%lx, fw_image=0x%x\n",
	    ha->host_no, __func__, flashaddr, image_start));

	for (i = 0; i < size; i++) {
		if ((qla4_8xxx_rom_fast_read(ha, flashaddr, (int *)&low)) ||
		    (qla4_8xxx_rom_fast_read(ha, flashaddr + 4,
		    (int *)&high))) {
			return -1;
		}
		data = ((u64)high << 32) | low ;
		qla4_8xxx_pci_mem_write_2M(ha, memaddr, &data, 8);
		flashaddr += 8;
		memaddr += 8;

		/* Breathe every 0x1000 iterations so we don't hog the CPU. */
		if (i%0x1000 == 0)
			msleep(1);

	}

	udelay(100);

	/* Start the boot loader: set PEG 0 entry and release it from
	 * reset.  hw_lock taken as reader per this driver's convention
	 * for window-register access. */
	read_lock(&ha->hw_lock);
	qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
	qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
	read_unlock(&ha->hw_lock);

	return 0;
}
1135
1136static int qla4_8xxx_load_fw(struct scsi_qla_host *ha, uint32_t image_start)
1137{
1138 u32 rst;
1139
1140 qla4_8xxx_wr_32(ha, CRB_CMDPEG_STATE, 0);
1141 if (qla4_8xxx_pinit_from_rom(ha, 0) != QLA_SUCCESS) {
1142 printk(KERN_WARNING "%s: Error during CRB Initialization\n",
1143 __func__);
1144 return QLA_ERROR;
1145 }
1146
1147 udelay(500);
1148
1149 /* at this point, QM is in reset. This could be a problem if there are
1150 * incoming d* transition queue messages. QM/PCIE could wedge.
1151 * To get around this, QM is brought out of reset.
1152 */
1153
1154 rst = qla4_8xxx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET);
1155 /* unreset qm */
1156 rst &= ~(1 << 28);
1157 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst);
1158
1159 if (qla4_8xxx_load_from_flash(ha, image_start)) {
1160 printk("%s: Error trying to load fw from flash!\n", __func__);
1161 return QLA_ERROR;
1162 }
1163
1164 return QLA_SUCCESS;
1165}
1166
/*
 * Read @size bytes (1/2/4/8) of ISP memory at @off via the MIU test
 * agent, which transfers 16-byte-aligned chunks.  Reads up to two
 * chunks when the request straddles an alignment boundary, then
 * splices the requested bytes out of the raw data.
 *
 * Returns 0 on success, -1 when the agent stays busy.
 */
int
qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *ha,
		u64 off, void *data, int size)
{
	int i, j = 0, k, start, end, loop, sz[2], off0[2];
	int shift_amount;
	uint32_t temp;
	/* word[0]/word[1]: 64-bit halves gathered from the agent. */
	uint64_t off8, val, mem_crb, word[2] = {0, 0};

	/*
	 * If not MN, go check for MS or invalid.
	 */

	if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
		mem_crb = QLA82XX_CRB_QDR_NET;
	else {
		mem_crb = QLA82XX_CRB_DDR_NET;
		/* Out-of-range DDR addresses fall back to direct BAR0 I/O. */
		if (qla4_8xxx_pci_mem_bound_check(ha, off, size) == 0)
			return qla4_8xxx_pci_mem_read_direct(ha,
					off, data, size);
	}


	/* 16-byte-aligned base and offset of the request within it. */
	off8 = off & 0xfffffff0;
	off0[0] = off & 0xf;
	sz[0] = (size < (16 - off0[0])) ? size : (16 - off0[0]);
	shift_amount = 4;

	/* One or two 16-byte agent transactions cover the request. */
	loop = ((off0[0] + size - 1) >> shift_amount) + 1;
	off0[1] = 0;
	sz[1] = size - sz[0];

	for (i = 0; i < loop; i++) {
		/* Program the agent address, then pulse ENABLE|START.
		 * This write order is hardware-mandated. */
		temp = off8 + (i << shift_amount);
		qla4_8xxx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
		temp = 0;
		qla4_8xxx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
		temp = MIU_TA_CTL_ENABLE;
		qla4_8xxx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
		temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
		qla4_8xxx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);

		for (j = 0; j < MAX_CTL_CHECK; j++) {
			temp = qla4_8xxx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
			if ((temp & MIU_TA_CTL_BUSY) == 0)
				break;
		}

		if (j >= MAX_CTL_CHECK) {
			if (printk_ratelimit())
				ql4_printk(KERN_ERR, ha,
				    "failed to read through agent\n");
			break;
		}

		/* Collect the 32-bit RDDATA words that cover this span. */
		start = off0[i] >> 2;
		end = (off0[i] + sz[i] - 1) >> 2;
		for (k = start; k <= end; k++) {
			temp = qla4_8xxx_rd_32(ha,
				mem_crb + MIU_TEST_AGT_RDDATA(k));
			word[i] |= ((uint64_t)temp << (32 * (k & 1)));
		}
	}

	if (j >= MAX_CTL_CHECK)
		return -1;

	/* Extract exactly the requested bytes from the raw words. */
	if ((off0[0] & 7) == 0) {
		val = word[0];
	} else {
		val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
		((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
	}

	switch (size) {
	case 1:
		*(uint8_t *)data = val;
		break;
	case 2:
		*(uint16_t *)data = val;
		break;
	case 4:
		*(uint32_t *)data = val;
		break;
	case 8:
		*(uint64_t *)data = val;
		break;
	}
	return 0;
}
1257
/*
 * Write @size bytes (1/2/4/8) from @data to ISP memory at @off via the
 * MIU test agent.  Because the agent transfers whole 16-byte chunks,
 * this is a read-modify-write: read the covering chunk(s), splice the
 * new bytes in, then write everything back.
 *
 * Returns 0 on success, -1 when the read-back or agent write fails.
 */
int
qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha,
		u64 off, void *data, int size)
{
	int i, j, ret = 0, loop, sz[2], off0;
	int scale, shift_amount, startword;
	uint32_t temp;
	uint64_t off8, mem_crb, tmpw, word[2] = {0, 0};

	/*
	 * If not MN, go check for MS or invalid.
	 */
	if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
		mem_crb = QLA82XX_CRB_QDR_NET;
	else {
		mem_crb = QLA82XX_CRB_DDR_NET;
		/* Out-of-range DDR addresses fall back to direct BAR0 I/O. */
		if (qla4_8xxx_pci_mem_bound_check(ha, off, size) == 0)
			return qla4_8xxx_pci_mem_write_direct(ha,
					off, data, size);
	}

	/* Byte offset within the 8-byte word, and how many of the bytes
	 * land in the first vs. second word. */
	off0 = off & 0x7;
	sz[0] = (size < (8 - off0)) ? size : (8 - off0);
	sz[1] = size - sz[0];

	off8 = off & 0xfffffff0;
	loop = (((off & 0xf) + size - 1) >> 4) + 1;
	shift_amount = 4;
	scale = 2;
	startword = (off & 0xf)/8;

	/* Read phase: fetch the chunk(s) we are about to modify. */
	for (i = 0; i < loop; i++) {
		if (qla4_8xxx_pci_mem_read_2M(ha, off8 +
		    (i << shift_amount), &word[i * scale], 8))
			return -1;
	}

	switch (size) {
	case 1:
		tmpw = *((uint8_t *)data);
		break;
	case 2:
		tmpw = *((uint16_t *)data);
		break;
	case 4:
		tmpw = *((uint32_t *)data);
		break;
	case 8:
	default:
		tmpw = *((uint64_t *)data);
		break;
	}

	/* Modify phase: mask out the old bytes and merge in tmpw. */
	if (sz[0] == 8)
		word[startword] = tmpw;
	else {
		word[startword] &=
		    ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
		word[startword] |= tmpw << (off0 * 8);
	}

	if (sz[1] != 0) {
		word[startword+1] &= ~(~0ULL << (sz[1] * 8));
		word[startword+1] |= tmpw >> (sz[0] * 8);
	}

	/* Write phase: push the chunk(s) back through the agent.  The
	 * register write order is hardware-mandated. */
	for (i = 0; i < loop; i++) {
		temp = off8 + (i << shift_amount);
		qla4_8xxx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
		temp = 0;
		qla4_8xxx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
		temp = word[i * scale] & 0xffffffff;
		qla4_8xxx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
		temp = (word[i * scale] >> 32) & 0xffffffff;
		qla4_8xxx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
		temp = word[i*scale + 1] & 0xffffffff;
		qla4_8xxx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_LO,
		    temp);
		temp = (word[i*scale + 1] >> 32) & 0xffffffff;
		qla4_8xxx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_HI,
		    temp);

		temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
		qla4_8xxx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp);
		temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
		qla4_8xxx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp);

		for (j = 0; j < MAX_CTL_CHECK; j++) {
			temp = qla4_8xxx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
			if ((temp & MIU_TA_CTL_BUSY) == 0)
				break;
		}

		if (j >= MAX_CTL_CHECK) {
			if (printk_ratelimit())
				ql4_printk(KERN_ERR, ha,
				    "failed to write through agent\n");
			ret = -1;
			break;
		}
	}

	return ret;
}
1362
/*
 * Wait for the command PEG to report initialization complete.
 *
 * Only polls when @pegtune_val is 0 (up to 60 iterations, ~500 jiffies
 * apart); a non-zero @pegtune_val returns 0 immediately.  On timeout,
 * reports the bad-SPD status and the PEGTUNE_DONE register value.
 * Returns 0 when ready, -1 on timeout.  Sleeps; process context only.
 */
static int qla4_8xxx_cmdpeg_ready(struct scsi_qla_host *ha, int pegtune_val)
{
	u32 val = 0;
	int retries = 60;

	if (!pegtune_val) {
		do {
			val = qla4_8xxx_rd_32(ha, CRB_CMDPEG_STATE);
			if ((val == PHAN_INITIALIZE_COMPLETE) ||
			    (val == PHAN_INITIALIZE_ACK))
				return 0;
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(500);

		} while (--retries);

		qla4_8xxx_check_for_bad_spd(ha);

		if (!retries) {
			/* Reuse pegtune_val to report the diagnostic
			 * register value in the failure message. */
			pegtune_val = qla4_8xxx_rd_32(ha,
				QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
			printk(KERN_WARNING "%s: init failed, "
				"pegtune_val = %x\n", __func__, pegtune_val);
			return -1;
		}
	}
	return 0;
}
1391
1392static int qla4_8xxx_rcvpeg_ready(struct scsi_qla_host *ha)
1393{
1394 uint32_t state = 0;
1395 int loops = 0;
1396
1397 /* Window 1 call */
1398 read_lock(&ha->hw_lock);
1399 state = qla4_8xxx_rd_32(ha, CRB_RCVPEG_STATE);
1400 read_unlock(&ha->hw_lock);
1401
1402 while ((state != PHAN_PEG_RCV_INITIALIZED) && (loops < 30000)) {
1403 udelay(100);
1404 /* Window 1 call */
1405 read_lock(&ha->hw_lock);
1406 state = qla4_8xxx_rd_32(ha, CRB_RCVPEG_STATE);
1407 read_unlock(&ha->hw_lock);
1408
1409 loops++;
1410 }
1411
1412 if (loops >= 30000) {
1413 DEBUG2(ql4_printk(KERN_INFO, ha,
1414 "Receive Peg initialization not complete: 0x%x.\n", state));
1415 return QLA_ERROR;
1416 }
1417
1418 return QLA_SUCCESS;
1419}
1420
1421static inline void
1422qla4_8xxx_set_drv_active(struct scsi_qla_host *ha)
1423{
1424 uint32_t drv_active;
1425
1426 drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
1427 drv_active |= (1 << (ha->func_num * 4));
1428 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
1429}
1430
1431void
1432qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha)
1433{
1434 uint32_t drv_active;
1435
1436 drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
1437 drv_active &= ~(1 << (ha->func_num * 4));
1438 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
1439}
1440
1441static inline int
1442qla4_8xxx_need_reset(struct scsi_qla_host *ha)
1443{
1444 uint32_t drv_state;
1445 int rval;
1446
1447 drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
1448 rval = drv_state & (1 << (ha->func_num * 4));
1449 return rval;
1450}
1451
1452static inline void
1453qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha)
1454{
1455 uint32_t drv_state;
1456
1457 drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
1458 drv_state |= (1 << (ha->func_num * 4));
1459 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
1460}
1461
1462static inline void
1463qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha)
1464{
1465 uint32_t drv_state;
1466
1467 drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
1468 drv_state &= ~(1 << (ha->func_num * 4));
1469 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
1470}
1471
1472static inline void
1473qla4_8xxx_set_qsnt_ready(struct scsi_qla_host *ha)
1474{
1475 uint32_t qsnt_state;
1476
1477 qsnt_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
1478 qsnt_state |= (2 << (ha->func_num * 4));
1479 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
1480}
1481
1482
1483static int
1484qla4_8xxx_start_firmware(struct scsi_qla_host *ha, uint32_t image_start)
1485{
1486 int pcie_cap;
1487 uint16_t lnk;
1488
1489 /* scrub dma mask expansion register */
1490 qla4_8xxx_wr_32(ha, CRB_DMA_SHIFT, 0x55555555);
1491
1492 /* Overwrite stale initialization register values */
1493 qla4_8xxx_wr_32(ha, CRB_CMDPEG_STATE, 0);
1494 qla4_8xxx_wr_32(ha, CRB_RCVPEG_STATE, 0);
1495 qla4_8xxx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0);
1496 qla4_8xxx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);
1497
1498 if (qla4_8xxx_load_fw(ha, image_start) != QLA_SUCCESS) {
1499 printk("%s: Error trying to start fw!\n", __func__);
1500 return QLA_ERROR;
1501 }
1502
1503 /* Handshake with the card before we register the devices. */
1504 if (qla4_8xxx_cmdpeg_ready(ha, 0) != QLA_SUCCESS) {
1505 printk("%s: Error during card handshake!\n", __func__);
1506 return QLA_ERROR;
1507 }
1508
1509 /* Negotiated Link width */
1510 pcie_cap = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
1511 pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk);
1512 ha->link_width = (lnk >> 4) & 0x3f;
1513
1514 /* Synchronize with Receive peg */
1515 return qla4_8xxx_rcvpeg_ready(ha);
1516}
1517
1518static int
1519qla4_8xxx_try_start_fw(struct scsi_qla_host *ha)
1520{
1521 int rval = QLA_ERROR;
1522
1523 /*
1524 * FW Load priority:
1525 * 1) Operational firmware residing in flash.
1526 * 2) Fail
1527 */
1528
1529 ql4_printk(KERN_INFO, ha,
1530 "FW: Retrieving flash offsets from FLT/FDT ...\n");
1531 rval = qla4_8xxx_get_flash_info(ha);
1532 if (rval != QLA_SUCCESS)
1533 return rval;
1534
1535 ql4_printk(KERN_INFO, ha,
1536 "FW: Attempting to load firmware from flash...\n");
1537 rval = qla4_8xxx_start_firmware(ha, ha->hw.flt_region_fw);
1538 if (rval == QLA_SUCCESS)
1539 return rval;
1540
1541 ql4_printk(KERN_ERR, ha, "FW: Load firmware from flash FAILED...\n");
1542
1543 return rval;
1544}
1545
/**
 * qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw
 * @ha: pointer to adapter structure
 *
 * If no reset is pending and the PEG alive counter is ticking, the
 * firmware is already running and the device is simply marked READY.
 * Otherwise this driver claims INITIALIZING, publishes the IDC
 * version, and loads firmware itself.
 *
 * Note: IDC lock must be held upon entry
 **/
static int
qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
{
	int rval, i, timeout;
	uint32_t old_count, count;

	if (qla4_8xxx_need_reset(ha))
		goto dev_initialize;

	/* Sample the PEG alive counter: if it advances within ~2s the
	 * firmware is already up and no bootstrap is needed. */
	old_count = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);

	for (i = 0; i < 10; i++) {
		timeout = msleep_interruptible(200);
		if (timeout) {
			/* Interrupted by a signal: give up and fail the
			 * device rather than guess at its state. */
			qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
			   QLA82XX_DEV_FAILED);
			return QLA_ERROR;
		}

		count = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
		if (count != old_count)
			goto dev_ready;
	}

dev_initialize:
	/* set to DEV_INITIALIZING */
	ql4_printk(KERN_INFO, ha, "HW State: INITIALIZING\n");
	qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING);

	/* Driver that sets device state to initializing sets IDC version */
	qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION);

	/* Firmware load can sleep; do it without the IDC lock. */
	qla4_8xxx_idc_unlock(ha);
	rval = qla4_8xxx_try_start_fw(ha);
	qla4_8xxx_idc_lock(ha);

	if (rval != QLA_SUCCESS) {
		ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
		qla4_8xxx_clear_drv_active(ha);
		qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED);
		return rval;
	}

dev_ready:
	ql4_printk(KERN_INFO, ha, "HW State: READY\n");
	qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);

	return QLA_SUCCESS;
}
1601
/**
 * qla4_8xxx_need_reset_handler - Code to start reset sequence
 * @ha: pointer to adapter structure
 *
 * Acknowledges the reset for this function, waits for every active
 * function to acknowledge (DRV_STATE == DRV_ACTIVE), then forces the
 * device to COLD unless another function already began initializing.
 *
 * Note: IDC lock must be held upon entry
 **/
static void
qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
{
	uint32_t dev_state, drv_state, drv_active;
	unsigned long reset_timeout;

	ql4_printk(KERN_INFO, ha,
	    "Performing ISP error recovery\n");

	if (test_and_clear_bit(AF_ONLINE, &ha->flags)) {
		/* disable_intrs may sleep; drop the IDC lock around it. */
		qla4_8xxx_idc_unlock(ha);
		ha->isp_ops->disable_intrs(ha);
		qla4_8xxx_idc_lock(ha);
	}

	qla4_8xxx_set_rst_ready(ha);

	/* wait for 10 seconds for reset ack from all functions */
	reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);

	drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
	drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);

	ql4_printk(KERN_INFO, ha,
	    "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
	    __func__, ha->host_no, drv_state, drv_active);

	while (drv_state != drv_active) {
		if (time_after_eq(jiffies, reset_timeout)) {
			printk("%s: RESET TIMEOUT!\n", DRIVER_NAME);
			break;
		}

		/* Sleep without the IDC lock so other functions can
		 * update their state bits. */
		qla4_8xxx_idc_unlock(ha);
		msleep(1000);
		qla4_8xxx_idc_lock(ha);

		drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
		drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
	}

	dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
	ql4_printk(KERN_INFO, ha, "3:Device state is 0x%x = %s\n", dev_state,
		dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");

	/* Force to DEV_COLD unless someone else is starting a reset */
	if (dev_state != QLA82XX_DEV_INITIALIZING) {
		ql4_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n");
		qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
	}
}
1659
/**
 * qla4_8xxx_need_qsnt_handler - Code to start qsnt
 * @ha: pointer to adapter structure
 *
 * Marks this function quiescence-ready in DRV_STATE under the IDC
 * lock.  Expects the lock NOT to be held on entry.
 **/
void
qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha)
{
	qla4_8xxx_idc_lock(ha);
	qla4_8xxx_set_qsnt_ready(ha);
	qla4_8xxx_idc_unlock(ha);
}
1671
/**
 * qla4_8xxx_device_state_handler - Adapter state machine
 * @ha: pointer to host adapter structure.
 *
 * Loops on the shared IDC device-state register, driving or waiting on
 * the bring-up protocol until the device reaches READY (QLA_SUCCESS)
 * or FAILED/unknown (QLA_ERROR).  An overall init timeout forces the
 * device to FAILED.
 *
 * Note: IDC lock must be UNLOCKED upon entry
 **/
int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
{
	uint32_t dev_state;
	int rval = QLA_SUCCESS;
	unsigned long dev_init_timeout;

	/* First pass only: advertise this driver as active. */
	if (!test_bit(AF_INIT_DONE, &ha->flags))
		qla4_8xxx_set_drv_active(ha);

	dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
	ql4_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state,
		dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");

	/* wait for 30 seconds for device to go ready */
	dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);

	while (1) {
		qla4_8xxx_idc_lock(ha);

		if (time_after_eq(jiffies, dev_init_timeout)) {
			/* Timed out: mark FAILED; the FAILED case below
			 * performs the actual cleanup and exit. */
			ql4_printk(KERN_WARNING, ha, "Device init failed!\n");
			qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
				QLA82XX_DEV_FAILED);
		}

		dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
		ql4_printk(KERN_INFO, ha,
		    "2:Device state is 0x%x = %s\n", dev_state,
		    dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");

		/* NOTE: Make sure idc unlocked upon exit of switch statement */
		switch (dev_state) {
		case QLA82XX_DEV_READY:
			qla4_8xxx_idc_unlock(ha);
			goto exit;
		case QLA82XX_DEV_COLD:
			/* bootstrap runs with the IDC lock held. */
			rval = qla4_8xxx_device_bootstrap(ha);
			qla4_8xxx_idc_unlock(ha);
			goto exit;
		case QLA82XX_DEV_INITIALIZING:
			/* Another function is initializing; wait. */
			qla4_8xxx_idc_unlock(ha);
			msleep(1000);
			break;
		case QLA82XX_DEV_NEED_RESET:
			if (!ql4xdontresethba) {
				qla4_8xxx_need_reset_handler(ha);
				/* Update timeout value after need
				 * reset handler */
				dev_init_timeout = jiffies +
					(ha->nx_dev_init_timeout * HZ);
			}
			qla4_8xxx_idc_unlock(ha);
			break;
		case QLA82XX_DEV_NEED_QUIESCENT:
			qla4_8xxx_idc_unlock(ha);
			/* idc locked/unlocked in handler */
			qla4_8xxx_need_qsnt_handler(ha);
			qla4_8xxx_idc_lock(ha);
			/* fall thru needs idc_locked */
		case QLA82XX_DEV_QUIESCENT:
			qla4_8xxx_idc_unlock(ha);
			msleep(1000);
			break;
		case QLA82XX_DEV_FAILED:
			qla4_8xxx_idc_unlock(ha);
			qla4xxx_dead_adapter_cleanup(ha);
			rval = QLA_ERROR;
			goto exit;
		default:
			/* Unrecognized state: treat like FAILED. */
			qla4_8xxx_idc_unlock(ha);
			qla4xxx_dead_adapter_cleanup(ha);
			rval = QLA_ERROR;
			goto exit;
		}
	}
exit:
	return rval;
}
1756
1757int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
1758{
1759 int retval;
1760 retval = qla4_8xxx_device_state_handler(ha);
1761
1762 if (retval == QLA_SUCCESS &&
1763 !test_bit(AF_INIT_DONE, &ha->flags)) {
1764 retval = qla4xxx_request_irqs(ha);
1765 if (retval != QLA_SUCCESS) {
1766 ql4_printk(KERN_WARNING, ha,
1767 "Failed to reserve interrupt %d already in use.\n",
1768 ha->pdev->irq);
1769 } else {
1770 set_bit(AF_IRQ_ATTACHED, &ha->flags);
1771 ha->host->irq = ha->pdev->irq;
1772 ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n",
1773 __func__, ha->pdev->irq);
1774 }
1775 }
1776 return retval;
1777}
1778
1779/*****************************************************************************/
1780/* Flash Manipulation Routines */
1781/*****************************************************************************/
1782
1783#define OPTROM_BURST_SIZE 0x1000
1784#define OPTROM_BURST_DWORDS (OPTROM_BURST_SIZE / 4)
1785
1786#define FARX_DATA_FLAG BIT_31
1787#define FARX_ACCESS_FLASH_CONF 0x7FFD0000
1788#define FARX_ACCESS_FLASH_DATA 0x7FF00000
1789
/* Compose a flash *configuration* space address from the per-HW
 * configuration base and a flash-relative address @faddr. */
static inline uint32_t
flash_conf_addr(struct ql82xx_hw_data *hw, uint32_t faddr)
{
	return hw->flash_conf_off | faddr;
}
1795
/* Compose a flash *data* space address from the per-HW data base and a
 * flash-relative address @faddr. */
static inline uint32_t
flash_data_addr(struct ql82xx_hw_data *hw, uint32_t faddr)
{
	return hw->flash_data_off | faddr;
}
1801
/*
 * Read @length bytes of flash starting at byte address @faddr into
 * @dwptr as little-endian dwords, under the ROM hardware lock.
 *
 * Always returns @dwptr; on lock failure or a partial read the buffer
 * is simply left incomplete (callers get no error indication).
 */
static uint32_t *
qla4_8xxx_read_flash_data(struct scsi_qla_host *ha, uint32_t *dwptr,
    uint32_t faddr, uint32_t length)
{
	uint32_t i;
	uint32_t val;
	int loops = 0;

	/* Take the ROM lock once for the whole burst instead of
	 * per-dword (cf. qla4_8xxx_rom_fast_read). */
	while ((qla4_8xxx_rom_lock(ha) != 0) && (loops < 50000)) {
		udelay(100);
		cond_resched();
		loops++;
	}
	if (loops >= 50000) {
		ql4_printk(KERN_WARNING, ha, "ROM lock failed\n");
		return dwptr;
	}

	/* Dword reads to flash. */
	for (i = 0; i < length/4; i++, faddr += 4) {
		if (qla4_8xxx_do_rom_fast_read(ha, faddr, &val)) {
			ql4_printk(KERN_WARNING, ha,
			    "Do ROM fast read failed\n");
			goto done_read;
		}
		dwptr[i] = __constant_cpu_to_le32(val);
	}

done_read:
	qla4_8xxx_rom_unlock(ha);
	return dwptr;
}
1833
/**
 * Address and length are byte address
 *
 * Byte-oriented wrapper over qla4_8xxx_read_flash_data(); @buf must be
 * dword-aligned and at least @length bytes.  Always returns @buf.
 **/
static uint8_t *
qla4_8xxx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
		uint32_t offset, uint32_t length)
{
	qla4_8xxx_read_flash_data(ha, (uint32_t *)buf, offset, length);
	return buf;
}
1844
1845static int
1846qla4_8xxx_find_flt_start(struct scsi_qla_host *ha, uint32_t *start)
1847{
1848 const char *loc, *locations[] = { "DEF", "PCI" };
1849
1850 /*
1851 * FLT-location structure resides after the last PCI region.
1852 */
1853
1854 /* Begin with sane defaults. */
1855 loc = locations[0];
1856 *start = FA_FLASH_LAYOUT_ADDR_82;
1857
1858 DEBUG2(ql4_printk(KERN_INFO, ha, "FLTL[%s] = 0x%x.\n", loc, *start));
1859 return QLA_SUCCESS;
1860}
1861
/* Parse the FLT (flash layout table) at @flt_addr (dword address) and
 * record the per-region flash offsets in ha->hw. The request ring is
 * reused as a scratch DMA buffer for the burst read. Falls back to
 * hardcoded region defaults if the table is blank, has an unsupported
 * version, or fails its checksum. */
static void
qla4_8xxx_get_flt_info(struct scsi_qla_host *ha, uint32_t flt_addr)
{
	const char *loc, *locations[] = { "DEF", "FLT" };
	uint16_t *wptr;
	uint16_t cnt, chksum;
	uint32_t start;
	struct qla_flt_header *flt;
	struct qla_flt_region *region;
	struct ql82xx_hw_data *hw = &ha->hw;

	hw->flt_region_flt = flt_addr;
	/* Three views of the same scratch buffer: raw words for the
	 * checksum, the header struct, and the region array following it. */
	wptr = (uint16_t *)ha->request_ring;
	flt = (struct qla_flt_header *)ha->request_ring;
	region = (struct qla_flt_region *)&flt[1];
	/* flt_addr is a dword offset; convert to bytes for the read. */
	qla4_8xxx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
	    flt_addr << 2, OPTROM_BURST_SIZE);
	/* 0xffff in the first word means erased/blank flash. */
	if (*wptr == __constant_cpu_to_le16(0xffff))
		goto no_flash_data;
	if (flt->version != __constant_cpu_to_le16(1)) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "Unsupported FLT detected: "
		    "version=0x%x length=0x%x checksum=0x%x.\n",
		    le16_to_cpu(flt->version), le16_to_cpu(flt->length),
		    le16_to_cpu(flt->checksum)));
		goto no_flash_data;
	}

	/* Sum header + payload as 16-bit words; a valid table sums to 0. */
	cnt = (sizeof(struct qla_flt_header) + le16_to_cpu(flt->length)) >> 1;
	for (chksum = 0; cnt; cnt--)
		chksum += le16_to_cpu(*wptr++);
	if (chksum) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "Inconsistent FLT detected: "
		    "version=0x%x length=0x%x checksum=0x%x.\n",
		    le16_to_cpu(flt->version), le16_to_cpu(flt->length),
		    chksum));
		goto no_flash_data;
	}

	loc = locations[1];
	cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
	for ( ; cnt; cnt--, region++) {
		/* Store addresses as DWORD offsets. */
		start = le32_to_cpu(region->start) >> 2;

		DEBUG3(ql4_printk(KERN_DEBUG, ha, "FLT[%02x]: start=0x%x "
		    "end=0x%x size=0x%x.\n", le32_to_cpu(region->code), start,
		    le32_to_cpu(region->end) >> 2, le32_to_cpu(region->size)));

		/* Only the regions the driver uses are captured; other
		 * region codes are silently skipped. */
		switch (le32_to_cpu(region->code) & 0xff) {
		case FLT_REG_FDT:
			hw->flt_region_fdt = start;
			break;
		case FLT_REG_BOOT_CODE_82:
			hw->flt_region_boot = start;
			break;
		case FLT_REG_FW_82:
			hw->flt_region_fw = start;
			break;
		case FLT_REG_BOOTLOAD_82:
			hw->flt_region_bootload = start;
			break;
		}
	}
	goto done;

no_flash_data:
	/* Use hardcoded defaults. */
	loc = locations[0];

	hw->flt_region_fdt = FA_FLASH_DESCR_ADDR_82;
	hw->flt_region_boot = FA_BOOT_CODE_ADDR_82;
	hw->flt_region_bootload = FA_BOOT_LOAD_ADDR_82;
	hw->flt_region_fw = FA_RISC_CODE_ADDR_82;
done:
	DEBUG2(ql4_printk(KERN_INFO, ha, "FLT[%s]: flt=0x%x fdt=0x%x "
	    "boot=0x%x bootload=0x%x fw=0x%x\n", loc, hw->flt_region_flt,
	    hw->flt_region_fdt, hw->flt_region_boot, hw->flt_region_bootload,
	    hw->flt_region_fw));
}
1941
1942static void
1943qla4_8xxx_get_fdt_info(struct scsi_qla_host *ha)
1944{
1945#define FLASH_BLK_SIZE_4K 0x1000
1946#define FLASH_BLK_SIZE_32K 0x8000
1947#define FLASH_BLK_SIZE_64K 0x10000
1948 const char *loc, *locations[] = { "MID", "FDT" };
1949 uint16_t cnt, chksum;
1950 uint16_t *wptr;
1951 struct qla_fdt_layout *fdt;
1952 uint16_t mid, fid;
1953 struct ql82xx_hw_data *hw = &ha->hw;
1954
1955 hw->flash_conf_off = FARX_ACCESS_FLASH_CONF;
1956 hw->flash_data_off = FARX_ACCESS_FLASH_DATA;
1957
1958 wptr = (uint16_t *)ha->request_ring;
1959 fdt = (struct qla_fdt_layout *)ha->request_ring;
1960 qla4_8xxx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
1961 hw->flt_region_fdt << 2, OPTROM_BURST_SIZE);
1962
1963 if (*wptr == __constant_cpu_to_le16(0xffff))
1964 goto no_flash_data;
1965
1966 if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' ||
1967 fdt->sig[3] != 'D')
1968 goto no_flash_data;
1969
1970 for (cnt = 0, chksum = 0; cnt < sizeof(struct qla_fdt_layout) >> 1;
1971 cnt++)
1972 chksum += le16_to_cpu(*wptr++);
1973
1974 if (chksum) {
1975 DEBUG2(ql4_printk(KERN_INFO, ha, "Inconsistent FDT detected: "
1976 "checksum=0x%x id=%c version=0x%x.\n", chksum, fdt->sig[0],
1977 le16_to_cpu(fdt->version)));
1978 goto no_flash_data;
1979 }
1980
1981 loc = locations[1];
1982 mid = le16_to_cpu(fdt->man_id);
1983 fid = le16_to_cpu(fdt->id);
1984 hw->fdt_wrt_disable = fdt->wrt_disable_bits;
1985 hw->fdt_erase_cmd = flash_conf_addr(hw, 0x0300 | fdt->erase_cmd);
1986 hw->fdt_block_size = le32_to_cpu(fdt->block_size);
1987
1988 if (fdt->unprotect_sec_cmd) {
1989 hw->fdt_unprotect_sec_cmd = flash_conf_addr(hw, 0x0300 |
1990 fdt->unprotect_sec_cmd);
1991 hw->fdt_protect_sec_cmd = fdt->protect_sec_cmd ?
1992 flash_conf_addr(hw, 0x0300 | fdt->protect_sec_cmd) :
1993 flash_conf_addr(hw, 0x0336);
1994 }
1995 goto done;
1996
1997no_flash_data:
1998 loc = locations[0];
1999 hw->fdt_block_size = FLASH_BLK_SIZE_64K;
2000done:
2001 DEBUG2(ql4_printk(KERN_INFO, ha, "FDT[%s]: (0x%x/0x%x) erase=0x%x "
2002 "pro=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
2003 hw->fdt_erase_cmd, hw->fdt_protect_sec_cmd,
2004 hw->fdt_unprotect_sec_cmd, hw->fdt_wrt_disable,
2005 hw->fdt_block_size));
2006}
2007
2008static void
2009qla4_8xxx_get_idc_param(struct scsi_qla_host *ha)
2010{
2011#define QLA82XX_IDC_PARAM_ADDR 0x003e885c
2012 uint32_t *wptr;
2013
2014 if (!is_qla8022(ha))
2015 return;
2016 wptr = (uint32_t *)ha->request_ring;
2017 qla4_8xxx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
2018 QLA82XX_IDC_PARAM_ADDR , 8);
2019
2020 if (*wptr == __constant_cpu_to_le32(0xffffffff)) {
2021 ha->nx_dev_init_timeout = ROM_DEV_INIT_TIMEOUT;
2022 ha->nx_reset_timeout = ROM_DRV_RESET_ACK_TIMEOUT;
2023 } else {
2024 ha->nx_dev_init_timeout = le32_to_cpu(*wptr++);
2025 ha->nx_reset_timeout = le32_to_cpu(*wptr);
2026 }
2027
2028 DEBUG2(ql4_printk(KERN_DEBUG, ha,
2029 "ha->nx_dev_init_timeout = %d\n", ha->nx_dev_init_timeout));
2030 DEBUG2(ql4_printk(KERN_DEBUG, ha,
2031 "ha->nx_reset_timeout = %d\n", ha->nx_reset_timeout));
2032 return;
2033}
2034
2035int
2036qla4_8xxx_get_flash_info(struct scsi_qla_host *ha)
2037{
2038 int ret;
2039 uint32_t flt_addr;
2040
2041 ret = qla4_8xxx_find_flt_start(ha, &flt_addr);
2042 if (ret != QLA_SUCCESS)
2043 return ret;
2044
2045 qla4_8xxx_get_flt_info(ha, flt_addr);
2046 qla4_8xxx_get_fdt_info(ha);
2047 qla4_8xxx_get_idc_param(ha);
2048
2049 return QLA_SUCCESS;
2050}
2051
2052/**
2053 * qla4_8xxx_stop_firmware - stops firmware on specified adapter instance
2054 * @ha: pointer to host adapter structure.
2055 *
2056 * Remarks:
2057 * For iSCSI, throws away all I/O and AENs into bit bucket, so they will
2058 * not be available after successful return. Driver must cleanup potential
2059 * outstanding I/O's after calling this funcion.
2060 **/
2061int
2062qla4_8xxx_stop_firmware(struct scsi_qla_host *ha)
2063{
2064 int status;
2065 uint32_t mbox_cmd[MBOX_REG_COUNT];
2066 uint32_t mbox_sts[MBOX_REG_COUNT];
2067
2068 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
2069 memset(&mbox_sts, 0, sizeof(mbox_sts));
2070
2071 mbox_cmd[0] = MBOX_CMD_STOP_FW;
2072 status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1,
2073 &mbox_cmd[0], &mbox_sts[0]);
2074
2075 DEBUG2(printk("scsi%ld: %s: status = %d\n", ha->host_no,
2076 __func__, status));
2077 return status;
2078}
2079
2080/**
2081 * qla4_8xxx_isp_reset - Resets ISP and aborts all outstanding commands.
2082 * @ha: pointer to host adapter structure.
2083 **/
int
qla4_8xxx_isp_reset(struct scsi_qla_host *ha)
{
	int rval;
	uint32_t dev_state;

	/* Under the IDC lock: if the device is currently READY, request a
	 * reset by advancing the shared device state to NEED_RESET.
	 * Otherwise another function is already driving initialization,
	 * so the state is left untouched. */
	qla4_8xxx_idc_lock(ha);
	dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);

	if (dev_state == QLA82XX_DEV_READY) {
		ql4_printk(KERN_INFO, ha, "HW State: NEED RESET\n");
		qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
		    QLA82XX_DEV_NEED_RESET);
	} else
		ql4_printk(KERN_INFO, ha, "HW State: DEVICE INITIALIZING\n");

	qla4_8xxx_idc_unlock(ha);

	/* Drive/await the reset state machine outside the lock; its
	 * result is this function's return value. */
	rval = qla4_8xxx_device_state_handler(ha);

	/* Clear this function's reset-ready indication under the lock. */
	qla4_8xxx_idc_lock(ha);
	qla4_8xxx_clear_rst_ready(ha);
	qla4_8xxx_idc_unlock(ha);

	return rval;
}
2110
2111/**
2112 * qla4_8xxx_get_sys_info - get adapter MAC address(es) and serial number
2113 * @ha: pointer to host adapter structure.
2114 *
2115 **/
2116int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
2117{
2118 uint32_t mbox_cmd[MBOX_REG_COUNT];
2119 uint32_t mbox_sts[MBOX_REG_COUNT];
2120 struct mbx_sys_info *sys_info;
2121 dma_addr_t sys_info_dma;
2122 int status = QLA_ERROR;
2123
2124 sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
2125 &sys_info_dma, GFP_KERNEL);
2126 if (sys_info == NULL) {
2127 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
2128 ha->host_no, __func__));
2129 return status;
2130 }
2131
2132 memset(sys_info, 0, sizeof(*sys_info));
2133 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
2134 memset(&mbox_sts, 0, sizeof(mbox_sts));
2135
2136 mbox_cmd[0] = MBOX_CMD_GET_SYS_INFO;
2137 mbox_cmd[1] = LSDW(sys_info_dma);
2138 mbox_cmd[2] = MSDW(sys_info_dma);
2139 mbox_cmd[4] = sizeof(*sys_info);
2140
2141 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 6, &mbox_cmd[0],
2142 &mbox_sts[0]) != QLA_SUCCESS) {
2143 DEBUG2(printk("scsi%ld: %s: GET_SYS_INFO failed\n",
2144 ha->host_no, __func__));
2145 goto exit_validate_mac82;
2146 }
2147
2148 if (mbox_sts[4] < sizeof(*sys_info)) {
2149 DEBUG2(printk("scsi%ld: %s: GET_SYS_INFO data receive"
2150 " error (%x)\n", ha->host_no, __func__, mbox_sts[4]));
2151 goto exit_validate_mac82;
2152
2153 }
2154
2155 /* Save M.A.C. address & serial_number */
2156 memcpy(ha->my_mac, &sys_info->mac_addr[0],
2157 min(sizeof(ha->my_mac), sizeof(sys_info->mac_addr)));
2158 memcpy(ha->serial_number, &sys_info->serial_number,
2159 min(sizeof(ha->serial_number), sizeof(sys_info->serial_number)));
2160
2161 DEBUG2(printk("scsi%ld: %s: "
2162 "mac %02x:%02x:%02x:%02x:%02x:%02x "
2163 "serial %s\n", ha->host_no, __func__,
2164 ha->my_mac[0], ha->my_mac[1], ha->my_mac[2],
2165 ha->my_mac[3], ha->my_mac[4], ha->my_mac[5],
2166 ha->serial_number));
2167
2168 status = QLA_SUCCESS;
2169
2170exit_validate_mac82:
2171 dma_free_coherent(&ha->pdev->dev, sizeof(*sys_info), sys_info,
2172 sys_info_dma);
2173 return status;
2174}
2175
2176/* Interrupt handling helpers. */
2177
2178static int
2179qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha)
2180{
2181 uint32_t mbox_cmd[MBOX_REG_COUNT];
2182 uint32_t mbox_sts[MBOX_REG_COUNT];
2183
2184 DEBUG2(ql4_printk(KERN_INFO, ha, "%s\n", __func__));
2185
2186 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
2187 memset(&mbox_sts, 0, sizeof(mbox_sts));
2188 mbox_cmd[0] = MBOX_CMD_ENABLE_INTRS;
2189 mbox_cmd[1] = INTR_ENABLE;
2190 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
2191 &mbox_sts[0]) != QLA_SUCCESS) {
2192 DEBUG2(ql4_printk(KERN_INFO, ha,
2193 "%s: MBOX_CMD_ENABLE_INTRS failed (0x%04x)\n",
2194 __func__, mbox_sts[0]));
2195 return QLA_ERROR;
2196 }
2197 return QLA_SUCCESS;
2198}
2199
2200static int
2201qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha)
2202{
2203 uint32_t mbox_cmd[MBOX_REG_COUNT];
2204 uint32_t mbox_sts[MBOX_REG_COUNT];
2205
2206 DEBUG2(ql4_printk(KERN_INFO, ha, "%s\n", __func__));
2207
2208 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
2209 memset(&mbox_sts, 0, sizeof(mbox_sts));
2210 mbox_cmd[0] = MBOX_CMD_ENABLE_INTRS;
2211 mbox_cmd[1] = INTR_DISABLE;
2212 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
2213 &mbox_sts[0]) != QLA_SUCCESS) {
2214 DEBUG2(ql4_printk(KERN_INFO, ha,
2215 "%s: MBOX_CMD_ENABLE_INTRS failed (0x%04x)\n",
2216 __func__, mbox_sts[0]));
2217 return QLA_ERROR;
2218 }
2219
2220 return QLA_SUCCESS;
2221}
2222
/* Enable interrupts: tell the firmware via mailbox, then clear the
 * hardware interrupt mask under the hardware lock and record the state
 * in ha->flags. */
void
qla4_8xxx_enable_intrs(struct scsi_qla_host *ha)
{
	qla4_8xxx_mbx_intr_enable(ha);

	spin_lock_irq(&ha->hardware_lock);
	/* BIT 10 - reset */
	qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
	spin_unlock_irq(&ha->hardware_lock);
	set_bit(AF_INTERRUPTS_ON, &ha->flags);
}
2234
/* Disable interrupts: notify the firmware only if interrupts were
 * previously on, then set the hardware interrupt mask bit under the
 * hardware lock and clear the state flag. */
void
qla4_8xxx_disable_intrs(struct scsi_qla_host *ha)
{
	if (test_bit(AF_INTERRUPTS_ON, &ha->flags))
		qla4_8xxx_mbx_intr_disable(ha);

	spin_lock_irq(&ha->hardware_lock);
	/* BIT 10 - set */
	qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
	spin_unlock_irq(&ha->hardware_lock);
	clear_bit(AF_INTERRUPTS_ON, &ha->flags);
}
2247
/* Static description of one MSI-X vector: its entry number in the PCI
 * MSI-X table, the driver's index into ha->msix_entries, the name used
 * for request_irq(), and the handler to install. */
struct ql4_init_msix_entry {
	uint16_t entry;		/* PCI MSI-X table entry number */
	uint16_t index;		/* index into ha->msix_entries[] */
	const char *name;	/* irq name shown in /proc/interrupts */
	irq_handler_t handler;	/* interrupt service routine */
};

/* The two vectors this driver uses: a default/catch-all vector and a
 * response-queue vector. */
static struct ql4_init_msix_entry qla4_8xxx_msix_entries[QLA_MSIX_ENTRIES] = {
	{ QLA_MSIX_DEFAULT, QLA_MIDX_DEFAULT,
	    "qla4xxx (default)",
	    (irq_handler_t)qla4_8xxx_default_intr_handler },
	{ QLA_MSIX_RSP_Q, QLA_MIDX_RSP_Q,
	    "qla4xxx (rsp_q)", (irq_handler_t)qla4_8xxx_msix_rsp_q },
};
2262
2263void
2264qla4_8xxx_disable_msix(struct scsi_qla_host *ha)
2265{
2266 int i;
2267 struct ql4_msix_entry *qentry;
2268
2269 for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
2270 qentry = &ha->msix_entries[qla4_8xxx_msix_entries[i].index];
2271 if (qentry->have_irq) {
2272 free_irq(qentry->msix_vector, ha);
2273 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %s\n",
2274 __func__, qla4_8xxx_msix_entries[i].name));
2275 }
2276 }
2277 pci_disable_msix(ha->pdev);
2278 clear_bit(AF_MSIX_ENABLED, &ha->flags);
2279}
2280
2281int
2282qla4_8xxx_enable_msix(struct scsi_qla_host *ha)
2283{
2284 int i, ret;
2285 struct msix_entry entries[QLA_MSIX_ENTRIES];
2286 struct ql4_msix_entry *qentry;
2287
2288 for (i = 0; i < QLA_MSIX_ENTRIES; i++)
2289 entries[i].entry = qla4_8xxx_msix_entries[i].entry;
2290
2291 ret = pci_enable_msix(ha->pdev, entries, ARRAY_SIZE(entries));
2292 if (ret) {
2293 ql4_printk(KERN_WARNING, ha,
2294 "MSI-X: Failed to enable support -- %d/%d\n",
2295 QLA_MSIX_ENTRIES, ret);
2296 goto msix_out;
2297 }
2298 set_bit(AF_MSIX_ENABLED, &ha->flags);
2299
2300 for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
2301 qentry = &ha->msix_entries[qla4_8xxx_msix_entries[i].index];
2302 qentry->msix_vector = entries[i].vector;
2303 qentry->msix_entry = entries[i].entry;
2304 qentry->have_irq = 0;
2305 ret = request_irq(qentry->msix_vector,
2306 qla4_8xxx_msix_entries[i].handler, 0,
2307 qla4_8xxx_msix_entries[i].name, ha);
2308 if (ret) {
2309 ql4_printk(KERN_WARNING, ha,
2310 "MSI-X: Unable to register handler -- %x/%d.\n",
2311 qla4_8xxx_msix_entries[i].index, ret);
2312 qla4_8xxx_disable_msix(ha);
2313 goto msix_out;
2314 }
2315 qentry->have_irq = 1;
2316 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %s\n",
2317 __func__, qla4_8xxx_msix_entries[i].name));
2318 }
2319msix_out:
2320 return ret;
2321}
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
new file mode 100644
index 000000000000..931ad3f1e918
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -0,0 +1,779 @@
/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
7#ifndef __QLA_NX_H
8#define __QLA_NX_H
9
10/*
11 * Following are the states of the Phantom. Phantom will set them and
12 * Host will read to check if the fields are correct.
13*/
14#define PHAN_INITIALIZE_FAILED 0xffff
15#define PHAN_INITIALIZE_COMPLETE 0xff01
16
17/* Host writes the following to notify that it has done the init-handshake */
18#define PHAN_INITIALIZE_ACK 0xf00f
19#define PHAN_PEG_RCV_INITIALIZED 0xff01
20
21/*CRB_RELATED*/
22#define QLA82XX_CRB_BASE QLA82XX_CAM_RAM(0x200)
23#define QLA82XX_REG(X) (QLA82XX_CRB_BASE+(X))
24
25#define CRB_CMDPEG_STATE QLA82XX_REG(0x50)
26#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c)
27#define BOOT_LOADER_DIMM_STATUS QLA82XX_REG(0x54)
28#define CRB_DMA_SHIFT QLA82XX_REG(0xcc)
29
30#define QLA82XX_HW_H0_CH_HUB_ADR 0x05
31#define QLA82XX_HW_H1_CH_HUB_ADR 0x0E
32#define QLA82XX_HW_H2_CH_HUB_ADR 0x03
33#define QLA82XX_HW_H3_CH_HUB_ADR 0x01
34#define QLA82XX_HW_H4_CH_HUB_ADR 0x06
35#define QLA82XX_HW_H5_CH_HUB_ADR 0x07
36#define QLA82XX_HW_H6_CH_HUB_ADR 0x08
37
38/* Hub 0 */
39#define QLA82XX_HW_MN_CRB_AGT_ADR 0x15
40#define QLA82XX_HW_MS_CRB_AGT_ADR 0x25
41
42/* Hub 1 */
43#define QLA82XX_HW_PS_CRB_AGT_ADR 0x73
44#define QLA82XX_HW_QMS_CRB_AGT_ADR 0x00
45#define QLA82XX_HW_RPMX3_CRB_AGT_ADR 0x0b
46#define QLA82XX_HW_SQGS0_CRB_AGT_ADR 0x01
47#define QLA82XX_HW_SQGS1_CRB_AGT_ADR 0x02
48#define QLA82XX_HW_SQGS2_CRB_AGT_ADR 0x03
49#define QLA82XX_HW_SQGS3_CRB_AGT_ADR 0x04
50#define QLA82XX_HW_C2C0_CRB_AGT_ADR 0x58
51#define QLA82XX_HW_C2C1_CRB_AGT_ADR 0x59
52#define QLA82XX_HW_C2C2_CRB_AGT_ADR 0x5a
53#define QLA82XX_HW_RPMX2_CRB_AGT_ADR 0x0a
54#define QLA82XX_HW_RPMX4_CRB_AGT_ADR 0x0c
55#define QLA82XX_HW_RPMX7_CRB_AGT_ADR 0x0f
56#define QLA82XX_HW_RPMX9_CRB_AGT_ADR 0x12
57#define QLA82XX_HW_SMB_CRB_AGT_ADR 0x18
58
59/* Hub 2 */
60#define QLA82XX_HW_NIU_CRB_AGT_ADR 0x31
61#define QLA82XX_HW_I2C0_CRB_AGT_ADR 0x19
62#define QLA82XX_HW_I2C1_CRB_AGT_ADR 0x29
63
64#define QLA82XX_HW_SN_CRB_AGT_ADR 0x10
65#define QLA82XX_HW_I2Q_CRB_AGT_ADR 0x20
66#define QLA82XX_HW_LPC_CRB_AGT_ADR 0x22
67#define QLA82XX_HW_ROMUSB_CRB_AGT_ADR 0x21
68#define QLA82XX_HW_QM_CRB_AGT_ADR 0x66
69#define QLA82XX_HW_SQG0_CRB_AGT_ADR 0x60
70#define QLA82XX_HW_SQG1_CRB_AGT_ADR 0x61
71#define QLA82XX_HW_SQG2_CRB_AGT_ADR 0x62
72#define QLA82XX_HW_SQG3_CRB_AGT_ADR 0x63
73#define QLA82XX_HW_RPMX1_CRB_AGT_ADR 0x09
74#define QLA82XX_HW_RPMX5_CRB_AGT_ADR 0x0d
75#define QLA82XX_HW_RPMX6_CRB_AGT_ADR 0x0e
76#define QLA82XX_HW_RPMX8_CRB_AGT_ADR 0x11
77
78/* Hub 3 */
79#define QLA82XX_HW_PH_CRB_AGT_ADR 0x1A
80#define QLA82XX_HW_SRE_CRB_AGT_ADR 0x50
81#define QLA82XX_HW_EG_CRB_AGT_ADR 0x51
82#define QLA82XX_HW_RPMX0_CRB_AGT_ADR 0x08
83
84/* Hub 4 */
85#define QLA82XX_HW_PEGN0_CRB_AGT_ADR 0x40
86#define QLA82XX_HW_PEGN1_CRB_AGT_ADR 0x41
87#define QLA82XX_HW_PEGN2_CRB_AGT_ADR 0x42
88#define QLA82XX_HW_PEGN3_CRB_AGT_ADR 0x43
89#define QLA82XX_HW_PEGNI_CRB_AGT_ADR 0x44
90#define QLA82XX_HW_PEGND_CRB_AGT_ADR 0x45
91#define QLA82XX_HW_PEGNC_CRB_AGT_ADR 0x46
92#define QLA82XX_HW_PEGR0_CRB_AGT_ADR 0x47
93#define QLA82XX_HW_PEGR1_CRB_AGT_ADR 0x48
94#define QLA82XX_HW_PEGR2_CRB_AGT_ADR 0x49
95#define QLA82XX_HW_PEGR3_CRB_AGT_ADR 0x4a
96#define QLA82XX_HW_PEGN4_CRB_AGT_ADR 0x4b
97
98/* Hub 5 */
99#define QLA82XX_HW_PEGS0_CRB_AGT_ADR 0x40
100#define QLA82XX_HW_PEGS1_CRB_AGT_ADR 0x41
101#define QLA82XX_HW_PEGS2_CRB_AGT_ADR 0x42
102#define QLA82XX_HW_PEGS3_CRB_AGT_ADR 0x43
103
104#define QLA82XX_HW_PEGSI_CRB_AGT_ADR 0x44
105#define QLA82XX_HW_PEGSD_CRB_AGT_ADR 0x45
106#define QLA82XX_HW_PEGSC_CRB_AGT_ADR 0x46
107
108/* Hub 6 */
109#define QLA82XX_HW_CAS0_CRB_AGT_ADR 0x46
110#define QLA82XX_HW_CAS1_CRB_AGT_ADR 0x47
111#define QLA82XX_HW_CAS2_CRB_AGT_ADR 0x48
112#define QLA82XX_HW_CAS3_CRB_AGT_ADR 0x49
113#define QLA82XX_HW_NCM_CRB_AGT_ADR 0x16
114#define QLA82XX_HW_TMR_CRB_AGT_ADR 0x17
115#define QLA82XX_HW_XDMA_CRB_AGT_ADR 0x05
116#define QLA82XX_HW_OCM0_CRB_AGT_ADR 0x06
117#define QLA82XX_HW_OCM1_CRB_AGT_ADR 0x07
118
119/* This field defines PCI/X adr [25:20] of agents on the CRB */
120/* */
121#define QLA82XX_HW_PX_MAP_CRB_PH 0
122#define QLA82XX_HW_PX_MAP_CRB_PS 1
123#define QLA82XX_HW_PX_MAP_CRB_MN 2
124#define QLA82XX_HW_PX_MAP_CRB_MS 3
125#define QLA82XX_HW_PX_MAP_CRB_SRE 5
126#define QLA82XX_HW_PX_MAP_CRB_NIU 6
127#define QLA82XX_HW_PX_MAP_CRB_QMN 7
128#define QLA82XX_HW_PX_MAP_CRB_SQN0 8
129#define QLA82XX_HW_PX_MAP_CRB_SQN1 9
130#define QLA82XX_HW_PX_MAP_CRB_SQN2 10
131#define QLA82XX_HW_PX_MAP_CRB_SQN3 11
132#define QLA82XX_HW_PX_MAP_CRB_QMS 12
133#define QLA82XX_HW_PX_MAP_CRB_SQS0 13
134#define QLA82XX_HW_PX_MAP_CRB_SQS1 14
135#define QLA82XX_HW_PX_MAP_CRB_SQS2 15
136#define QLA82XX_HW_PX_MAP_CRB_SQS3 16
137#define QLA82XX_HW_PX_MAP_CRB_PGN0 17
138#define QLA82XX_HW_PX_MAP_CRB_PGN1 18
139#define QLA82XX_HW_PX_MAP_CRB_PGN2 19
140#define QLA82XX_HW_PX_MAP_CRB_PGN3 20
141#define QLA82XX_HW_PX_MAP_CRB_PGN4 QLA82XX_HW_PX_MAP_CRB_SQS2
142#define QLA82XX_HW_PX_MAP_CRB_PGND 21
143#define QLA82XX_HW_PX_MAP_CRB_PGNI 22
144#define QLA82XX_HW_PX_MAP_CRB_PGS0 23
145#define QLA82XX_HW_PX_MAP_CRB_PGS1 24
146#define QLA82XX_HW_PX_MAP_CRB_PGS2 25
147#define QLA82XX_HW_PX_MAP_CRB_PGS3 26
148#define QLA82XX_HW_PX_MAP_CRB_PGSD 27
149#define QLA82XX_HW_PX_MAP_CRB_PGSI 28
150#define QLA82XX_HW_PX_MAP_CRB_SN 29
151#define QLA82XX_HW_PX_MAP_CRB_EG 31
152#define QLA82XX_HW_PX_MAP_CRB_PH2 32
153#define QLA82XX_HW_PX_MAP_CRB_PS2 33
154#define QLA82XX_HW_PX_MAP_CRB_CAM 34
155#define QLA82XX_HW_PX_MAP_CRB_CAS0 35
156#define QLA82XX_HW_PX_MAP_CRB_CAS1 36
157#define QLA82XX_HW_PX_MAP_CRB_CAS2 37
158#define QLA82XX_HW_PX_MAP_CRB_C2C0 38
159#define QLA82XX_HW_PX_MAP_CRB_C2C1 39
160#define QLA82XX_HW_PX_MAP_CRB_TIMR 40
161#define QLA82XX_HW_PX_MAP_CRB_RPMX1 42
162#define QLA82XX_HW_PX_MAP_CRB_RPMX2 43
163#define QLA82XX_HW_PX_MAP_CRB_RPMX3 44
164#define QLA82XX_HW_PX_MAP_CRB_RPMX4 45
165#define QLA82XX_HW_PX_MAP_CRB_RPMX5 46
166#define QLA82XX_HW_PX_MAP_CRB_RPMX6 47
167#define QLA82XX_HW_PX_MAP_CRB_RPMX7 48
168#define QLA82XX_HW_PX_MAP_CRB_XDMA 49
169#define QLA82XX_HW_PX_MAP_CRB_I2Q 50
170#define QLA82XX_HW_PX_MAP_CRB_ROMUSB 51
171#define QLA82XX_HW_PX_MAP_CRB_CAS3 52
172#define QLA82XX_HW_PX_MAP_CRB_RPMX0 53
173#define QLA82XX_HW_PX_MAP_CRB_RPMX8 54
174#define QLA82XX_HW_PX_MAP_CRB_RPMX9 55
175#define QLA82XX_HW_PX_MAP_CRB_OCM0 56
176#define QLA82XX_HW_PX_MAP_CRB_OCM1 57
177#define QLA82XX_HW_PX_MAP_CRB_SMB 58
178#define QLA82XX_HW_PX_MAP_CRB_I2C0 59
179#define QLA82XX_HW_PX_MAP_CRB_I2C1 60
180#define QLA82XX_HW_PX_MAP_CRB_LPC 61
181#define QLA82XX_HW_PX_MAP_CRB_PGNC 62
182#define QLA82XX_HW_PX_MAP_CRB_PGR0 63
183#define QLA82XX_HW_PX_MAP_CRB_PGR1 4
184#define QLA82XX_HW_PX_MAP_CRB_PGR2 30
185#define QLA82XX_HW_PX_MAP_CRB_PGR3 41
186
187/* This field defines CRB adr [31:20] of the agents */
188/* */
189
190#define QLA82XX_HW_CRB_HUB_AGT_ADR_MN ((QLA82XX_HW_H0_CH_HUB_ADR << 7) | \
191 QLA82XX_HW_MN_CRB_AGT_ADR)
192#define QLA82XX_HW_CRB_HUB_AGT_ADR_PH ((QLA82XX_HW_H0_CH_HUB_ADR << 7) | \
193 QLA82XX_HW_PH_CRB_AGT_ADR)
194#define QLA82XX_HW_CRB_HUB_AGT_ADR_MS ((QLA82XX_HW_H0_CH_HUB_ADR << 7) | \
195 QLA82XX_HW_MS_CRB_AGT_ADR)
196#define QLA82XX_HW_CRB_HUB_AGT_ADR_PS ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
197 QLA82XX_HW_PS_CRB_AGT_ADR)
198#define QLA82XX_HW_CRB_HUB_AGT_ADR_SS ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
199 QLA82XX_HW_SS_CRB_AGT_ADR)
200#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
201 QLA82XX_HW_RPMX3_CRB_AGT_ADR)
202#define QLA82XX_HW_CRB_HUB_AGT_ADR_QMS ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
203 QLA82XX_HW_QMS_CRB_AGT_ADR)
204#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS0 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
205 QLA82XX_HW_SQGS0_CRB_AGT_ADR)
206#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS1 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
207 QLA82XX_HW_SQGS1_CRB_AGT_ADR)
208#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS2 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
209 QLA82XX_HW_SQGS2_CRB_AGT_ADR)
210#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS3 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
211 QLA82XX_HW_SQGS3_CRB_AGT_ADR)
212#define QLA82XX_HW_CRB_HUB_AGT_ADR_C2C0 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
213 QLA82XX_HW_C2C0_CRB_AGT_ADR)
214#define QLA82XX_HW_CRB_HUB_AGT_ADR_C2C1 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
215 QLA82XX_HW_C2C1_CRB_AGT_ADR)
216#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
217 QLA82XX_HW_RPMX2_CRB_AGT_ADR)
218#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
219 QLA82XX_HW_RPMX4_CRB_AGT_ADR)
220#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
221 QLA82XX_HW_RPMX7_CRB_AGT_ADR)
222#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
223 QLA82XX_HW_RPMX9_CRB_AGT_ADR)
224#define QLA82XX_HW_CRB_HUB_AGT_ADR_SMB ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
225 QLA82XX_HW_SMB_CRB_AGT_ADR)
226
227#define QLA82XX_HW_CRB_HUB_AGT_ADR_NIU ((QLA82XX_HW_H2_CH_HUB_ADR << 7) | \
228 QLA82XX_HW_NIU_CRB_AGT_ADR)
229#define QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0 ((QLA82XX_HW_H2_CH_HUB_ADR << 7) | \
230 QLA82XX_HW_I2C0_CRB_AGT_ADR)
231#define QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1 ((QLA82XX_HW_H2_CH_HUB_ADR << 7) | \
232 QLA82XX_HW_I2C1_CRB_AGT_ADR)
233
234#define QLA82XX_HW_CRB_HUB_AGT_ADR_SRE ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
235 QLA82XX_HW_SRE_CRB_AGT_ADR)
236#define QLA82XX_HW_CRB_HUB_AGT_ADR_EG ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
237 QLA82XX_HW_EG_CRB_AGT_ADR)
238#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
239 QLA82XX_HW_RPMX0_CRB_AGT_ADR)
240#define QLA82XX_HW_CRB_HUB_AGT_ADR_QMN ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
241 QLA82XX_HW_QM_CRB_AGT_ADR)
242#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
243 QLA82XX_HW_SQG0_CRB_AGT_ADR)
244#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
245 QLA82XX_HW_SQG1_CRB_AGT_ADR)
246#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
247 QLA82XX_HW_SQG2_CRB_AGT_ADR)
248#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
249 QLA82XX_HW_SQG3_CRB_AGT_ADR)
250#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
251 QLA82XX_HW_RPMX1_CRB_AGT_ADR)
252#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
253 QLA82XX_HW_RPMX5_CRB_AGT_ADR)
254#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
255 QLA82XX_HW_RPMX6_CRB_AGT_ADR)
256#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
257 QLA82XX_HW_RPMX8_CRB_AGT_ADR)
258#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS0 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
259 QLA82XX_HW_CAS0_CRB_AGT_ADR)
260#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS1 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
261 QLA82XX_HW_CAS1_CRB_AGT_ADR)
262#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS2 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
263 QLA82XX_HW_CAS2_CRB_AGT_ADR)
264#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS3 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
265 QLA82XX_HW_CAS3_CRB_AGT_ADR)
266
267#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
268 QLA82XX_HW_PEGNI_CRB_AGT_ADR)
269#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGND ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
270 QLA82XX_HW_PEGND_CRB_AGT_ADR)
271#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
272 QLA82XX_HW_PEGN0_CRB_AGT_ADR)
273#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
274 QLA82XX_HW_PEGN1_CRB_AGT_ADR)
275#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
276 QLA82XX_HW_PEGN2_CRB_AGT_ADR)
277#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
278 QLA82XX_HW_PEGN3_CRB_AGT_ADR)
279#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
280 QLA82XX_HW_PEGN4_CRB_AGT_ADR)
281
282#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
283 QLA82XX_HW_PEGNC_CRB_AGT_ADR)
284#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR0 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
285 QLA82XX_HW_PEGR0_CRB_AGT_ADR)
286#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR1 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
287 QLA82XX_HW_PEGR1_CRB_AGT_ADR)
288#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR2 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
289 QLA82XX_HW_PEGR2_CRB_AGT_ADR)
290#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR3 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
291 QLA82XX_HW_PEGR3_CRB_AGT_ADR)
292
293#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
294 QLA82XX_HW_PEGSI_CRB_AGT_ADR)
295#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGSD ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
296 QLA82XX_HW_PEGSD_CRB_AGT_ADR)
297#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
298 QLA82XX_HW_PEGS0_CRB_AGT_ADR)
299#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
300 QLA82XX_HW_PEGS1_CRB_AGT_ADR)
301#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
302 QLA82XX_HW_PEGS2_CRB_AGT_ADR)
303#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
304 QLA82XX_HW_PEGS3_CRB_AGT_ADR)
305#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGSC ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
306 QLA82XX_HW_PEGSC_CRB_AGT_ADR)
307
308#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAM ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
309 QLA82XX_HW_NCM_CRB_AGT_ADR)
310#define QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
311 QLA82XX_HW_TMR_CRB_AGT_ADR)
312#define QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
313 QLA82XX_HW_XDMA_CRB_AGT_ADR)
314#define QLA82XX_HW_CRB_HUB_AGT_ADR_SN ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
315 QLA82XX_HW_SN_CRB_AGT_ADR)
316#define QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
317 QLA82XX_HW_I2Q_CRB_AGT_ADR)
318#define QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
319 QLA82XX_HW_ROMUSB_CRB_AGT_ADR)
320#define QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0 ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
321 QLA82XX_HW_OCM0_CRB_AGT_ADR)
322#define QLA82XX_HW_CRB_HUB_AGT_ADR_OCM1 ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
323 QLA82XX_HW_OCM1_CRB_AGT_ADR)
324#define QLA82XX_HW_CRB_HUB_AGT_ADR_LPC ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
325 QLA82XX_HW_LPC_CRB_AGT_ADR)
326
327#define ROMUSB_GLB (QLA82XX_CRB_ROMUSB + 0x00000)
328#define QLA82XX_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c)
329#define QLA82XX_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004)
330#define QLA82XX_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008)
331#define QLA82XX_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008)
332#define QLA82XX_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c)
333#define QLA82XX_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010)
334#define QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014)
335#define QLA82XX_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018)
336
337#define ROMUSB_ROM (QLA82XX_CRB_ROMUSB + 0x10000)
338#define QLA82XX_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004)
339#define QLA82XX_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038)
340
341/* Lock IDs for ROM lock */
342#define ROM_LOCK_DRIVER 0x0d417340
343
344#define QLA82XX_PCI_CRB_WINDOWSIZE 0x00100000 /* all are 1MB windows */
345#define QLA82XX_PCI_CRB_WINDOW(A) (QLA82XX_PCI_CRBSPACE + \
346 (A)*QLA82XX_PCI_CRB_WINDOWSIZE)
347
348#define QLA82XX_CRB_C2C_0 \
349 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_C2C0)
350#define QLA82XX_CRB_C2C_1 \
351 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_C2C1)
352#define QLA82XX_CRB_C2C_2 \
353 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_C2C2)
354#define QLA82XX_CRB_CAM \
355 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAM)
356#define QLA82XX_CRB_CASPER \
357 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS)
358#define QLA82XX_CRB_CASPER_0 \
359 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS0)
360#define QLA82XX_CRB_CASPER_1 \
361 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS1)
362#define QLA82XX_CRB_CASPER_2 \
363 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS2)
364#define QLA82XX_CRB_DDR_MD \
365 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_MS)
366#define QLA82XX_CRB_DDR_NET \
367 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_MN)
368#define QLA82XX_CRB_EPG \
369 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_EG)
370#define QLA82XX_CRB_I2Q \
371 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_I2Q)
372#define QLA82XX_CRB_NIU \
373 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_NIU)
374/* HACK upon HACK upon HACK (for PCIE builds) */
375#define QLA82XX_CRB_PCIX_HOST \
376 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PH)
377#define QLA82XX_CRB_PCIX_HOST2 \
378 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PH2)
379#define QLA82XX_CRB_PCIX_MD \
380 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PS)
381#define QLA82XX_CRB_PCIE QLA82XX_CRB_PCIX_MD
382/* window 1 pcie slot */
383#define QLA82XX_CRB_PCIE2 \
384 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PS2)
385
386#define QLA82XX_CRB_PEG_MD_0 \
387 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS0)
388#define QLA82XX_CRB_PEG_MD_1 \
389 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS1)
390#define QLA82XX_CRB_PEG_MD_2 \
391 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS2)
/* Fix: QLA82XX_CRB_PEG_MD_3 was defined twice with an identical body
 * (copy-paste duplication); keep a single definition. */
#define QLA82XX_CRB_PEG_MD_3 \
	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS3)
396#define QLA82XX_CRB_PEG_MD_D \
397 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGSD)
398#define QLA82XX_CRB_PEG_MD_I \
399 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGSI)
/* Remaining per-agent CRB windows (PEG net side, queue managers, QDR,
 * ROM/USB, RPMX, SQM, SRE, timers, DMA, I2C, OCM, SMB). */
400#define QLA82XX_CRB_PEG_NET_0 \
401 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN0)
402#define QLA82XX_CRB_PEG_NET_1 \
403 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN1)
404#define QLA82XX_CRB_PEG_NET_2 \
405 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN2)
406#define QLA82XX_CRB_PEG_NET_3 \
407 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN3)
408#define QLA82XX_CRB_PEG_NET_4 \
409 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN4)
410#define QLA82XX_CRB_PEG_NET_D \
411 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGND)
412#define QLA82XX_CRB_PEG_NET_I \
413 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGNI)
414#define QLA82XX_CRB_PQM_MD \
415 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_QMS)
416#define QLA82XX_CRB_PQM_NET \
417 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_QMN)
418#define QLA82XX_CRB_QDR_MD \
419 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SS)
420#define QLA82XX_CRB_QDR_NET \
421 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SN)
422#define QLA82XX_CRB_ROMUSB \
423 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_ROMUSB)
424#define QLA82XX_CRB_RPMX_0 \
425 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX0)
426#define QLA82XX_CRB_RPMX_1 \
427 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX1)
428#define QLA82XX_CRB_RPMX_2 \
429 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX2)
430#define QLA82XX_CRB_RPMX_3 \
431 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX3)
432#define QLA82XX_CRB_RPMX_4 \
433 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX4)
434#define QLA82XX_CRB_RPMX_5 \
435 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX5)
436#define QLA82XX_CRB_RPMX_6 \
437 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX6)
438#define QLA82XX_CRB_RPMX_7 \
439 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX7)
440#define QLA82XX_CRB_SQM_MD_0 \
441 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS0)
442#define QLA82XX_CRB_SQM_MD_1 \
443 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS1)
444#define QLA82XX_CRB_SQM_MD_2 \
445 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS2)
446#define QLA82XX_CRB_SQM_MD_3 \
447 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS3)
448#define QLA82XX_CRB_SQM_NET_0 \
449 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN0)
450#define QLA82XX_CRB_SQM_NET_1 \
451 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN1)
452#define QLA82XX_CRB_SQM_NET_2 \
453 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN2)
454#define QLA82XX_CRB_SQM_NET_3 \
455 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN3)
456#define QLA82XX_CRB_SRE \
457 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SRE)
458#define QLA82XX_CRB_TIMER \
459 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_TIMR)
460#define QLA82XX_CRB_XDMA \
461 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_XDMA)
462#define QLA82XX_CRB_I2C0 \
463 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_I2C0)
464#define QLA82XX_CRB_I2C1 \
465 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_I2C1)
466#define QLA82XX_CRB_OCM0 \
467 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_OCM0)
468#define QLA82XX_CRB_SMB \
469 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SMB)
470
/* One past the last window: 64 windows of 1 MB each. */
471#define QLA82XX_CRB_MAX QLA82XX_PCI_CRB_WINDOW(64)
472
473/*
474 * ====================== BASE ADDRESSES ON-CHIP ======================
475 * Base addresses of major components on-chip.
476 * ====================== BASE ADDRESSES ON-CHIP ======================
477 */
478#define QLA82XX_ADDR_DDR_NET (0x0000000000000000ULL)
479#define QLA82XX_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
480
481/* Imbus address bit used to indicate a host address. This bit is
482 * eliminated by the pcie bar and bar select before presentation
483 * over pcie. */
484/* host memory via IMBUS */
485#define QLA82XX_P2_ADDR_PCIE (0x0000000800000000ULL)
486#define QLA82XX_P3_ADDR_PCIE (0x0000008000000000ULL)
487#define QLA82XX_ADDR_PCIE_MAX (0x0000000FFFFFFFFFULL)
488#define QLA82XX_ADDR_OCM0 (0x0000000200000000ULL)
489#define QLA82XX_ADDR_OCM0_MAX (0x00000002000fffffULL)
490#define QLA82XX_ADDR_OCM1 (0x0000000200400000ULL)
491#define QLA82XX_ADDR_OCM1_MAX (0x00000002004fffffULL)
492#define QLA82XX_ADDR_QDR_NET (0x0000000300000000ULL)
493
/* P2 vs P3 chip revisions expose different QDR net region sizes. */
494#define QLA82XX_P2_ADDR_QDR_NET_MAX (0x00000003001fffffULL)
495#define QLA82XX_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL)
496
/* Offsets of the major regions within the PCI BAR. */
497#define QLA82XX_PCI_CRBSPACE (unsigned long)0x06000000
498#define QLA82XX_PCI_DIRECT_CRB (unsigned long)0x04400000
499#define QLA82XX_PCI_CAMQM (unsigned long)0x04800000
500#define QLA82XX_PCI_CAMQM_MAX (unsigned long)0x04ffffff
501#define QLA82XX_PCI_DDR_NET (unsigned long)0x00000000
502#define QLA82XX_PCI_QDR_NET (unsigned long)0x04000000
503#define QLA82XX_PCI_QDR_NET_MAX (unsigned long)0x043fffff
504
505/*
506 * Register offsets for MN
507 */
508#define MIU_CONTROL (0x000)
509#define MIU_TAG (0x004)
510#define MIU_TEST_AGT_CTRL (0x090)
511#define MIU_TEST_AGT_ADDR_LO (0x094)
512#define MIU_TEST_AGT_ADDR_HI (0x098)
513#define MIU_TEST_AGT_WRDATA_LO (0x0a0)
514#define MIU_TEST_AGT_WRDATA_HI (0x0a4)
515#define MIU_TEST_AGT_WRDATA(i) (0x0a0+(4*(i)))
516#define MIU_TEST_AGT_RDDATA_LO (0x0a8)
517#define MIU_TEST_AGT_RDDATA_HI (0x0ac)
518#define MIU_TEST_AGT_RDDATA(i) (0x0a8+(4*(i)))
519#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
520#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
521
522/* MIU_TEST_AGT_CTRL flags. work for SIU as well */
523#define MIU_TA_CTL_START 1
524#define MIU_TA_CTL_ENABLE 2
525#define MIU_TA_CTL_WRITE 4
526#define MIU_TA_CTL_BUSY 8
527
528/*CAM RAM */
529# define QLA82XX_CAM_RAM_BASE (QLA82XX_CRB_CAM + 0x02000)
530# define QLA82XX_CAM_RAM(reg) (QLA82XX_CAM_RAM_BASE + (reg))
531
532#define QLA82XX_PEG_TUNE_MN_SPD_ZEROED 0x80000000
533#define QLA82XX_BOOT_LOADER_MN_ISSUE 0xff00ffff
534#define QLA82XX_PORT_MODE_ADDR (QLA82XX_CAM_RAM(0x24))
535#define QLA82XX_PEG_HALT_STATUS1 (QLA82XX_CAM_RAM(0xa8))
536#define QLA82XX_PEG_HALT_STATUS2 (QLA82XX_CAM_RAM(0xac))
537#define QLA82XX_PEG_ALIVE_COUNTER (QLA82XX_CAM_RAM(0xb0))
538
/* Bits read back from QLA82XX_PEG_HALT_STATUS1. */
539#define HALT_STATUS_UNRECOVERABLE 0x80000000
540#define HALT_STATUS_RECOVERABLE 0x40000000
541
542
543#define QLA82XX_ROM_LOCK_ID (QLA82XX_CAM_RAM(0x100))
544#define QLA82XX_CRB_WIN_LOCK_ID (QLA82XX_CAM_RAM(0x124))
545#define QLA82XX_FW_VERSION_MAJOR (QLA82XX_CAM_RAM(0x150))
546#define QLA82XX_FW_VERSION_MINOR (QLA82XX_CAM_RAM(0x154))
547#define QLA82XX_FW_VERSION_SUB (QLA82XX_CAM_RAM(0x158))
548#define QLA82XX_PCIE_REG(reg) (QLA82XX_CRB_PCIE + (reg))
549
/*
 * Driver Coexistence Defines — inter-driver-coordination scratch registers
 * held in CAM RAM, ordered by register offset.
 *
 * NOTE(review): the original header defined QLA82XX_CRB_DEV_PART_INFO
 * twice with the identical expansion (0x14c); the redundant duplicate is
 * removed here.
 */
#define QLA82XX_CRB_DRV_ACTIVE		(QLA82XX_CAM_RAM(0x138))
#define QLA82XX_CRB_DEV_STATE		(QLA82XX_CAM_RAM(0x140))
#define QLA82XX_CRB_DRV_STATE		(QLA82XX_CAM_RAM(0x144))
#define QLA82XX_CRB_DRV_SCRATCH		(QLA82XX_CAM_RAM(0x148))
#define QLA82XX_CRB_DEV_PART_INFO	(QLA82XX_CAM_RAM(0x14c))
#define QLA82XX_CRB_DRV_IDC_VERSION	(QLA82XX_CAM_RAM(0x174))
558
559/* Every driver should use these Device State */
560#define QLA82XX_DEV_COLD 1
561#define QLA82XX_DEV_INITIALIZING 2
562#define QLA82XX_DEV_READY 3
563#define QLA82XX_DEV_NEED_RESET 4
564#define QLA82XX_DEV_NEED_QUIESCENT 5
565#define QLA82XX_DEV_FAILED 6
566#define QLA82XX_DEV_QUIESCENT 7
567#define MAX_STATES 8 /* Increment if new state added */
568
/* Timeouts in seconds — presumably; confirm against users of these. */
569#define QLA82XX_IDC_VERSION 0x1
570#define ROM_DEV_INIT_TIMEOUT 30
571#define ROM_DRV_RESET_ACK_TIMEOUT 10
572
573#define PCIE_SETUP_FUNCTION (0x12040)
574#define PCIE_SETUP_FUNCTION2 (0x12048)
575
/* Offset a register within the PS (PCIX_MD) / PS2 (PCIE2) CRB windows. */
576#define QLA82XX_PCIX_PS_REG(reg) (QLA82XX_CRB_PCIX_MD + (reg))
577#define QLA82XX_PCIX_PS2_REG(reg) (QLA82XX_CRB_PCIE2 + (reg))
578
/* Hardware semaphore lock/unlock register offsets. */
579#define PCIE_SEM2_LOCK (0x1c010) /* Flash lock */
580#define PCIE_SEM2_UNLOCK (0x1c014) /* Flash unlock */
581#define PCIE_SEM5_LOCK (0x1c028) /* Coexistence lock */
582#define PCIE_SEM5_UNLOCK (0x1c02c) /* Coexistence unlock */
583#define PCIE_SEM7_LOCK (0x1c038) /* crb win lock */
584#define PCIE_SEM7_UNLOCK (0x1c03c) /* crbwin unlock*/
585
/*
 * MSI-X related defines. (The original comment here claimed "PCI VendorID
 * and DeviceID for our board", which did not match these definitions.)
 */
589#define QLA82XX_MSIX_TBL_SPACE 8192
590#define QLA82XX_PCI_REG_MSIX_TBL 0x44
591#define QLA82XX_PCI_MSIX_CONTROL 0x40
592
/* One sub-block of the 128M-layout CRB space and where it lands in the
 * 2M layout — assumed from the field names; TODO confirm against the
 * address-translation code that consumes this. */
593struct crb_128M_2M_sub_block_map {
594 unsigned valid;
595 unsigned start_128M;
596 unsigned end_128M;
597 unsigned start_2M;
598};
599
/* Per-window translation table: 16 sub-blocks per CRB window. */
600struct crb_128M_2M_block_map {
601 struct crb_128M_2M_sub_block_map sub_block[16];
602};
603
/* A (register address, value) pair, e.g. for replaying init sequences —
 * presumably; verify against the code that iterates these. */
604struct crb_addr_pair {
605 long addr;
606 long data;
607};
608
/* Sentinel returned when an address cannot be translated. */
609#define ADDR_ERROR ((unsigned long) 0xffffffff)
/* Max polling iterations when waiting on a control register. */
610#define MAX_CTL_CHECK 1000
611
612/***************************************************************************
613 * PCI related defines.
614 **************************************************************************/
615
616/*
617 * Interrupt related defines.
618 */
/* Per-PCI-function target interrupt status registers (F0 uses the
 * un-suffixed name). */
619#define PCIX_TARGET_STATUS (0x10118)
620#define PCIX_TARGET_STATUS_F1 (0x10160)
621#define PCIX_TARGET_STATUS_F2 (0x10164)
622#define PCIX_TARGET_STATUS_F3 (0x10168)
623#define PCIX_TARGET_STATUS_F4 (0x10360)
624#define PCIX_TARGET_STATUS_F5 (0x10364)
625#define PCIX_TARGET_STATUS_F6 (0x10368)
626#define PCIX_TARGET_STATUS_F7 (0x1036c)
627
/* Matching per-function interrupt mask registers. */
628#define PCIX_TARGET_MASK (0x10128)
629#define PCIX_TARGET_MASK_F1 (0x10170)
630#define PCIX_TARGET_MASK_F2 (0x10174)
631#define PCIX_TARGET_MASK_F3 (0x10178)
632#define PCIX_TARGET_MASK_F4 (0x10370)
633#define PCIX_TARGET_MASK_F5 (0x10374)
634#define PCIX_TARGET_MASK_F6 (0x10378)
635#define PCIX_TARGET_MASK_F7 (0x1037c)
636
637/*
638 * Message Signaled Interrupts
639 */
640#define PCIX_MSI_F0 (0x13000)
641#define PCIX_MSI_F1 (0x13004)
642#define PCIX_MSI_F2 (0x13008)
643#define PCIX_MSI_F3 (0x1300c)
644#define PCIX_MSI_F4 (0x13010)
645#define PCIX_MSI_F5 (0x13014)
646#define PCIX_MSI_F6 (0x13018)
647#define PCIX_MSI_F7 (0x1301c)
/* Generic form of the PCIX_MSI_F0..F7 offsets above. */
648#define PCIX_MSI_F(FUNC) (0x13000 + ((FUNC) * 4))
649
650/*
 * Interrupt vector and mask registers. (This comment block was empty in
 * the original.)
652 */
653#define PCIX_INT_VECTOR (0x10100)
654#define PCIX_INT_MASK (0x10104)
655
656/*
657 * Interrupt state machine and other bits.
658 */
659#define PCIE_MISCCFG_RC (0x1206c)
660
661
/* ISR_* wrappers place the raw offsets above inside the PS CRB window. */
662#define ISR_INT_TARGET_STATUS \
663 (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS))
664#define ISR_INT_TARGET_STATUS_F1 \
665 (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F1))
666#define ISR_INT_TARGET_STATUS_F2 \
667 (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F2))
668#define ISR_INT_TARGET_STATUS_F3 \
669 (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F3))
670#define ISR_INT_TARGET_STATUS_F4 \
671 (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F4))
672#define ISR_INT_TARGET_STATUS_F5 \
673 (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F5))
674#define ISR_INT_TARGET_STATUS_F6 \
675 (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F6))
676#define ISR_INT_TARGET_STATUS_F7 \
677 (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
678
679#define ISR_INT_TARGET_MASK \
680 (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK))
681#define ISR_INT_TARGET_MASK_F1 \
682 (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F1))
683#define ISR_INT_TARGET_MASK_F2 \
684 (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F2))
685#define ISR_INT_TARGET_MASK_F3 \
686 (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F3))
687#define ISR_INT_TARGET_MASK_F4 \
688 (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F4))
689#define ISR_INT_TARGET_MASK_F5 \
690 (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F5))
691#define ISR_INT_TARGET_MASK_F6 \
692 (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F6))
693#define ISR_INT_TARGET_MASK_F7 \
694 (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
695
696#define ISR_INT_VECTOR (QLA82XX_PCIX_PS_REG(PCIX_INT_VECTOR))
697#define ISR_INT_MASK (QLA82XX_PCIX_PS_REG(PCIX_INT_MASK))
698#define ISR_INT_STATE_REG (QLA82XX_PCIX_PS_REG(PCIE_MISCCFG_RC))
699
700#define ISR_MSI_INT_TRIGGER(FUNC) (QLA82XX_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
701
702
/* Decode the 2-bit legacy-interrupt state field (bits 8-9) of VAL. */
703#define ISR_IS_LEGACY_INTR_IDLE(VAL) (((VAL) & 0x300) == 0)
704#define ISR_IS_LEGACY_INTR_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
705
706/*
707 * PCI Interrupt Vector Values.
708 */
/* One bit per PCI function F0..F7 in the interrupt vector register. */
709#define PCIX_INT_VECTOR_BIT_F0 0x0080
710#define PCIX_INT_VECTOR_BIT_F1 0x0100
711#define PCIX_INT_VECTOR_BIT_F2 0x0200
712#define PCIX_INT_VECTOR_BIT_F3 0x0400
713#define PCIX_INT_VECTOR_BIT_F4 0x0800
714#define PCIX_INT_VECTOR_BIT_F5 0x1000
715#define PCIX_INT_VECTOR_BIT_F6 0x2000
716#define PCIX_INT_VECTOR_BIT_F7 0x4000
717
718/* struct qla4_8xxx_legacy_intr_set defined in ql4_def.h */
719
/* Initializer for an array of struct qla4_8xxx_legacy_intr_set: the
 * per-PCI-function (F0..F7) legacy-interrupt vector bit plus its target
 * status, mask and MSI trigger registers. No comments can be placed
 * inside the macro without breaking the line continuations. */
720#define QLA82XX_LEGACY_INTR_CONFIG \
721{ \
722 { \
723 .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \
724 .tgt_status_reg = ISR_INT_TARGET_STATUS, \
725 .tgt_mask_reg = ISR_INT_TARGET_MASK, \
726 .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \
727 \
728 { \
729 .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \
730 .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \
731 .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \
732 .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \
733 \
734 { \
735 .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \
736 .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \
737 .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \
738 .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \
739 \
740 { \
741 .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \
742 .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \
743 .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \
744 .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \
745 \
746 { \
747 .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \
748 .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \
749 .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \
750 .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \
751 \
752 { \
753 .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \
754 .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \
755 .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \
756 .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \
757 \
758 { \
759 .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \
760 .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \
761 .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, \
762 .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \
763 \
764 { \
765 .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \
766 .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \
767 .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \
768 .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \
769}
770
771/* Magic number to let user know flash is programmed */
772#define QLA82XX_BDINFO_MAGIC 0x12345678
/* Flash offset where the firmware image size is stored — presumably;
 * confirm against the flash-read code that uses it. */
773#define FW_SIZE_OFFSET (0x3e840c)
774
775/* QLA82XX additions */
776#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x0b0)
777#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x0b4)
778
779#endif
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 38b1d38afca5..5529b2a39741 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -30,22 +30,29 @@ static struct kmem_cache *srb_cachep;
30 * Module parameter information and variables 30 * Module parameter information and variables
31 */ 31 */
32int ql4xdiscoverywait = 60; 32int ql4xdiscoverywait = 60;
33module_param(ql4xdiscoverywait, int, S_IRUGO | S_IRUSR); 33module_param(ql4xdiscoverywait, int, S_IRUGO | S_IWUSR);
34MODULE_PARM_DESC(ql4xdiscoverywait, "Discovery wait time"); 34MODULE_PARM_DESC(ql4xdiscoverywait, "Discovery wait time");
35
35int ql4xdontresethba = 0; 36int ql4xdontresethba = 0;
36module_param(ql4xdontresethba, int, S_IRUGO | S_IRUSR); 37module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
37MODULE_PARM_DESC(ql4xdontresethba, 38MODULE_PARM_DESC(ql4xdontresethba,
38 "Dont reset the HBA when the driver gets 0x8002 AEN " 39 "Don't reset the HBA for driver recovery \n"
39 " default it will reset hba :0" 40 " 0 - It will reset HBA (Default)\n"
40 " set to 1 to avoid resetting HBA"); 41 " 1 - It will NOT reset HBA");
41 42
42int ql4xextended_error_logging = 0; /* 0 = off, 1 = log errors */ 43int ql4xextended_error_logging = 0; /* 0 = off, 1 = log errors */
43module_param(ql4xextended_error_logging, int, S_IRUGO | S_IRUSR); 44module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
44MODULE_PARM_DESC(ql4xextended_error_logging, 45MODULE_PARM_DESC(ql4xextended_error_logging,
45 "Option to enable extended error logging, " 46 "Option to enable extended error logging, "
46 "Default is 0 - no logging, 1 - debug logging"); 47 "Default is 0 - no logging, 1 - debug logging");
47 48
48int ql4_mod_unload = 0; 49int ql4xenablemsix = 1;
50module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
51MODULE_PARM_DESC(ql4xenablemsix,
52 "Set to enable MSI or MSI-X interrupt mechanism.\n"
53 " 0 = enable INTx interrupt mechanism.\n"
54 " 1 = enable MSI-X interrupt mechanism (Default).\n"
55 " 2 = enable MSI interrupt mechanism.");
49 56
50#define QL4_DEF_QDEPTH 32 57#define QL4_DEF_QDEPTH 32
51 58
@@ -83,6 +90,9 @@ static int qla4xxx_slave_configure(struct scsi_device *device);
83static void qla4xxx_slave_destroy(struct scsi_device *sdev); 90static void qla4xxx_slave_destroy(struct scsi_device *sdev);
84static void qla4xxx_scan_start(struct Scsi_Host *shost); 91static void qla4xxx_scan_start(struct Scsi_Host *shost);
85 92
93static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
94 QLA82XX_LEGACY_INTR_CONFIG;
95
86static struct scsi_host_template qla4xxx_driver_template = { 96static struct scsi_host_template qla4xxx_driver_template = {
87 .module = THIS_MODULE, 97 .module = THIS_MODULE,
88 .name = DRIVER_NAME, 98 .name = DRIVER_NAME,
@@ -116,7 +126,8 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
116 .caps = CAP_FW_DB | CAP_SENDTARGETS_OFFLOAD | 126 .caps = CAP_FW_DB | CAP_SENDTARGETS_OFFLOAD |
117 CAP_DATA_PATH_OFFLOAD, 127 CAP_DATA_PATH_OFFLOAD,
118 .param_mask = ISCSI_CONN_PORT | ISCSI_CONN_ADDRESS | 128 .param_mask = ISCSI_CONN_PORT | ISCSI_CONN_ADDRESS |
119 ISCSI_TARGET_NAME | ISCSI_TPGT, 129 ISCSI_TARGET_NAME | ISCSI_TPGT |
130 ISCSI_TARGET_ALIAS,
120 .host_param_mask = ISCSI_HOST_HWADDRESS | 131 .host_param_mask = ISCSI_HOST_HWADDRESS |
121 ISCSI_HOST_IPADDRESS | 132 ISCSI_HOST_IPADDRESS |
122 ISCSI_HOST_INITIATOR_NAME, 133 ISCSI_HOST_INITIATOR_NAME,
@@ -152,15 +163,12 @@ static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session)
152 if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) { 163 if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) {
153 atomic_set(&ddb_entry->state, DDB_STATE_DEAD); 164 atomic_set(&ddb_entry->state, DDB_STATE_DEAD);
154 165
155 DEBUG2(printk("scsi%ld: %s: index [%d] port down retry count " 166 DEBUG2(printk("scsi%ld: %s: ddb [%d] port down retry count "
156 "of (%d) secs exhausted, marking device DEAD.\n", 167 "of (%d) secs exhausted, marking device DEAD.\n",
157 ha->host_no, __func__, ddb_entry->fw_ddb_index, 168 ha->host_no, __func__, ddb_entry->fw_ddb_index,
158 ha->port_down_retry_count)); 169 ha->port_down_retry_count));
159 170
160 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine - dpc " 171 qla4xxx_wake_dpc(ha);
161 "flags = 0x%lx\n",
162 ha->host_no, __func__, ha->dpc_flags));
163 queue_work(ha->dpc_thread, &ha->dpc_work);
164 } 172 }
165} 173}
166 174
@@ -203,6 +211,10 @@ static int qla4xxx_sess_get_param(struct iscsi_cls_session *sess,
203 case ISCSI_PARAM_TPGT: 211 case ISCSI_PARAM_TPGT:
204 len = sprintf(buf, "%u\n", ddb_entry->tpgt); 212 len = sprintf(buf, "%u\n", ddb_entry->tpgt);
205 break; 213 break;
214 case ISCSI_PARAM_TARGET_ALIAS:
215 len = snprintf(buf, PAGE_SIZE - 1, "%s\n",
216 ddb_entry->iscsi_alias);
217 break;
206 default: 218 default:
207 return -ENOSYS; 219 return -ENOSYS;
208 } 220 }
@@ -362,19 +374,37 @@ static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
362 * @ha: Pointer to host adapter structure. 374 * @ha: Pointer to host adapter structure.
363 * @ddb_entry: Pointer to device database entry 375 * @ddb_entry: Pointer to device database entry
364 * 376 *
365 * This routine marks a device missing and resets the relogin retry count. 377 * This routine marks a device missing and close connection.
366 **/ 378 **/
367void qla4xxx_mark_device_missing(struct scsi_qla_host *ha, 379void qla4xxx_mark_device_missing(struct scsi_qla_host *ha,
368 struct ddb_entry *ddb_entry) 380 struct ddb_entry *ddb_entry)
369{ 381{
370 atomic_set(&ddb_entry->state, DDB_STATE_MISSING); 382 if ((atomic_read(&ddb_entry->state) != DDB_STATE_DEAD)) {
371 DEBUG3(printk("scsi%d:%d:%d: index [%d] marked MISSING\n", 383 atomic_set(&ddb_entry->state, DDB_STATE_MISSING);
372 ha->host_no, ddb_entry->bus, ddb_entry->target, 384 DEBUG2(printk("scsi%ld: ddb [%d] marked MISSING\n",
373 ddb_entry->fw_ddb_index)); 385 ha->host_no, ddb_entry->fw_ddb_index));
386 } else
387 DEBUG2(printk("scsi%ld: ddb [%d] DEAD\n", ha->host_no,
388 ddb_entry->fw_ddb_index))
389
374 iscsi_block_session(ddb_entry->sess); 390 iscsi_block_session(ddb_entry->sess);
375 iscsi_conn_error_event(ddb_entry->conn, ISCSI_ERR_CONN_FAILED); 391 iscsi_conn_error_event(ddb_entry->conn, ISCSI_ERR_CONN_FAILED);
376} 392}
377 393
394/**
395 * qla4xxx_mark_all_devices_missing - mark all devices as missing.
396 * @ha: Pointer to host adapter structure.
397 *
398 * This routine marks a device missing and resets the relogin retry count.
399 **/
400void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
401{
402 struct ddb_entry *ddb_entry, *ddbtemp;
403 list_for_each_entry_safe(ddb_entry, ddbtemp, &ha->ddb_list, list) {
404 qla4xxx_mark_device_missing(ha, ddb_entry);
405 }
406}
407
378static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha, 408static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
379 struct ddb_entry *ddb_entry, 409 struct ddb_entry *ddb_entry,
380 struct scsi_cmnd *cmd, 410 struct scsi_cmnd *cmd,
@@ -463,7 +493,13 @@ static int qla4xxx_queuecommand(struct scsi_cmnd *cmd,
463 return SCSI_MLQUEUE_TARGET_BUSY; 493 return SCSI_MLQUEUE_TARGET_BUSY;
464 } 494 }
465 495
466 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) 496 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
497 test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
498 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
499 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
500 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
501 !test_bit(AF_ONLINE, &ha->flags) ||
502 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
467 goto qc_host_busy; 503 goto qc_host_busy;
468 504
469 spin_unlock_irq(ha->host->host_lock); 505 spin_unlock_irq(ha->host->host_lock);
@@ -524,7 +560,15 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
524 ha->srb_mempool = NULL; 560 ha->srb_mempool = NULL;
525 561
526 /* release io space registers */ 562 /* release io space registers */
527 if (ha->reg) 563 if (is_qla8022(ha)) {
564 if (ha->nx_pcibase)
565 iounmap(
566 (struct device_reg_82xx __iomem *)ha->nx_pcibase);
567
568 if (ha->nx_db_wr_ptr)
569 iounmap(
570 (struct device_reg_82xx __iomem *)ha->nx_db_wr_ptr);
571 } else if (ha->reg)
528 iounmap(ha->reg); 572 iounmap(ha->reg);
529 pci_release_regions(ha->pdev); 573 pci_release_regions(ha->pdev);
530} 574}
@@ -549,8 +593,8 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
549 ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len, 593 ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
550 &ha->queues_dma, GFP_KERNEL); 594 &ha->queues_dma, GFP_KERNEL);
551 if (ha->queues == NULL) { 595 if (ha->queues == NULL) {
552 dev_warn(&ha->pdev->dev, 596 ql4_printk(KERN_WARNING, ha,
553 "Memory Allocation failed - queues.\n"); 597 "Memory Allocation failed - queues.\n");
554 598
555 goto mem_alloc_error_exit; 599 goto mem_alloc_error_exit;
556 } 600 }
@@ -586,8 +630,8 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
586 ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab, 630 ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
587 mempool_free_slab, srb_cachep); 631 mempool_free_slab, srb_cachep);
588 if (ha->srb_mempool == NULL) { 632 if (ha->srb_mempool == NULL) {
589 dev_warn(&ha->pdev->dev, 633 ql4_printk(KERN_WARNING, ha,
590 "Memory Allocation failed - SRB Pool.\n"); 634 "Memory Allocation failed - SRB Pool.\n");
591 635
592 goto mem_alloc_error_exit; 636 goto mem_alloc_error_exit;
593 } 637 }
@@ -600,6 +644,74 @@ mem_alloc_error_exit:
600} 644}
601 645
602/** 646/**
647 * qla4_8xxx_check_fw_alive - Check firmware health
648 * @ha: Pointer to host adapter structure.
649 *
650 * Context: Interrupt
651 **/
652static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
653{
654 uint32_t fw_heartbeat_counter, halt_status;
655
656 fw_heartbeat_counter = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
657
658 if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
659 ha->seconds_since_last_heartbeat++;
660 /* FW not alive after 2 seconds */
661 if (ha->seconds_since_last_heartbeat == 2) {
662 ha->seconds_since_last_heartbeat = 0;
663 halt_status = qla4_8xxx_rd_32(ha,
664 QLA82XX_PEG_HALT_STATUS1);
665 /* Since we cannot change dev_state in interrupt
666 * context, set appropriate DPC flag then wakeup
667 * DPC */
668 if (halt_status & HALT_STATUS_UNRECOVERABLE)
669 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
670 else {
671 printk("scsi%ld: %s: detect abort needed!\n",
672 ha->host_no, __func__);
673 set_bit(DPC_RESET_HA, &ha->dpc_flags);
674 }
675 qla4xxx_wake_dpc(ha);
676 }
677 }
678 ha->fw_heartbeat_counter = fw_heartbeat_counter;
679}
680
681/**
682 * qla4_8xxx_watchdog - Poll dev state
683 * @ha: Pointer to host adapter structure.
684 *
685 * Context: Interrupt
686 **/
687void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
688{
689 uint32_t dev_state;
690
691 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
692
693 /* don't poll if reset is going on */
694 if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags)) {
695 if (dev_state == QLA82XX_DEV_NEED_RESET &&
696 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
697 printk("scsi%ld: %s: HW State: NEED RESET!\n",
698 ha->host_no, __func__);
699 set_bit(DPC_RESET_HA, &ha->dpc_flags);
700 qla4xxx_wake_dpc(ha);
701 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
702 !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
703 printk("scsi%ld: %s: HW State: NEED QUIES!\n",
704 ha->host_no, __func__);
705 set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags);
706 qla4xxx_wake_dpc(ha);
707 } else {
708 /* Check firmware health */
709 qla4_8xxx_check_fw_alive(ha);
710 }
711 }
712}
713
714/**
603 * qla4xxx_timer - checks every second for work to do. 715 * qla4xxx_timer - checks every second for work to do.
604 * @ha: Pointer to host adapter structure. 716 * @ha: Pointer to host adapter structure.
605 **/ 717 **/
@@ -608,6 +720,16 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
608 struct ddb_entry *ddb_entry, *dtemp; 720 struct ddb_entry *ddb_entry, *dtemp;
609 int start_dpc = 0; 721 int start_dpc = 0;
610 722
723 if (test_bit(AF_HBA_GOING_AWAY, &ha->flags)) {
724 DEBUG2(ql4_printk(KERN_INFO, ha, "%s exited. HBA GOING AWAY\n",
725 __func__));
726 return;
727 }
728
729 if (is_qla8022(ha)) {
730 qla4_8xxx_watchdog(ha);
731 }
732
611 /* Search for relogin's to time-out and port down retry. */ 733 /* Search for relogin's to time-out and port down retry. */
612 list_for_each_entry_safe(ddb_entry, dtemp, &ha->ddb_list, list) { 734 list_for_each_entry_safe(ddb_entry, dtemp, &ha->ddb_list, list) {
613 /* Count down time between sending relogins */ 735 /* Count down time between sending relogins */
@@ -624,7 +746,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
624 set_bit(DPC_RELOGIN_DEVICE, 746 set_bit(DPC_RELOGIN_DEVICE,
625 &ha->dpc_flags); 747 &ha->dpc_flags);
626 set_bit(DF_RELOGIN, &ddb_entry->flags); 748 set_bit(DF_RELOGIN, &ddb_entry->flags);
627 DEBUG2(printk("scsi%ld: %s: index [%d]" 749 DEBUG2(printk("scsi%ld: %s: ddb [%d]"
628 " login device\n", 750 " login device\n",
629 ha->host_no, __func__, 751 ha->host_no, __func__,
630 ddb_entry->fw_ddb_index)); 752 ddb_entry->fw_ddb_index));
@@ -647,7 +769,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
647 DDB_DS_SESSION_FAILED) { 769 DDB_DS_SESSION_FAILED) {
648 /* Reset retry relogin timer */ 770 /* Reset retry relogin timer */
649 atomic_inc(&ddb_entry->relogin_retry_count); 771 atomic_inc(&ddb_entry->relogin_retry_count);
650 DEBUG2(printk("scsi%ld: index[%d] relogin" 772 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
651 " timed out-retrying" 773 " timed out-retrying"
652 " relogin (%d)\n", 774 " relogin (%d)\n",
653 ha->host_no, 775 ha->host_no,
@@ -656,7 +778,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
656 relogin_retry_count)) 778 relogin_retry_count))
657 ); 779 );
658 start_dpc++; 780 start_dpc++;
659 DEBUG(printk("scsi%ld:%d:%d: index [%d] " 781 DEBUG(printk("scsi%ld:%d:%d: ddb [%d] "
660 "initate relogin after" 782 "initate relogin after"
661 " %d seconds\n", 783 " %d seconds\n",
662 ha->host_no, ddb_entry->bus, 784 ha->host_no, ddb_entry->bus,
@@ -671,31 +793,35 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
671 } 793 }
672 } 794 }
673 795
674 /* Check for heartbeat interval. */ 796 if (!is_qla8022(ha)) {
675 if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE && 797 /* Check for heartbeat interval. */
676 ha->heartbeat_interval != 0) { 798 if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
677 ha->seconds_since_last_heartbeat++; 799 ha->heartbeat_interval != 0) {
678 if (ha->seconds_since_last_heartbeat > 800 ha->seconds_since_last_heartbeat++;
679 ha->heartbeat_interval + 2) 801 if (ha->seconds_since_last_heartbeat >
680 set_bit(DPC_RESET_HA, &ha->dpc_flags); 802 ha->heartbeat_interval + 2)
803 set_bit(DPC_RESET_HA, &ha->dpc_flags);
804 }
681 } 805 }
682 806
683
684 /* Wakeup the dpc routine for this adapter, if needed. */ 807 /* Wakeup the dpc routine for this adapter, if needed. */
685 if ((start_dpc || 808 if ((start_dpc ||
686 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 809 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
687 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) || 810 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
688 test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) || 811 test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
689 test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags) || 812 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
690 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || 813 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
691 test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) || 814 test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
692 test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) || 815 test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
816 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
817 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
693 test_bit(DPC_AEN, &ha->dpc_flags)) && 818 test_bit(DPC_AEN, &ha->dpc_flags)) &&
819 !test_bit(AF_DPC_SCHEDULED, &ha->flags) &&
694 ha->dpc_thread) { 820 ha->dpc_thread) {
695 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine" 821 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
696 " - dpc flags = 0x%lx\n", 822 " - dpc flags = 0x%lx\n",
697 ha->host_no, __func__, ha->dpc_flags)); 823 ha->host_no, __func__, ha->dpc_flags));
698 queue_work(ha->dpc_thread, &ha->dpc_work); 824 qla4xxx_wake_dpc(ha);
699 } 825 }
700 826
701 /* Reschedule timer thread to call us back in one second */ 827 /* Reschedule timer thread to call us back in one second */
@@ -714,16 +840,15 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
714static int qla4xxx_cmd_wait(struct scsi_qla_host *ha) 840static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
715{ 841{
716 uint32_t index = 0; 842 uint32_t index = 0;
717 int stat = QLA_SUCCESS;
718 unsigned long flags; 843 unsigned long flags;
719 struct scsi_cmnd *cmd; 844 struct scsi_cmnd *cmd;
720 int wait_cnt = WAIT_CMD_TOV; /*
721 * Initialized for 30 seconds as we
722 * expect all commands to retuned
723 * ASAP.
724 */
725 845
726 while (wait_cnt) { 846 unsigned long wtime = jiffies + (WAIT_CMD_TOV * HZ);
847
848 DEBUG2(ql4_printk(KERN_INFO, ha, "Wait up to %d seconds for cmds to "
849 "complete\n", WAIT_CMD_TOV));
850
851 while (!time_after_eq(jiffies, wtime)) {
727 spin_lock_irqsave(&ha->hardware_lock, flags); 852 spin_lock_irqsave(&ha->hardware_lock, flags);
728 /* Find a command that hasn't completed. */ 853 /* Find a command that hasn't completed. */
729 for (index = 0; index < ha->host->can_queue; index++) { 854 for (index = 0; index < ha->host->can_queue; index++) {
@@ -734,31 +859,26 @@ static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
734 spin_unlock_irqrestore(&ha->hardware_lock, flags); 859 spin_unlock_irqrestore(&ha->hardware_lock, flags);
735 860
736 /* If No Commands are pending, wait is complete */ 861 /* If No Commands are pending, wait is complete */
737 if (index == ha->host->can_queue) { 862 if (index == ha->host->can_queue)
738 break; 863 return QLA_SUCCESS;
739 }
740
741 /* If we timed out on waiting for commands to come back
742 * return ERROR.
743 */
744 wait_cnt--;
745 if (wait_cnt == 0)
746 stat = QLA_ERROR;
747 else {
748 msleep(1000);
749 }
750 } /* End of While (wait_cnt) */
751 864
752 return stat; 865 msleep(1000);
866 }
867 /* If we timed out on waiting for commands to come back
868 * return ERROR. */
869 return QLA_ERROR;
753} 870}
754 871
755void qla4xxx_hw_reset(struct scsi_qla_host *ha) 872int qla4xxx_hw_reset(struct scsi_qla_host *ha)
756{ 873{
757 uint32_t ctrl_status; 874 uint32_t ctrl_status;
758 unsigned long flags = 0; 875 unsigned long flags = 0;
759 876
760 DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__)); 877 DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__));
761 878
879 if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
880 return QLA_ERROR;
881
762 spin_lock_irqsave(&ha->hardware_lock, flags); 882 spin_lock_irqsave(&ha->hardware_lock, flags);
763 883
764 /* 884 /*
@@ -774,6 +894,7 @@ void qla4xxx_hw_reset(struct scsi_qla_host *ha)
774 readl(&ha->reg->ctrl_status); 894 readl(&ha->reg->ctrl_status);
775 895
776 spin_unlock_irqrestore(&ha->hardware_lock, flags); 896 spin_unlock_irqrestore(&ha->hardware_lock, flags);
897 return QLA_SUCCESS;
777} 898}
778 899
779/** 900/**
@@ -872,15 +993,16 @@ int qla4xxx_soft_reset(struct scsi_qla_host *ha)
872} 993}
873 994
874/** 995/**
875 * qla4xxx_flush_active_srbs - returns all outstanding i/o requests to O.S. 996 * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S.
876 * @ha: Pointer to host adapter structure. 997 * @ha: Pointer to host adapter structure.
998 * @res: returned scsi status
877 * 999 *
878 * This routine is called just prior to a HARD RESET to return all 1000 * This routine is called just prior to a HARD RESET to return all
879 * outstanding commands back to the Operating System. 1001 * outstanding commands back to the Operating System.
880 * Caller should make sure that the following locks are released 1002 * Caller should make sure that the following locks are released
881 * before this calling routine: Hardware lock, and io_request_lock. 1003 * before this calling routine: Hardware lock, and io_request_lock.
882 **/ 1004 **/
883static void qla4xxx_flush_active_srbs(struct scsi_qla_host *ha) 1005static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res)
884{ 1006{
885 struct srb *srb; 1007 struct srb *srb;
886 int i; 1008 int i;
@@ -890,74 +1012,116 @@ static void qla4xxx_flush_active_srbs(struct scsi_qla_host *ha)
890 for (i = 0; i < ha->host->can_queue; i++) { 1012 for (i = 0; i < ha->host->can_queue; i++) {
891 srb = qla4xxx_del_from_active_array(ha, i); 1013 srb = qla4xxx_del_from_active_array(ha, i);
892 if (srb != NULL) { 1014 if (srb != NULL) {
893 srb->cmd->result = DID_RESET << 16; 1015 srb->cmd->result = res;
894 kref_put(&srb->srb_ref, qla4xxx_srb_compl); 1016 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
895 } 1017 }
896 } 1018 }
897 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1019 spin_unlock_irqrestore(&ha->hardware_lock, flags);
898} 1020}
899 1021
1022void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha)
1023{
1024 clear_bit(AF_ONLINE, &ha->flags);
1025
1026 /* Disable the board */
1027 ql4_printk(KERN_INFO, ha, "Disabling the board\n");
1028 set_bit(AF_HBA_GOING_AWAY, &ha->flags);
1029
1030 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
1031 qla4xxx_mark_all_devices_missing(ha);
1032 clear_bit(AF_INIT_DONE, &ha->flags);
1033}
1034
900/** 1035/**
901 * qla4xxx_recover_adapter - recovers adapter after a fatal error 1036 * qla4xxx_recover_adapter - recovers adapter after a fatal error
902 * @ha: Pointer to host adapter structure. 1037 * @ha: Pointer to host adapter structure.
903 * @renew_ddb_list: Indicates what to do with the adapter's ddb list
904 *
905 * renew_ddb_list value can be 0=preserve ddb list, 1=destroy and rebuild
906 * ddb list.
907 **/ 1038 **/
908static int qla4xxx_recover_adapter(struct scsi_qla_host *ha, 1039static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
909 uint8_t renew_ddb_list)
910{ 1040{
911 int status; 1041 int status = QLA_ERROR;
1042 uint8_t reset_chip = 0;
912 1043
913 /* Stall incoming I/O until we are done */ 1044 /* Stall incoming I/O until we are done */
1045 scsi_block_requests(ha->host);
914 clear_bit(AF_ONLINE, &ha->flags); 1046 clear_bit(AF_ONLINE, &ha->flags);
915 1047
916 DEBUG2(printk("scsi%ld: %s calling qla4xxx_cmd_wait\n", ha->host_no, 1048 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__));
917 __func__));
918 1049
919 /* Wait for outstanding commands to complete. 1050 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
920 * Stalls the driver for max 30 secs
921 */
922 status = qla4xxx_cmd_wait(ha);
923 1051
924 qla4xxx_disable_intrs(ha); 1052 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
1053 reset_chip = 1;
925 1054
926 /* Flush any pending ddb changed AENs */ 1055 /* For the DPC_RESET_HA_INTR case (ISP-4xxx specific)
927 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 1056 * do not reset adapter, jump to initialize_adapter */
1057 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
1058 status = QLA_SUCCESS;
1059 goto recover_ha_init_adapter;
1060 }
928 1061
929 qla4xxx_flush_active_srbs(ha); 1062 /* For the ISP-82xx adapter, issue a stop_firmware if invoked
1063 * from eh_host_reset or ioctl module */
1064 if (is_qla8022(ha) && !reset_chip &&
1065 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
1066
1067 DEBUG2(ql4_printk(KERN_INFO, ha,
1068 "scsi%ld: %s - Performing stop_firmware...\n",
1069 ha->host_no, __func__));
1070 status = ha->isp_ops->reset_firmware(ha);
1071 if (status == QLA_SUCCESS) {
1072 qla4xxx_cmd_wait(ha);
1073 ha->isp_ops->disable_intrs(ha);
1074 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
1075 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
1076 } else {
1077 /* If the stop_firmware fails then
1078 * reset the entire chip */
1079 reset_chip = 1;
1080 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
1081 set_bit(DPC_RESET_HA, &ha->dpc_flags);
1082 }
1083 }
930 1084
931 /* Reset the firmware. If successful, function 1085 /* Issue full chip reset if recovering from a catastrophic error,
932 * returns with ISP interrupts enabled. 1086 * or if stop_firmware fails for ISP-82xx.
933 */ 1087 * This is the default case for ISP-4xxx */
934 DEBUG2(printk("scsi%ld: %s - Performing soft reset..\n", 1088 if (!is_qla8022(ha) || reset_chip) {
935 ha->host_no, __func__)); 1089 qla4xxx_cmd_wait(ha);
936 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) 1090 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
937 status = qla4xxx_soft_reset(ha); 1091 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
938 else 1092 DEBUG2(ql4_printk(KERN_INFO, ha,
939 status = QLA_ERROR; 1093 "scsi%ld: %s - Performing chip reset..\n",
1094 ha->host_no, __func__));
1095 status = ha->isp_ops->reset_chip(ha);
1096 }
940 1097
941 /* Flush any pending ddb changed AENs */ 1098 /* Flush any pending ddb changed AENs */
942 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 1099 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
943 1100
944 /* Re-initialize firmware. If successful, function returns 1101recover_ha_init_adapter:
945 * with ISP interrupts enabled */ 1102 /* Upon successful firmware/chip reset, re-initialize the adapter */
946 if (status == QLA_SUCCESS) { 1103 if (status == QLA_SUCCESS) {
947 DEBUG2(printk("scsi%ld: %s - Initializing adapter..\n", 1104 /* For ISP-4xxx, force function 1 to always initialize
948 ha->host_no, __func__)); 1105 * before function 3 to prevent both funcions from
949 1106 * stepping on top of the other */
950 /* If successful, AF_ONLINE flag set in 1107 if (!is_qla8022(ha) && (ha->mac_index == 3))
951 * qla4xxx_initialize_adapter */ 1108 ssleep(6);
952 status = qla4xxx_initialize_adapter(ha, renew_ddb_list); 1109
1110 /* NOTE: AF_ONLINE flag set upon successful completion of
1111 * qla4xxx_initialize_adapter */
1112 status = qla4xxx_initialize_adapter(ha, PRESERVE_DDB_LIST);
953 } 1113 }
954 1114
955 /* Failed adapter initialization? 1115 /* Retry failed adapter initialization, if necessary
956 * Retry reset_ha only if invoked via DPC (DPC_RESET_HA) */ 1116 * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific)
957 if ((test_bit(AF_ONLINE, &ha->flags) == 0) && 1117 * case to prevent ping-pong resets between functions */
958 (test_bit(DPC_RESET_HA, &ha->dpc_flags))) { 1118 if (!test_bit(AF_ONLINE, &ha->flags) &&
1119 !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
959 /* Adapter initialization failed, see if we can retry 1120 /* Adapter initialization failed, see if we can retry
960 * resetting the ha */ 1121 * resetting the ha.
1122 * Since we don't want to block the DPC for too long
1123 * with multiple resets in the same thread,
1124 * utilize DPC to retry */
961 if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) { 1125 if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
962 ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES; 1126 ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
963 DEBUG2(printk("scsi%ld: recover adapter - retrying " 1127 DEBUG2(printk("scsi%ld: recover adapter - retrying "
@@ -982,29 +1146,43 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha,
982 DEBUG2(printk("scsi%ld: recover adapter " 1146 DEBUG2(printk("scsi%ld: recover adapter "
983 "failed - board disabled\n", 1147 "failed - board disabled\n",
984 ha->host_no)); 1148 ha->host_no));
985 qla4xxx_flush_active_srbs(ha); 1149 qla4xxx_dead_adapter_cleanup(ha);
986 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 1150 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
987 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 1151 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
988 clear_bit(DPC_RESET_HA_DESTROY_DDB_LIST, 1152 clear_bit(DPC_RESET_HA_FW_CONTEXT,
989 &ha->dpc_flags); 1153 &ha->dpc_flags);
990 status = QLA_ERROR; 1154 status = QLA_ERROR;
991 } 1155 }
992 } 1156 }
993 } else { 1157 } else {
994 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 1158 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
995 clear_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags); 1159 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
996 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 1160 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
997 } 1161 }
998 1162
999 ha->adapter_error_count++; 1163 ha->adapter_error_count++;
1000 1164
1001 if (status == QLA_SUCCESS) 1165 if (test_bit(AF_ONLINE, &ha->flags))
1002 qla4xxx_enable_intrs(ha); 1166 ha->isp_ops->enable_intrs(ha);
1167
1168 scsi_unblock_requests(ha->host);
1169
1170 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
1171 DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no,
1172 status == QLA_ERROR ? "FAILED" : "SUCCEDED"));
1003 1173
1004 DEBUG2(printk("scsi%ld: recover adapter .. DONE\n", ha->host_no));
1005 return status; 1174 return status;
1006} 1175}
1007 1176
1177void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
1178{
1179 if (ha->dpc_thread &&
1180 !test_bit(AF_DPC_SCHEDULED, &ha->flags)) {
1181 set_bit(AF_DPC_SCHEDULED, &ha->flags);
1182 queue_work(ha->dpc_thread, &ha->dpc_work);
1183 }
1184}
1185
1008/** 1186/**
1009 * qla4xxx_do_dpc - dpc routine 1187 * qla4xxx_do_dpc - dpc routine
1010 * @data: in our case pointer to adapter structure 1188 * @data: in our case pointer to adapter structure
@@ -1024,21 +1202,47 @@ static void qla4xxx_do_dpc(struct work_struct *work)
1024 int status = QLA_ERROR; 1202 int status = QLA_ERROR;
1025 1203
1026 DEBUG2(printk("scsi%ld: %s: DPC handler waking up." 1204 DEBUG2(printk("scsi%ld: %s: DPC handler waking up."
1027 "flags = 0x%08lx, dpc_flags = 0x%08lx ctrl_stat = 0x%08x\n", 1205 "flags = 0x%08lx, dpc_flags = 0x%08lx\n",
1028 ha->host_no, __func__, ha->flags, ha->dpc_flags, 1206 ha->host_no, __func__, ha->flags, ha->dpc_flags))
1029 readw(&ha->reg->ctrl_status)));
1030 1207
1031 /* Initialization not yet finished. Don't do anything yet. */ 1208 /* Initialization not yet finished. Don't do anything yet. */
1032 if (!test_bit(AF_INIT_DONE, &ha->flags)) 1209 if (!test_bit(AF_INIT_DONE, &ha->flags))
1033 return; 1210 return;
1034 1211
1035 if (adapter_up(ha) || 1212 /* HBA is in the process of being permanently disabled.
1036 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 1213 * Don't process anything */
1214 if (test_bit(AF_HBA_GOING_AWAY, &ha->flags))
1215 return;
1216
1217 if (is_qla8022(ha)) {
1218 if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
1219 qla4_8xxx_idc_lock(ha);
1220 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
1221 QLA82XX_DEV_FAILED);
1222 qla4_8xxx_idc_unlock(ha);
1223 ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
1224 qla4_8xxx_device_state_handler(ha);
1225 }
1226 if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
1227 qla4_8xxx_need_qsnt_handler(ha);
1228 }
1229 }
1230
1231 if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) &&
1232 (test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
1037 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || 1233 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
1038 test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags)) { 1234 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
1039 if (test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags) || 1235 if (ql4xdontresethba) {
1040 test_bit(DPC_RESET_HA, &ha->dpc_flags)) 1236 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
1041 qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST); 1237 ha->host_no, __func__));
1238 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
1239 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
1240 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
1241 goto dpc_post_reset_ha;
1242 }
1243 if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
1244 test_bit(DPC_RESET_HA, &ha->dpc_flags))
1245 qla4xxx_recover_adapter(ha);
1042 1246
1043 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) { 1247 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
1044 uint8_t wait_time = RESET_INTR_TOV; 1248 uint8_t wait_time = RESET_INTR_TOV;
@@ -1053,18 +1257,18 @@ static void qla4xxx_do_dpc(struct work_struct *work)
1053 DEBUG2(printk("scsi%ld: %s: SR|FSR " 1257 DEBUG2(printk("scsi%ld: %s: SR|FSR "
1054 "bit not cleared-- resetting\n", 1258 "bit not cleared-- resetting\n",
1055 ha->host_no, __func__)); 1259 ha->host_no, __func__));
1056 qla4xxx_flush_active_srbs(ha); 1260 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
1057 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) { 1261 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) {
1058 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 1262 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
1059 status = qla4xxx_initialize_adapter(ha, 1263 status = qla4xxx_recover_adapter(ha);
1060 PRESERVE_DDB_LIST);
1061 } 1264 }
1062 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); 1265 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
1063 if (status == QLA_SUCCESS) 1266 if (status == QLA_SUCCESS)
1064 qla4xxx_enable_intrs(ha); 1267 ha->isp_ops->enable_intrs(ha);
1065 } 1268 }
1066 } 1269 }
1067 1270
1271dpc_post_reset_ha:
1068 /* ---- process AEN? --- */ 1272 /* ---- process AEN? --- */
1069 if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) 1273 if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
1070 qla4xxx_process_aen(ha, PROCESS_ALL_AENS); 1274 qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
@@ -1102,13 +1306,11 @@ static void qla4xxx_do_dpc(struct work_struct *work)
1102 DDB_DS_SESSION_ACTIVE) { 1306 DDB_DS_SESSION_ACTIVE) {
1103 atomic_set(&ddb_entry->state, 1307 atomic_set(&ddb_entry->state,
1104 DDB_STATE_ONLINE); 1308 DDB_STATE_ONLINE);
1105 dev_info(&ha->pdev->dev, 1309 ql4_printk(KERN_INFO, ha,
1106 "scsi%ld: %s: ddb[%d]" 1310 "scsi%ld: %s: ddb[%d]"
1107 " os[%d] marked" 1311 " marked ONLINE\n",
1108 " ONLINE\n",
1109 ha->host_no, __func__, 1312 ha->host_no, __func__,
1110 ddb_entry->fw_ddb_index, 1313 ddb_entry->fw_ddb_index);
1111 ddb_entry->os_target_id);
1112 1314
1113 iscsi_unblock_session( 1315 iscsi_unblock_session(
1114 ddb_entry->sess); 1316 ddb_entry->sess);
@@ -1144,6 +1346,7 @@ static void qla4xxx_do_dpc(struct work_struct *work)
1144 } 1346 }
1145 } 1347 }
1146 } 1348 }
1349 clear_bit(AF_DPC_SCHEDULED, &ha->flags);
1147} 1350}
1148 1351
1149/** 1352/**
@@ -1155,30 +1358,99 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
1155 1358
1156 if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) { 1359 if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) {
1157 /* Turn-off interrupts on the card. */ 1360 /* Turn-off interrupts on the card. */
1158 qla4xxx_disable_intrs(ha); 1361 ha->isp_ops->disable_intrs(ha);
1159 } 1362 }
1160 1363
1364 /* Remove timer thread, if present */
1365 if (ha->timer_active)
1366 qla4xxx_stop_timer(ha);
1367
1161 /* Kill the kernel thread for this host */ 1368 /* Kill the kernel thread for this host */
1162 if (ha->dpc_thread) 1369 if (ha->dpc_thread)
1163 destroy_workqueue(ha->dpc_thread); 1370 destroy_workqueue(ha->dpc_thread);
1164 1371
1165 /* Issue Soft Reset to put firmware in unknown state */ 1372 /* Put firmware in known state */
1166 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) 1373 ha->isp_ops->reset_firmware(ha);
1167 qla4xxx_hw_reset(ha);
1168 1374
1169 /* Remove timer thread, if present */ 1375 if (is_qla8022(ha)) {
1170 if (ha->timer_active) 1376 qla4_8xxx_idc_lock(ha);
1171 qla4xxx_stop_timer(ha); 1377 qla4_8xxx_clear_drv_active(ha);
1378 qla4_8xxx_idc_unlock(ha);
1379 }
1172 1380
1173 /* Detach interrupts */ 1381 /* Detach interrupts */
1174 if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags)) 1382 if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
1175 free_irq(ha->pdev->irq, ha); 1383 qla4xxx_free_irqs(ha);
1176 1384
1177 /* free extra memory */ 1385 /* free extra memory */
1178 qla4xxx_mem_free(ha); 1386 qla4xxx_mem_free(ha);
1387}
1388
1389int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
1390{
1391 int status = 0;
1392 uint8_t revision_id;
1393 unsigned long mem_base, mem_len, db_base, db_len;
1394 struct pci_dev *pdev = ha->pdev;
1395
1396 status = pci_request_regions(pdev, DRIVER_NAME);
1397 if (status) {
1398 printk(KERN_WARNING
1399 "scsi(%ld) Failed to reserve PIO regions (%s) "
1400 "status=%d\n", ha->host_no, pci_name(pdev), status);
1401 goto iospace_error_exit;
1402 }
1403
1404 pci_read_config_byte(pdev, PCI_REVISION_ID, &revision_id);
1405 DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
1406 __func__, revision_id));
1407 ha->revision_id = revision_id;
1408
1409 /* remap phys address */
1410 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
1411 mem_len = pci_resource_len(pdev, 0);
1412 DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n",
1413 __func__, mem_base, mem_len));
1414
1415 /* mapping of pcibase pointer */
1416 ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len);
1417 if (!ha->nx_pcibase) {
1418 printk(KERN_ERR
1419 "cannot remap MMIO (%s), aborting\n", pci_name(pdev));
1420 pci_release_regions(ha->pdev);
1421 goto iospace_error_exit;
1422 }
1179 1423
1180 pci_disable_device(ha->pdev); 1424 /* Mapping of IO base pointer, door bell read and write pointer */
1181 1425
1426 /* mapping of IO base pointer */
1427 ha->qla4_8xxx_reg =
1428 (struct device_reg_82xx __iomem *)((uint8_t *)ha->nx_pcibase +
1429 0xbc000 + (ha->pdev->devfn << 11));
1430
1431 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
1432 db_len = pci_resource_len(pdev, 4);
1433
1434 /* mapping of doorbell write pointer */
1435 ha->nx_db_wr_ptr = (unsigned long)ioremap(db_base +
1436 (ha->pdev->devfn << 12), 4);
1437 if (!ha->nx_db_wr_ptr) {
1438 printk(KERN_ERR
1439 "cannot remap MMIO doorbell-write (%s), aborting\n",
1440 pci_name(pdev));
1441 goto iospace_error_exit;
1442 }
1443 /* mapping of doorbell read pointer */
1444 ha->nx_db_rd_ptr = (uint8_t *) ha->nx_pcibase + (512 * 1024) +
1445 (ha->pdev->devfn * 8);
1446 if (!ha->nx_db_rd_ptr)
1447 printk(KERN_ERR
1448 "cannot remap MMIO doorbell-read (%s), aborting\n",
1449 pci_name(pdev));
1450 return 0;
1451
1452iospace_error_exit:
1453 return -ENOMEM;
1182} 1454}
1183 1455
1184/*** 1456/***
@@ -1188,7 +1460,7 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
1188 * This routines maps HBA's registers from the pci address space 1460 * This routines maps HBA's registers from the pci address space
1189 * into the kernel virtual address space for memory mapped i/o. 1461 * into the kernel virtual address space for memory mapped i/o.
1190 **/ 1462 **/
1191static int qla4xxx_iospace_config(struct scsi_qla_host *ha) 1463int qla4xxx_iospace_config(struct scsi_qla_host *ha)
1192{ 1464{
1193 unsigned long pio, pio_len, pio_flags; 1465 unsigned long pio, pio_len, pio_flags;
1194 unsigned long mmio, mmio_len, mmio_flags; 1466 unsigned long mmio, mmio_len, mmio_flags;
@@ -1198,12 +1470,12 @@ static int qla4xxx_iospace_config(struct scsi_qla_host *ha)
1198 pio_flags = pci_resource_flags(ha->pdev, 0); 1470 pio_flags = pci_resource_flags(ha->pdev, 0);
1199 if (pio_flags & IORESOURCE_IO) { 1471 if (pio_flags & IORESOURCE_IO) {
1200 if (pio_len < MIN_IOBASE_LEN) { 1472 if (pio_len < MIN_IOBASE_LEN) {
1201 dev_warn(&ha->pdev->dev, 1473 ql4_printk(KERN_WARNING, ha,
1202 "Invalid PCI I/O region size\n"); 1474 "Invalid PCI I/O region size\n");
1203 pio = 0; 1475 pio = 0;
1204 } 1476 }
1205 } else { 1477 } else {
1206 dev_warn(&ha->pdev->dev, "region #0 not a PIO resource\n"); 1478 ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n");
1207 pio = 0; 1479 pio = 0;
1208 } 1480 }
1209 1481
@@ -1213,20 +1485,21 @@ static int qla4xxx_iospace_config(struct scsi_qla_host *ha)
1213 mmio_flags = pci_resource_flags(ha->pdev, 1); 1485 mmio_flags = pci_resource_flags(ha->pdev, 1);
1214 1486
1215 if (!(mmio_flags & IORESOURCE_MEM)) { 1487 if (!(mmio_flags & IORESOURCE_MEM)) {
1216 dev_err(&ha->pdev->dev, 1488 ql4_printk(KERN_ERR, ha,
1217 "region #0 not an MMIO resource, aborting\n"); 1489 "region #0 not an MMIO resource, aborting\n");
1218 1490
1219 goto iospace_error_exit; 1491 goto iospace_error_exit;
1220 } 1492 }
1493
1221 if (mmio_len < MIN_IOBASE_LEN) { 1494 if (mmio_len < MIN_IOBASE_LEN) {
1222 dev_err(&ha->pdev->dev, 1495 ql4_printk(KERN_ERR, ha,
1223 "Invalid PCI mem region size, aborting\n"); 1496 "Invalid PCI mem region size, aborting\n");
1224 goto iospace_error_exit; 1497 goto iospace_error_exit;
1225 } 1498 }
1226 1499
1227 if (pci_request_regions(ha->pdev, DRIVER_NAME)) { 1500 if (pci_request_regions(ha->pdev, DRIVER_NAME)) {
1228 dev_warn(&ha->pdev->dev, 1501 ql4_printk(KERN_WARNING, ha,
1229 "Failed to reserve PIO/MMIO regions\n"); 1502 "Failed to reserve PIO/MMIO regions\n");
1230 1503
1231 goto iospace_error_exit; 1504 goto iospace_error_exit;
1232 } 1505 }
@@ -1235,8 +1508,8 @@ static int qla4xxx_iospace_config(struct scsi_qla_host *ha)
1235 ha->pio_length = pio_len; 1508 ha->pio_length = pio_len;
1236 ha->reg = ioremap(mmio, MIN_IOBASE_LEN); 1509 ha->reg = ioremap(mmio, MIN_IOBASE_LEN);
1237 if (!ha->reg) { 1510 if (!ha->reg) {
1238 dev_err(&ha->pdev->dev, 1511 ql4_printk(KERN_ERR, ha,
1239 "cannot remap MMIO, aborting\n"); 1512 "cannot remap MMIO, aborting\n");
1240 1513
1241 goto iospace_error_exit; 1514 goto iospace_error_exit;
1242 } 1515 }
@@ -1247,6 +1520,60 @@ iospace_error_exit:
1247 return -ENOMEM; 1520 return -ENOMEM;
1248} 1521}
1249 1522
1523static struct isp_operations qla4xxx_isp_ops = {
1524 .iospace_config = qla4xxx_iospace_config,
1525 .pci_config = qla4xxx_pci_config,
1526 .disable_intrs = qla4xxx_disable_intrs,
1527 .enable_intrs = qla4xxx_enable_intrs,
1528 .start_firmware = qla4xxx_start_firmware,
1529 .intr_handler = qla4xxx_intr_handler,
1530 .interrupt_service_routine = qla4xxx_interrupt_service_routine,
1531 .reset_chip = qla4xxx_soft_reset,
1532 .reset_firmware = qla4xxx_hw_reset,
1533 .queue_iocb = qla4xxx_queue_iocb,
1534 .complete_iocb = qla4xxx_complete_iocb,
1535 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out,
1536 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in,
1537 .get_sys_info = qla4xxx_get_sys_info,
1538};
1539
1540static struct isp_operations qla4_8xxx_isp_ops = {
1541 .iospace_config = qla4_8xxx_iospace_config,
1542 .pci_config = qla4_8xxx_pci_config,
1543 .disable_intrs = qla4_8xxx_disable_intrs,
1544 .enable_intrs = qla4_8xxx_enable_intrs,
1545 .start_firmware = qla4_8xxx_load_risc,
1546 .intr_handler = qla4_8xxx_intr_handler,
1547 .interrupt_service_routine = qla4_8xxx_interrupt_service_routine,
1548 .reset_chip = qla4_8xxx_isp_reset,
1549 .reset_firmware = qla4_8xxx_stop_firmware,
1550 .queue_iocb = qla4_8xxx_queue_iocb,
1551 .complete_iocb = qla4_8xxx_complete_iocb,
1552 .rd_shdw_req_q_out = qla4_8xxx_rd_shdw_req_q_out,
1553 .rd_shdw_rsp_q_in = qla4_8xxx_rd_shdw_rsp_q_in,
1554 .get_sys_info = qla4_8xxx_get_sys_info,
1555};
1556
1557uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
1558{
1559 return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out);
1560}
1561
1562uint16_t qla4_8xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
1563{
1564 return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->req_q_out));
1565}
1566
1567uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
1568{
1569 return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
1570}
1571
1572uint16_t qla4_8xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
1573{
1574 return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->rsp_q_in));
1575}
1576
1250/** 1577/**
1251 * qla4xxx_probe_adapter - callback function to probe HBA 1578 * qla4xxx_probe_adapter - callback function to probe HBA
1252 * @pdev: pointer to pci_dev structure 1579 * @pdev: pointer to pci_dev structure
@@ -1264,6 +1591,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1264 struct scsi_qla_host *ha; 1591 struct scsi_qla_host *ha;
1265 uint8_t init_retry_count = 0; 1592 uint8_t init_retry_count = 0;
1266 char buf[34]; 1593 char buf[34];
1594 struct qla4_8xxx_legacy_intr_set *nx_legacy_intr;
1267 1595
1268 if (pci_enable_device(pdev)) 1596 if (pci_enable_device(pdev))
1269 return -1; 1597 return -1;
@@ -1284,12 +1612,30 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1284 ha->host = host; 1612 ha->host = host;
1285 ha->host_no = host->host_no; 1613 ha->host_no = host->host_no;
1286 1614
1615 /* Setup Runtime configurable options */
1616 if (is_qla8022(ha)) {
1617 ha->isp_ops = &qla4_8xxx_isp_ops;
1618 rwlock_init(&ha->hw_lock);
1619 ha->qdr_sn_window = -1;
1620 ha->ddr_mn_window = -1;
1621 ha->curr_window = 255;
1622 ha->func_num = PCI_FUNC(ha->pdev->devfn);
1623 nx_legacy_intr = &legacy_intr[ha->func_num];
1624 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
1625 ha->nx_legacy_intr.tgt_status_reg =
1626 nx_legacy_intr->tgt_status_reg;
1627 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
1628 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
1629 } else {
1630 ha->isp_ops = &qla4xxx_isp_ops;
1631 }
1632
1287 /* Configure PCI I/O space. */ 1633 /* Configure PCI I/O space. */
1288 ret = qla4xxx_iospace_config(ha); 1634 ret = ha->isp_ops->iospace_config(ha);
1289 if (ret) 1635 if (ret)
1290 goto probe_failed; 1636 goto probe_failed_ioconfig;
1291 1637
1292 dev_info(&ha->pdev->dev, "Found an ISP%04x, irq %d, iobase 0x%p\n", 1638 ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n",
1293 pdev->device, pdev->irq, ha->reg); 1639 pdev->device, pdev->irq, ha->reg);
1294 1640
1295 qla4xxx_config_dma_addressing(ha); 1641 qla4xxx_config_dma_addressing(ha);
@@ -1299,32 +1645,41 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1299 INIT_LIST_HEAD(&ha->free_srb_q); 1645 INIT_LIST_HEAD(&ha->free_srb_q);
1300 1646
1301 mutex_init(&ha->mbox_sem); 1647 mutex_init(&ha->mbox_sem);
1648 init_completion(&ha->mbx_intr_comp);
1302 1649
1303 spin_lock_init(&ha->hardware_lock); 1650 spin_lock_init(&ha->hardware_lock);
1304 1651
1305 /* Allocate dma buffers */ 1652 /* Allocate dma buffers */
1306 if (qla4xxx_mem_alloc(ha)) { 1653 if (qla4xxx_mem_alloc(ha)) {
1307 dev_warn(&ha->pdev->dev, 1654 ql4_printk(KERN_WARNING, ha,
1308 "[ERROR] Failed to allocate memory for adapter\n"); 1655 "[ERROR] Failed to allocate memory for adapter\n");
1309 1656
1310 ret = -ENOMEM; 1657 ret = -ENOMEM;
1311 goto probe_failed; 1658 goto probe_failed;
1312 } 1659 }
1313 1660
1661 if (is_qla8022(ha))
1662 (void) qla4_8xxx_get_flash_info(ha);
1663
1314 /* 1664 /*
1315 * Initialize the Host adapter request/response queues and 1665 * Initialize the Host adapter request/response queues and
1316 * firmware 1666 * firmware
1317 * NOTE: interrupts enabled upon successful completion 1667 * NOTE: interrupts enabled upon successful completion
1318 */ 1668 */
1319 status = qla4xxx_initialize_adapter(ha, REBUILD_DDB_LIST); 1669 status = qla4xxx_initialize_adapter(ha, REBUILD_DDB_LIST);
1320 while (status == QLA_ERROR && init_retry_count++ < MAX_INIT_RETRIES) { 1670 while ((!test_bit(AF_ONLINE, &ha->flags)) &&
1671 init_retry_count++ < MAX_INIT_RETRIES) {
1321 DEBUG2(printk("scsi: %s: retrying adapter initialization " 1672 DEBUG2(printk("scsi: %s: retrying adapter initialization "
1322 "(%d)\n", __func__, init_retry_count)); 1673 "(%d)\n", __func__, init_retry_count));
1323 qla4xxx_soft_reset(ha); 1674
1675 if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
1676 continue;
1677
1324 status = qla4xxx_initialize_adapter(ha, REBUILD_DDB_LIST); 1678 status = qla4xxx_initialize_adapter(ha, REBUILD_DDB_LIST);
1325 } 1679 }
1326 if (status == QLA_ERROR) { 1680
1327 dev_warn(&ha->pdev->dev, "Failed to initialize adapter\n"); 1681 if (!test_bit(AF_ONLINE, &ha->flags)) {
1682 ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
1328 1683
1329 ret = -ENODEV; 1684 ret = -ENODEV;
1330 goto probe_failed; 1685 goto probe_failed;
@@ -1340,8 +1695,9 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1340 1695
1341 ret = scsi_init_shared_tag_map(host, MAX_SRBS); 1696 ret = scsi_init_shared_tag_map(host, MAX_SRBS);
1342 if (ret) { 1697 if (ret) {
1343 dev_warn(&ha->pdev->dev, "scsi_init_shared_tag_map failed\n"); 1698 ql4_printk(KERN_WARNING, ha,
1344 goto probe_failed; 1699 "scsi_init_shared_tag_map failed\n");
1700 goto probe_failed;
1345 } 1701 }
1346 1702
1347 /* Startup the kernel thread for this host adapter. */ 1703 /* Startup the kernel thread for this host adapter. */
@@ -1350,24 +1706,27 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1350 sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no); 1706 sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
1351 ha->dpc_thread = create_singlethread_workqueue(buf); 1707 ha->dpc_thread = create_singlethread_workqueue(buf);
1352 if (!ha->dpc_thread) { 1708 if (!ha->dpc_thread) {
1353 dev_warn(&ha->pdev->dev, "Unable to start DPC thread!\n"); 1709 ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
1354 ret = -ENODEV; 1710 ret = -ENODEV;
1355 goto probe_failed; 1711 goto probe_failed;
1356 } 1712 }
1357 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc); 1713 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
1358 1714
1359 ret = request_irq(pdev->irq, qla4xxx_intr_handler, 1715 /* For ISP-82XX, request_irqs is called in qla4_8xxx_load_risc
1360 IRQF_DISABLED | IRQF_SHARED, "qla4xxx", ha); 1716 * (which is called indirectly by qla4xxx_initialize_adapter),
1361 if (ret) { 1717 * so that irqs will be registered after crbinit but before
1362 dev_warn(&ha->pdev->dev, "Failed to reserve interrupt %d" 1718 * mbx_intr_enable.
1363 " already in use.\n", pdev->irq); 1719 */
1364 goto probe_failed; 1720 if (!is_qla8022(ha)) {
1721 ret = qla4xxx_request_irqs(ha);
1722 if (ret) {
1723 ql4_printk(KERN_WARNING, ha, "Failed to reserve "
1724 "interrupt %d already in use.\n", pdev->irq);
1725 goto probe_failed;
1726 }
1365 } 1727 }
1366 set_bit(AF_IRQ_ATTACHED, &ha->flags);
1367 host->irq = pdev->irq;
1368 DEBUG(printk("scsi%d: irq %d attached\n", ha->host_no, ha->pdev->irq));
1369 1728
1370 qla4xxx_enable_intrs(ha); 1729 ha->isp_ops->enable_intrs(ha);
1371 1730
1372 /* Start timer thread. */ 1731 /* Start timer thread. */
1373 qla4xxx_start_timer(ha, qla4xxx_timer, 1); 1732 qla4xxx_start_timer(ha, qla4xxx_timer, 1);
@@ -1391,6 +1750,8 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1391 1750
1392probe_failed: 1751probe_failed:
1393 qla4xxx_free_adapter(ha); 1752 qla4xxx_free_adapter(ha);
1753
1754probe_failed_ioconfig:
1394 scsi_host_put(ha->host); 1755 scsi_host_put(ha->host);
1395 1756
1396probe_disable_device: 1757probe_disable_device:
@@ -1409,10 +1770,7 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
1409 1770
1410 ha = pci_get_drvdata(pdev); 1771 ha = pci_get_drvdata(pdev);
1411 1772
1412 qla4xxx_disable_intrs(ha); 1773 set_bit(AF_HBA_GOING_AWAY, &ha->flags);
1413
1414 while (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags))
1415 ssleep(1);
1416 1774
1417 /* remove devs from iscsi_sessions to scsi_devices */ 1775 /* remove devs from iscsi_sessions to scsi_devices */
1418 qla4xxx_free_ddb_list(ha); 1776 qla4xxx_free_ddb_list(ha);
@@ -1423,6 +1781,7 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
1423 1781
1424 scsi_host_put(ha->host); 1782 scsi_host_put(ha->host);
1425 1783
1784 pci_disable_device(pdev);
1426 pci_set_drvdata(pdev, NULL); 1785 pci_set_drvdata(pdev, NULL);
1427} 1786}
1428 1787
@@ -1479,7 +1838,8 @@ static void qla4xxx_slave_destroy(struct scsi_device *sdev)
1479 * 1838 *
1480 * This routine removes and returns the srb at the specified index 1839 * This routine removes and returns the srb at the specified index
1481 **/ 1840 **/
1482struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha, uint32_t index) 1841struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
1842 uint32_t index)
1483{ 1843{
1484 struct srb *srb = NULL; 1844 struct srb *srb = NULL;
1485 struct scsi_cmnd *cmd = NULL; 1845 struct scsi_cmnd *cmd = NULL;
@@ -1605,7 +1965,7 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
1605 int ret = SUCCESS; 1965 int ret = SUCCESS;
1606 int wait = 0; 1966 int wait = 0;
1607 1967
1608 dev_info(&ha->pdev->dev, 1968 ql4_printk(KERN_INFO, ha,
1609 "scsi%ld:%d:%d: Abort command issued cmd=%p, pid=%ld\n", 1969 "scsi%ld:%d:%d: Abort command issued cmd=%p, pid=%ld\n",
1610 ha->host_no, id, lun, cmd, serial); 1970 ha->host_no, id, lun, cmd, serial);
1611 1971
@@ -1637,7 +1997,7 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
1637 } 1997 }
1638 } 1998 }
1639 1999
1640 dev_info(&ha->pdev->dev, 2000 ql4_printk(KERN_INFO, ha,
1641 "scsi%ld:%d:%d: Abort command - %s\n", 2001 "scsi%ld:%d:%d: Abort command - %s\n",
1642 ha->host_no, id, lun, (ret == SUCCESS) ? "succeded" : "failed"); 2002 ha->host_no, id, lun, (ret == SUCCESS) ? "succeded" : "failed");
1643 2003
@@ -1660,7 +2020,12 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
1660 if (!ddb_entry) 2020 if (!ddb_entry)
1661 return ret; 2021 return ret;
1662 2022
1663 dev_info(&ha->pdev->dev, 2023 ret = iscsi_block_scsi_eh(cmd);
2024 if (ret)
2025 return ret;
2026 ret = FAILED;
2027
2028 ql4_printk(KERN_INFO, ha,
1664 "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no, 2029 "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no,
1665 cmd->device->channel, cmd->device->id, cmd->device->lun); 2030 cmd->device->channel, cmd->device->id, cmd->device->lun);
1666 2031
@@ -1673,13 +2038,13 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
1673 /* FIXME: wait for hba to go online */ 2038 /* FIXME: wait for hba to go online */
1674 stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun); 2039 stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
1675 if (stat != QLA_SUCCESS) { 2040 if (stat != QLA_SUCCESS) {
1676 dev_info(&ha->pdev->dev, "DEVICE RESET FAILED. %d\n", stat); 2041 ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat);
1677 goto eh_dev_reset_done; 2042 goto eh_dev_reset_done;
1678 } 2043 }
1679 2044
1680 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device), 2045 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
1681 cmd->device)) { 2046 cmd->device)) {
1682 dev_info(&ha->pdev->dev, 2047 ql4_printk(KERN_INFO, ha,
1683 "DEVICE RESET FAILED - waiting for " 2048 "DEVICE RESET FAILED - waiting for "
1684 "commands.\n"); 2049 "commands.\n");
1685 goto eh_dev_reset_done; 2050 goto eh_dev_reset_done;
@@ -1690,7 +2055,7 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
1690 MM_LUN_RESET) != QLA_SUCCESS) 2055 MM_LUN_RESET) != QLA_SUCCESS)
1691 goto eh_dev_reset_done; 2056 goto eh_dev_reset_done;
1692 2057
1693 dev_info(&ha->pdev->dev, 2058 ql4_printk(KERN_INFO, ha,
1694 "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n", 2059 "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n",
1695 ha->host_no, cmd->device->channel, cmd->device->id, 2060 ha->host_no, cmd->device->channel, cmd->device->id,
1696 cmd->device->lun); 2061 cmd->device->lun);
@@ -1712,11 +2077,15 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
1712{ 2077{
1713 struct scsi_qla_host *ha = to_qla_host(cmd->device->host); 2078 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
1714 struct ddb_entry *ddb_entry = cmd->device->hostdata; 2079 struct ddb_entry *ddb_entry = cmd->device->hostdata;
1715 int stat; 2080 int stat, ret;
1716 2081
1717 if (!ddb_entry) 2082 if (!ddb_entry)
1718 return FAILED; 2083 return FAILED;
1719 2084
2085 ret = iscsi_block_scsi_eh(cmd);
2086 if (ret)
2087 return ret;
2088
1720 starget_printk(KERN_INFO, scsi_target(cmd->device), 2089 starget_printk(KERN_INFO, scsi_target(cmd->device),
1721 "WARM TARGET RESET ISSUED.\n"); 2090 "WARM TARGET RESET ISSUED.\n");
1722 2091
@@ -1769,7 +2138,13 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
1769 2138
1770 ha = (struct scsi_qla_host *) cmd->device->host->hostdata; 2139 ha = (struct scsi_qla_host *) cmd->device->host->hostdata;
1771 2140
1772 dev_info(&ha->pdev->dev, 2141 if (ql4xdontresethba) {
2142 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
2143 ha->host_no, __func__));
2144 return FAILED;
2145 }
2146
2147 ql4_printk(KERN_INFO, ha,
1773 "scsi(%ld:%d:%d:%d): HOST RESET ISSUED.\n", ha->host_no, 2148 "scsi(%ld:%d:%d:%d): HOST RESET ISSUED.\n", ha->host_no,
1774 cmd->device->channel, cmd->device->id, cmd->device->lun); 2149 cmd->device->channel, cmd->device->id, cmd->device->lun);
1775 2150
@@ -1781,20 +2156,22 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
1781 return FAILED; 2156 return FAILED;
1782 } 2157 }
1783 2158
1784 /* make sure the dpc thread is stopped while we reset the hba */ 2159 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
1785 clear_bit(AF_ONLINE, &ha->flags); 2160 if (is_qla8022(ha))
1786 flush_workqueue(ha->dpc_thread); 2161 set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
2162 else
2163 set_bit(DPC_RESET_HA, &ha->dpc_flags);
2164 }
1787 2165
1788 if (qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST) == QLA_SUCCESS) 2166 if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS)
1789 return_status = SUCCESS; 2167 return_status = SUCCESS;
1790 2168
1791 dev_info(&ha->pdev->dev, "HOST RESET %s.\n", 2169 ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n",
1792 return_status == FAILED ? "FAILED" : "SUCCEDED"); 2170 return_status == FAILED ? "FAILED" : "SUCCEDED");
1793 2171
1794 return return_status; 2172 return return_status;
1795} 2173}
1796 2174
1797
1798static struct pci_device_id qla4xxx_pci_tbl[] = { 2175static struct pci_device_id qla4xxx_pci_tbl[] = {
1799 { 2176 {
1800 .vendor = PCI_VENDOR_ID_QLOGIC, 2177 .vendor = PCI_VENDOR_ID_QLOGIC,
@@ -1814,6 +2191,12 @@ static struct pci_device_id qla4xxx_pci_tbl[] = {
1814 .subvendor = PCI_ANY_ID, 2191 .subvendor = PCI_ANY_ID,
1815 .subdevice = PCI_ANY_ID, 2192 .subdevice = PCI_ANY_ID,
1816 }, 2193 },
2194 {
2195 .vendor = PCI_VENDOR_ID_QLOGIC,
2196 .device = PCI_DEVICE_ID_QLOGIC_ISP8022,
2197 .subvendor = PCI_ANY_ID,
2198 .subdevice = PCI_ANY_ID,
2199 },
1817 {0, 0}, 2200 {0, 0},
1818}; 2201};
1819MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl); 2202MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
@@ -1869,7 +2252,6 @@ no_srp_cache:
1869 2252
1870static void __exit qla4xxx_module_exit(void) 2253static void __exit qla4xxx_module_exit(void)
1871{ 2254{
1872 ql4_mod_unload = 1;
1873 pci_unregister_driver(&qla4xxx_pci_driver); 2255 pci_unregister_driver(&qla4xxx_pci_driver);
1874 iscsi_unregister_transport(&qla4xxx_iscsi_transport); 2256 iscsi_unregister_transport(&qla4xxx_iscsi_transport);
1875 kmem_cache_destroy(srb_cachep); 2257 kmem_cache_destroy(srb_cachep);
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 28a6c494a2e8..c905dbd75331 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7 7
8#define QLA4XXX_DRIVER_VERSION "5.02.00-k1" 8#define QLA4XXX_DRIVER_VERSION "5.02.00-k2"
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 136329b4027b..b02bdc6c2cd1 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1991,7 +1991,8 @@ static void map_region(sector_t lba, unsigned int len)
1991 block = lba + alignment; 1991 block = lba + alignment;
1992 rem = do_div(block, granularity); 1992 rem = do_div(block, granularity);
1993 1993
1994 set_bit(block, map_storep); 1994 if (block < map_size)
1995 set_bit(block, map_storep);
1995 1996
1996 lba += granularity - rem; 1997 lba += granularity - rem;
1997 } 1998 }
@@ -2011,7 +2012,8 @@ static void unmap_region(sector_t lba, unsigned int len)
2011 block = lba + alignment; 2012 block = lba + alignment;
2012 rem = do_div(block, granularity); 2013 rem = do_div(block, granularity);
2013 2014
2014 if (rem == 0 && lba + granularity <= end) 2015 if (rem == 0 && lba + granularity <= end &&
2016 block < map_size)
2015 clear_bit(block, map_storep); 2017 clear_bit(block, map_storep);
2016 2018
2017 lba += granularity - rem; 2019 lba += granularity - rem;
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index a5d630f5f519..2bf98469dc4c 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -307,6 +307,19 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
307 (sshdr.asc == 0x04) && (sshdr.ascq == 0x02)) 307 (sshdr.asc == 0x04) && (sshdr.ascq == 0x02))
308 return FAILED; 308 return FAILED;
309 309
310 if (sshdr.asc == 0x3f && sshdr.ascq == 0x0e)
311 scmd_printk(KERN_WARNING, scmd,
312 "Warning! Received an indication that the "
313 "LUN assignments on this target have "
314 "changed. The Linux SCSI layer does not "
315 "automatically remap LUN assignments.\n");
316 else if (sshdr.asc == 0x3f)
317 scmd_printk(KERN_WARNING, scmd,
318 "Warning! Received an indication that the "
319 "operating parameters on this target have "
320 "changed. The Linux SCSI layer does not "
321 "automatically adjust these parameters.\n");
322
310 if (blk_barrier_rq(scmd->request)) 323 if (blk_barrier_rq(scmd->request))
311 /* 324 /*
312 * barrier requests should always retry on UA 325 * barrier requests should always retry on UA
@@ -1762,6 +1775,14 @@ int scsi_error_handler(void *data)
1762 * what we need to do to get it up and online again (if we can). 1775 * what we need to do to get it up and online again (if we can).
1763 * If we fail, we end up taking the thing offline. 1776 * If we fail, we end up taking the thing offline.
1764 */ 1777 */
1778 if (scsi_autopm_get_host(shost) != 0) {
1779 SCSI_LOG_ERROR_RECOVERY(1,
1780 printk(KERN_ERR "Error handler scsi_eh_%d "
1781 "unable to autoresume\n",
1782 shost->host_no));
1783 continue;
1784 }
1785
1765 if (shost->transportt->eh_strategy_handler) 1786 if (shost->transportt->eh_strategy_handler)
1766 shost->transportt->eh_strategy_handler(shost); 1787 shost->transportt->eh_strategy_handler(shost);
1767 else 1788 else
@@ -1775,6 +1796,7 @@ int scsi_error_handler(void *data)
1775 * which are still online. 1796 * which are still online.
1776 */ 1797 */
1777 scsi_restart_operations(shost); 1798 scsi_restart_operations(shost);
1799 scsi_autopm_put_host(shost);
1778 set_current_state(TASK_INTERRUPTIBLE); 1800 set_current_state(TASK_INTERRUPTIBLE);
1779 } 1801 }
1780 __set_current_state(TASK_RUNNING); 1802 __set_current_state(TASK_RUNNING);
@@ -1872,12 +1894,16 @@ scsi_reset_provider_done_command(struct scsi_cmnd *scmd)
1872int 1894int
1873scsi_reset_provider(struct scsi_device *dev, int flag) 1895scsi_reset_provider(struct scsi_device *dev, int flag)
1874{ 1896{
1875 struct scsi_cmnd *scmd = scsi_get_command(dev, GFP_KERNEL); 1897 struct scsi_cmnd *scmd;
1876 struct Scsi_Host *shost = dev->host; 1898 struct Scsi_Host *shost = dev->host;
1877 struct request req; 1899 struct request req;
1878 unsigned long flags; 1900 unsigned long flags;
1879 int rtn; 1901 int rtn;
1880 1902
1903 if (scsi_autopm_get_host(shost) < 0)
1904 return FAILED;
1905
1906 scmd = scsi_get_command(dev, GFP_KERNEL);
1881 blk_rq_init(NULL, &req); 1907 blk_rq_init(NULL, &req);
1882 scmd->request = &req; 1908 scmd->request = &req;
1883 1909
@@ -1934,6 +1960,7 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
1934 scsi_run_host_queues(shost); 1960 scsi_run_host_queues(shost);
1935 1961
1936 scsi_next_command(scmd); 1962 scsi_next_command(scmd);
1963 scsi_autopm_put_host(shost);
1937 return rtn; 1964 return rtn;
1938} 1965}
1939EXPORT_SYMBOL(scsi_reset_provider); 1966EXPORT_SYMBOL(scsi_reset_provider);
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
new file mode 100644
index 000000000000..d70e91ae60af
--- /dev/null
+++ b/drivers/scsi/scsi_pm.c
@@ -0,0 +1,206 @@
1/*
2 * scsi_pm.c Copyright (C) 2010 Alan Stern
3 *
4 * SCSI dynamic Power Management
5 * Initial version: Alan Stern <stern@rowland.harvard.edu>
6 */
7
8#include <linux/pm_runtime.h>
9
10#include <scsi/scsi.h>
11#include <scsi/scsi_device.h>
12#include <scsi/scsi_driver.h>
13#include <scsi/scsi_host.h>
14
15#include "scsi_priv.h"
16
17static int scsi_dev_type_suspend(struct device *dev, pm_message_t msg)
18{
19 struct device_driver *drv;
20 int err;
21
22 err = scsi_device_quiesce(to_scsi_device(dev));
23 if (err == 0) {
24 drv = dev->driver;
25 if (drv && drv->suspend)
26 err = drv->suspend(dev, msg);
27 }
28 dev_dbg(dev, "scsi suspend: %d\n", err);
29 return err;
30}
31
32static int scsi_dev_type_resume(struct device *dev)
33{
34 struct device_driver *drv;
35 int err = 0;
36
37 drv = dev->driver;
38 if (drv && drv->resume)
39 err = drv->resume(dev);
40 scsi_device_resume(to_scsi_device(dev));
41 dev_dbg(dev, "scsi resume: %d\n", err);
42 return err;
43}
44
45#ifdef CONFIG_PM_SLEEP
46
47static int scsi_bus_suspend_common(struct device *dev, pm_message_t msg)
48{
49 int err = 0;
50
51 if (scsi_is_sdev_device(dev))
52 err = scsi_dev_type_suspend(dev, msg);
53 return err;
54}
55
56static int scsi_bus_resume_common(struct device *dev)
57{
58 int err = 0;
59
60 if (scsi_is_sdev_device(dev))
61 err = scsi_dev_type_resume(dev);
62
63 if (err == 0) {
64 pm_runtime_disable(dev);
65 pm_runtime_set_active(dev);
66 pm_runtime_enable(dev);
67 }
68 return err;
69}
70
71static int scsi_bus_suspend(struct device *dev)
72{
73 return scsi_bus_suspend_common(dev, PMSG_SUSPEND);
74}
75
76static int scsi_bus_freeze(struct device *dev)
77{
78 return scsi_bus_suspend_common(dev, PMSG_FREEZE);
79}
80
81static int scsi_bus_poweroff(struct device *dev)
82{
83 return scsi_bus_suspend_common(dev, PMSG_HIBERNATE);
84}
85
86#else /* CONFIG_PM_SLEEP */
87
88#define scsi_bus_resume_common NULL
89#define scsi_bus_suspend NULL
90#define scsi_bus_freeze NULL
91#define scsi_bus_poweroff NULL
92
93#endif /* CONFIG_PM_SLEEP */
94
95#ifdef CONFIG_PM_RUNTIME
96
97static int scsi_runtime_suspend(struct device *dev)
98{
99 int err = 0;
100
101 dev_dbg(dev, "scsi_runtime_suspend\n");
102 if (scsi_is_sdev_device(dev)) {
103 err = scsi_dev_type_suspend(dev, PMSG_AUTO_SUSPEND);
104 if (err == -EAGAIN)
105 pm_schedule_suspend(dev, jiffies_to_msecs(
106 round_jiffies_up_relative(HZ/10)));
107 }
108
109 /* Insert hooks here for targets, hosts, and transport classes */
110
111 return err;
112}
113
114static int scsi_runtime_resume(struct device *dev)
115{
116 int err = 0;
117
118 dev_dbg(dev, "scsi_runtime_resume\n");
119 if (scsi_is_sdev_device(dev))
120 err = scsi_dev_type_resume(dev);
121
122 /* Insert hooks here for targets, hosts, and transport classes */
123
124 return err;
125}
126
127static int scsi_runtime_idle(struct device *dev)
128{
129 int err;
130
131 dev_dbg(dev, "scsi_runtime_idle\n");
132
133 /* Insert hooks here for targets, hosts, and transport classes */
134
135 if (scsi_is_sdev_device(dev))
136 err = pm_schedule_suspend(dev, 100);
137 else
138 err = pm_runtime_suspend(dev);
139 return err;
140}
141
142int scsi_autopm_get_device(struct scsi_device *sdev)
143{
144 int err;
145
146 err = pm_runtime_get_sync(&sdev->sdev_gendev);
147 if (err < 0)
148 pm_runtime_put_sync(&sdev->sdev_gendev);
149 else if (err > 0)
150 err = 0;
151 return err;
152}
153EXPORT_SYMBOL_GPL(scsi_autopm_get_device);
154
155void scsi_autopm_put_device(struct scsi_device *sdev)
156{
157 pm_runtime_put_sync(&sdev->sdev_gendev);
158}
159EXPORT_SYMBOL_GPL(scsi_autopm_put_device);
160
161void scsi_autopm_get_target(struct scsi_target *starget)
162{
163 pm_runtime_get_sync(&starget->dev);
164}
165
166void scsi_autopm_put_target(struct scsi_target *starget)
167{
168 pm_runtime_put_sync(&starget->dev);
169}
170
171int scsi_autopm_get_host(struct Scsi_Host *shost)
172{
173 int err;
174
175 err = pm_runtime_get_sync(&shost->shost_gendev);
176 if (err < 0)
177 pm_runtime_put_sync(&shost->shost_gendev);
178 else if (err > 0)
179 err = 0;
180 return err;
181}
182
183void scsi_autopm_put_host(struct Scsi_Host *shost)
184{
185 pm_runtime_put_sync(&shost->shost_gendev);
186}
187
188#else
189
190#define scsi_runtime_suspend NULL
191#define scsi_runtime_resume NULL
192#define scsi_runtime_idle NULL
193
194#endif /* CONFIG_PM_RUNTIME */
195
196const struct dev_pm_ops scsi_bus_pm_ops = {
197 .suspend = scsi_bus_suspend,
198 .resume = scsi_bus_resume_common,
199 .freeze = scsi_bus_freeze,
200 .thaw = scsi_bus_resume_common,
201 .poweroff = scsi_bus_poweroff,
202 .restore = scsi_bus_resume_common,
203 .runtime_suspend = scsi_runtime_suspend,
204 .runtime_resume = scsi_runtime_resume,
205 .runtime_idle = scsi_runtime_idle,
206};
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 1fbf7c78bba0..026295e2c539 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -7,6 +7,7 @@ struct request_queue;
7struct request; 7struct request;
8struct scsi_cmnd; 8struct scsi_cmnd;
9struct scsi_device; 9struct scsi_device;
10struct scsi_target;
10struct scsi_host_template; 11struct scsi_host_template;
11struct Scsi_Host; 12struct Scsi_Host;
12struct scsi_nl_hdr; 13struct scsi_nl_hdr;
@@ -144,6 +145,24 @@ static inline void scsi_netlink_init(void) {}
144static inline void scsi_netlink_exit(void) {} 145static inline void scsi_netlink_exit(void) {}
145#endif 146#endif
146 147
148/* scsi_pm.c */
149#ifdef CONFIG_PM_OPS
150extern const struct dev_pm_ops scsi_bus_pm_ops;
151#else /* CONFIG_PM_OPS */
152#define scsi_bus_pm_ops (*NULL)
153#endif
154#ifdef CONFIG_PM_RUNTIME
155extern void scsi_autopm_get_target(struct scsi_target *);
156extern void scsi_autopm_put_target(struct scsi_target *);
157extern int scsi_autopm_get_host(struct Scsi_Host *);
158extern void scsi_autopm_put_host(struct Scsi_Host *);
159#else
160static inline void scsi_autopm_get_target(struct scsi_target *t) {}
161static inline void scsi_autopm_put_target(struct scsi_target *t) {}
162static inline int scsi_autopm_get_host(struct Scsi_Host *h) { return 0; }
163static inline void scsi_autopm_put_host(struct Scsi_Host *h) {}
164#endif /* CONFIG_PM_RUNTIME */
165
147/* 166/*
148 * internal scsi timeout functions: for use by mid-layer and transport 167 * internal scsi timeout functions: for use by mid-layer and transport
149 * classes. 168 * classes.
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 1c027a97d8b9..3d0a1e6e9c48 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1513,14 +1513,18 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
1513 starget = scsi_alloc_target(parent, channel, id); 1513 starget = scsi_alloc_target(parent, channel, id);
1514 if (!starget) 1514 if (!starget)
1515 return ERR_PTR(-ENOMEM); 1515 return ERR_PTR(-ENOMEM);
1516 scsi_autopm_get_target(starget);
1516 1517
1517 mutex_lock(&shost->scan_mutex); 1518 mutex_lock(&shost->scan_mutex);
1518 if (!shost->async_scan) 1519 if (!shost->async_scan)
1519 scsi_complete_async_scans(); 1520 scsi_complete_async_scans();
1520 1521
1521 if (scsi_host_scan_allowed(shost)) 1522 if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
1522 scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1, hostdata); 1523 scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1, hostdata);
1524 scsi_autopm_put_host(shost);
1525 }
1523 mutex_unlock(&shost->scan_mutex); 1526 mutex_unlock(&shost->scan_mutex);
1527 scsi_autopm_put_target(starget);
1524 scsi_target_reap(starget); 1528 scsi_target_reap(starget);
1525 put_device(&starget->dev); 1529 put_device(&starget->dev);
1526 1530
@@ -1574,6 +1578,7 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
1574 starget = scsi_alloc_target(parent, channel, id); 1578 starget = scsi_alloc_target(parent, channel, id);
1575 if (!starget) 1579 if (!starget)
1576 return; 1580 return;
1581 scsi_autopm_get_target(starget);
1577 1582
1578 if (lun != SCAN_WILD_CARD) { 1583 if (lun != SCAN_WILD_CARD) {
1579 /* 1584 /*
@@ -1599,6 +1604,7 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
1599 } 1604 }
1600 1605
1601 out_reap: 1606 out_reap:
1607 scsi_autopm_put_target(starget);
1602 /* now determine if the target has any children at all 1608 /* now determine if the target has any children at all
1603 * and if not, nuke it */ 1609 * and if not, nuke it */
1604 scsi_target_reap(starget); 1610 scsi_target_reap(starget);
@@ -1633,8 +1639,10 @@ void scsi_scan_target(struct device *parent, unsigned int channel,
1633 if (!shost->async_scan) 1639 if (!shost->async_scan)
1634 scsi_complete_async_scans(); 1640 scsi_complete_async_scans();
1635 1641
1636 if (scsi_host_scan_allowed(shost)) 1642 if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
1637 __scsi_scan_target(parent, channel, id, lun, rescan); 1643 __scsi_scan_target(parent, channel, id, lun, rescan);
1644 scsi_autopm_put_host(shost);
1645 }
1638 mutex_unlock(&shost->scan_mutex); 1646 mutex_unlock(&shost->scan_mutex);
1639} 1647}
1640EXPORT_SYMBOL(scsi_scan_target); 1648EXPORT_SYMBOL(scsi_scan_target);
@@ -1686,7 +1694,7 @@ int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
1686 if (!shost->async_scan) 1694 if (!shost->async_scan)
1687 scsi_complete_async_scans(); 1695 scsi_complete_async_scans();
1688 1696
1689 if (scsi_host_scan_allowed(shost)) { 1697 if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
1690 if (channel == SCAN_WILD_CARD) 1698 if (channel == SCAN_WILD_CARD)
1691 for (channel = 0; channel <= shost->max_channel; 1699 for (channel = 0; channel <= shost->max_channel;
1692 channel++) 1700 channel++)
@@ -1694,6 +1702,7 @@ int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
1694 rescan); 1702 rescan);
1695 else 1703 else
1696 scsi_scan_channel(shost, channel, id, lun, rescan); 1704 scsi_scan_channel(shost, channel, id, lun, rescan);
1705 scsi_autopm_put_host(shost);
1697 } 1706 }
1698 mutex_unlock(&shost->scan_mutex); 1707 mutex_unlock(&shost->scan_mutex);
1699 1708
@@ -1831,8 +1840,11 @@ static void do_scsi_scan_host(struct Scsi_Host *shost)
1831static int do_scan_async(void *_data) 1840static int do_scan_async(void *_data)
1832{ 1841{
1833 struct async_scan_data *data = _data; 1842 struct async_scan_data *data = _data;
1834 do_scsi_scan_host(data->shost); 1843 struct Scsi_Host *shost = data->shost;
1844
1845 do_scsi_scan_host(shost);
1835 scsi_finish_async_scan(data); 1846 scsi_finish_async_scan(data);
1847 scsi_autopm_put_host(shost);
1836 return 0; 1848 return 0;
1837} 1849}
1838 1850
@@ -1847,16 +1859,20 @@ void scsi_scan_host(struct Scsi_Host *shost)
1847 1859
1848 if (strncmp(scsi_scan_type, "none", 4) == 0) 1860 if (strncmp(scsi_scan_type, "none", 4) == 0)
1849 return; 1861 return;
1862 if (scsi_autopm_get_host(shost) < 0)
1863 return;
1850 1864
1851 data = scsi_prep_async_scan(shost); 1865 data = scsi_prep_async_scan(shost);
1852 if (!data) { 1866 if (!data) {
1853 do_scsi_scan_host(shost); 1867 do_scsi_scan_host(shost);
1868 scsi_autopm_put_host(shost);
1854 return; 1869 return;
1855 } 1870 }
1856 1871
1857 p = kthread_run(do_scan_async, data, "scsi_scan_%d", shost->host_no); 1872 p = kthread_run(do_scan_async, data, "scsi_scan_%d", shost->host_no);
1858 if (IS_ERR(p)) 1873 if (IS_ERR(p))
1859 do_scan_async(data); 1874 do_scan_async(data);
1875 /* scsi_autopm_put_host(shost) is called in do_scan_async() */
1860} 1876}
1861EXPORT_SYMBOL(scsi_scan_host); 1877EXPORT_SYMBOL(scsi_scan_host);
1862 1878
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index c23ab978c3ba..562fb3bce261 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -11,6 +11,7 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/blkdev.h> 12#include <linux/blkdev.h>
13#include <linux/device.h> 13#include <linux/device.h>
14#include <linux/pm_runtime.h>
14 15
15#include <scsi/scsi.h> 16#include <scsi/scsi.h>
16#include <scsi/scsi_device.h> 17#include <scsi/scsi_device.h>
@@ -376,57 +377,11 @@ static int scsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
376 return 0; 377 return 0;
377} 378}
378 379
379static int scsi_bus_suspend(struct device * dev, pm_message_t state)
380{
381 struct device_driver *drv;
382 struct scsi_device *sdev;
383 int err;
384
385 if (dev->type != &scsi_dev_type)
386 return 0;
387
388 drv = dev->driver;
389 sdev = to_scsi_device(dev);
390
391 err = scsi_device_quiesce(sdev);
392 if (err)
393 return err;
394
395 if (drv && drv->suspend) {
396 err = drv->suspend(dev, state);
397 if (err)
398 return err;
399 }
400
401 return 0;
402}
403
404static int scsi_bus_resume(struct device * dev)
405{
406 struct device_driver *drv;
407 struct scsi_device *sdev;
408 int err = 0;
409
410 if (dev->type != &scsi_dev_type)
411 return 0;
412
413 drv = dev->driver;
414 sdev = to_scsi_device(dev);
415
416 if (drv && drv->resume)
417 err = drv->resume(dev);
418
419 scsi_device_resume(sdev);
420
421 return err;
422}
423
424struct bus_type scsi_bus_type = { 380struct bus_type scsi_bus_type = {
425 .name = "scsi", 381 .name = "scsi",
426 .match = scsi_bus_match, 382 .match = scsi_bus_match,
427 .uevent = scsi_bus_uevent, 383 .uevent = scsi_bus_uevent,
428 .suspend = scsi_bus_suspend, 384 .pm = &scsi_bus_pm_ops,
429 .resume = scsi_bus_resume,
430}; 385};
431EXPORT_SYMBOL_GPL(scsi_bus_type); 386EXPORT_SYMBOL_GPL(scsi_bus_type);
432 387
@@ -848,8 +803,6 @@ static int scsi_target_add(struct scsi_target *starget)
848 if (starget->state != STARGET_CREATED) 803 if (starget->state != STARGET_CREATED)
849 return 0; 804 return 0;
850 805
851 device_enable_async_suspend(&starget->dev);
852
853 error = device_add(&starget->dev); 806 error = device_add(&starget->dev);
854 if (error) { 807 if (error) {
855 dev_err(&starget->dev, "target device_add failed, error %d\n", error); 808 dev_err(&starget->dev, "target device_add failed, error %d\n", error);
@@ -858,6 +811,10 @@ static int scsi_target_add(struct scsi_target *starget)
858 transport_add_device(&starget->dev); 811 transport_add_device(&starget->dev);
859 starget->state = STARGET_RUNNING; 812 starget->state = STARGET_RUNNING;
860 813
814 pm_runtime_set_active(&starget->dev);
815 pm_runtime_enable(&starget->dev);
816 device_enable_async_suspend(&starget->dev);
817
861 return 0; 818 return 0;
862} 819}
863 820
@@ -887,7 +844,20 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
887 return error; 844 return error;
888 845
889 transport_configure_device(&starget->dev); 846 transport_configure_device(&starget->dev);
847
890 device_enable_async_suspend(&sdev->sdev_gendev); 848 device_enable_async_suspend(&sdev->sdev_gendev);
849 scsi_autopm_get_target(starget);
850 pm_runtime_set_active(&sdev->sdev_gendev);
851 pm_runtime_forbid(&sdev->sdev_gendev);
852 pm_runtime_enable(&sdev->sdev_gendev);
853 scsi_autopm_put_target(starget);
854
855 /* The following call will keep sdev active indefinitely, until
856 * its driver does a corresponding scsi_autopm_pm_device(). Only
857 * drivers supporting autosuspend will do this.
858 */
859 scsi_autopm_get_device(sdev);
860
891 error = device_add(&sdev->sdev_gendev); 861 error = device_add(&sdev->sdev_gendev);
892 if (error) { 862 if (error) {
893 printk(KERN_INFO "error 1\n"); 863 printk(KERN_INFO "error 1\n");
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 1e6d4793542c..e84026def1f4 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -30,8 +30,9 @@
30#include <scsi/scsi_transport.h> 30#include <scsi/scsi_transport.h>
31#include <scsi/scsi_transport_iscsi.h> 31#include <scsi/scsi_transport_iscsi.h>
32#include <scsi/iscsi_if.h> 32#include <scsi/iscsi_if.h>
33#include <scsi/scsi_cmnd.h>
33 34
34#define ISCSI_SESSION_ATTRS 22 35#define ISCSI_SESSION_ATTRS 23
35#define ISCSI_CONN_ATTRS 13 36#define ISCSI_CONN_ATTRS 13
36#define ISCSI_HOST_ATTRS 4 37#define ISCSI_HOST_ATTRS 4
37 38
@@ -534,6 +535,37 @@ static void iscsi_scan_session(struct work_struct *work)
534 atomic_dec(&ihost->nr_scans); 535 atomic_dec(&ihost->nr_scans);
535} 536}
536 537
538/**
539 * iscsi_block_scsi_eh - block scsi eh until session state has transistioned
540 * cmd: scsi cmd passed to scsi eh handler
541 *
542 * If the session is down this function will wait for the recovery
543 * timer to fire or for the session to be logged back in. If the
544 * recovery timer fires then FAST_IO_FAIL is returned. The caller
545 * should pass this error value to the scsi eh.
546 */
547int iscsi_block_scsi_eh(struct scsi_cmnd *cmd)
548{
549 struct iscsi_cls_session *session =
550 starget_to_session(scsi_target(cmd->device));
551 unsigned long flags;
552 int ret = 0;
553
554 spin_lock_irqsave(&session->lock, flags);
555 while (session->state != ISCSI_SESSION_LOGGED_IN) {
556 if (session->state == ISCSI_SESSION_FREE) {
557 ret = FAST_IO_FAIL;
558 break;
559 }
560 spin_unlock_irqrestore(&session->lock, flags);
561 msleep(1000);
562 spin_lock_irqsave(&session->lock, flags);
563 }
564 spin_unlock_irqrestore(&session->lock, flags);
565 return ret;
566}
567EXPORT_SYMBOL_GPL(iscsi_block_scsi_eh);
568
537static void session_recovery_timedout(struct work_struct *work) 569static void session_recovery_timedout(struct work_struct *work)
538{ 570{
539 struct iscsi_cls_session *session = 571 struct iscsi_cls_session *session =
@@ -1763,7 +1795,8 @@ iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0);
1763iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0); 1795iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0);
1764iscsi_session_attr(tgt_reset_tmo, ISCSI_PARAM_TGT_RESET_TMO, 0); 1796iscsi_session_attr(tgt_reset_tmo, ISCSI_PARAM_TGT_RESET_TMO, 0);
1765iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0); 1797iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0);
1766iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0) 1798iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0);
1799iscsi_session_attr(targetalias, ISCSI_PARAM_TARGET_ALIAS, 0);
1767 1800
1768static ssize_t 1801static ssize_t
1769show_priv_session_state(struct device *dev, struct device_attribute *attr, 1802show_priv_session_state(struct device *dev, struct device_attribute *attr,
@@ -1782,14 +1815,42 @@ show_priv_session_##field(struct device *dev, \
1782{ \ 1815{ \
1783 struct iscsi_cls_session *session = \ 1816 struct iscsi_cls_session *session = \
1784 iscsi_dev_to_session(dev->parent); \ 1817 iscsi_dev_to_session(dev->parent); \
1818 if (session->field == -1) \
1819 return sprintf(buf, "off\n"); \
1785 return sprintf(buf, format"\n", session->field); \ 1820 return sprintf(buf, format"\n", session->field); \
1786} 1821}
1787 1822
1788#define iscsi_priv_session_attr(field, format) \ 1823#define iscsi_priv_session_attr_store(field) \
1824static ssize_t \
1825store_priv_session_##field(struct device *dev, \
1826 struct device_attribute *attr, \
1827 const char *buf, size_t count) \
1828{ \
1829 int val; \
1830 char *cp; \
1831 struct iscsi_cls_session *session = \
1832 iscsi_dev_to_session(dev->parent); \
1833 if ((session->state == ISCSI_SESSION_FREE) || \
1834 (session->state == ISCSI_SESSION_FAILED)) \
1835 return -EBUSY; \
1836 if (strncmp(buf, "off", 3) == 0) \
1837 session->field = -1; \
1838 else { \
1839 val = simple_strtoul(buf, &cp, 0); \
1840 if (*cp != '\0' && *cp != '\n') \
1841 return -EINVAL; \
1842 session->field = val; \
1843 } \
1844 return count; \
1845}
1846
1847#define iscsi_priv_session_rw_attr(field, format) \
1789 iscsi_priv_session_attr_show(field, format) \ 1848 iscsi_priv_session_attr_show(field, format) \
1790static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO, show_priv_session_##field, \ 1849 iscsi_priv_session_attr_store(field) \
1791 NULL) 1850static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUGO, \
1792iscsi_priv_session_attr(recovery_tmo, "%d"); 1851 show_priv_session_##field, \
1852 store_priv_session_##field)
1853iscsi_priv_session_rw_attr(recovery_tmo, "%d");
1793 1854
1794/* 1855/*
1795 * iSCSI host attrs 1856 * iSCSI host attrs
@@ -1820,6 +1881,11 @@ do { \
1820 count++; \ 1881 count++; \
1821} while (0) 1882} while (0)
1822 1883
1884#define SETUP_PRIV_SESSION_RW_ATTR(field) \
1885do { \
1886 priv->session_attrs[count] = &dev_attr_priv_sess_##field; \
1887 count++; \
1888} while (0)
1823 1889
1824#define SETUP_SESSION_RD_ATTR(field, param_flag) \ 1890#define SETUP_SESSION_RD_ATTR(field, param_flag) \
1825do { \ 1891do { \
@@ -2006,7 +2072,8 @@ iscsi_register_transport(struct iscsi_transport *tt)
2006 SETUP_SESSION_RD_ATTR(tgt_reset_tmo,ISCSI_TGT_RESET_TMO); 2072 SETUP_SESSION_RD_ATTR(tgt_reset_tmo,ISCSI_TGT_RESET_TMO);
2007 SETUP_SESSION_RD_ATTR(ifacename, ISCSI_IFACE_NAME); 2073 SETUP_SESSION_RD_ATTR(ifacename, ISCSI_IFACE_NAME);
2008 SETUP_SESSION_RD_ATTR(initiatorname, ISCSI_INITIATOR_NAME); 2074 SETUP_SESSION_RD_ATTR(initiatorname, ISCSI_INITIATOR_NAME);
2009 SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo); 2075 SETUP_SESSION_RD_ATTR(targetalias, ISCSI_TARGET_ALIAS);
2076 SETUP_PRIV_SESSION_RW_ATTR(recovery_tmo);
2010 SETUP_PRIV_SESSION_RD_ATTR(state); 2077 SETUP_PRIV_SESSION_RD_ATTR(state);
2011 2078
2012 BUG_ON(count > ISCSI_SESSION_ATTRS); 2079 BUG_ON(count > ISCSI_SESSION_ATTRS);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 8802e48bc063..cc8a1d1d915a 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -759,6 +759,10 @@ static int sd_open(struct block_device *bdev, fmode_t mode)
759 759
760 sdev = sdkp->device; 760 sdev = sdkp->device;
761 761
762 retval = scsi_autopm_get_device(sdev);
763 if (retval)
764 goto error_autopm;
765
762 /* 766 /*
763 * If the device is in error recovery, wait until it is done. 767 * If the device is in error recovery, wait until it is done.
764 * If the device is offline, then disallow any access to it. 768 * If the device is offline, then disallow any access to it.
@@ -803,6 +807,8 @@ static int sd_open(struct block_device *bdev, fmode_t mode)
803 return 0; 807 return 0;
804 808
805error_out: 809error_out:
810 scsi_autopm_put_device(sdev);
811error_autopm:
806 scsi_disk_put(sdkp); 812 scsi_disk_put(sdkp);
807 return retval; 813 return retval;
808} 814}
@@ -834,6 +840,8 @@ static int sd_release(struct gendisk *disk, fmode_t mode)
834 * XXX and what if there are packets in flight and this close() 840 * XXX and what if there are packets in flight and this close()
835 * XXX is followed by a "rmmod sd_mod"? 841 * XXX is followed by a "rmmod sd_mod"?
836 */ 842 */
843
844 scsi_autopm_put_device(sdev);
837 scsi_disk_put(sdkp); 845 scsi_disk_put(sdkp);
838 return 0; 846 return 0;
839} 847}
@@ -2232,7 +2240,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
2232 if (sdp->removable) 2240 if (sdp->removable)
2233 gd->flags |= GENHD_FL_REMOVABLE; 2241 gd->flags |= GENHD_FL_REMOVABLE;
2234 2242
2235 dev_set_drvdata(dev, sdkp);
2236 add_disk(gd); 2243 add_disk(gd);
2237 sd_dif_config_host(sdkp); 2244 sd_dif_config_host(sdkp);
2238 2245
@@ -2240,6 +2247,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
2240 2247
2241 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", 2248 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
2242 sdp->removable ? "removable " : ""); 2249 sdp->removable ? "removable " : "");
2250 scsi_autopm_put_device(sdp);
2243 put_device(&sdkp->dev); 2251 put_device(&sdkp->dev);
2244} 2252}
2245 2253
@@ -2317,14 +2325,15 @@ static int sd_probe(struct device *dev)
2317 } 2325 }
2318 2326
2319 device_initialize(&sdkp->dev); 2327 device_initialize(&sdkp->dev);
2320 sdkp->dev.parent = &sdp->sdev_gendev; 2328 sdkp->dev.parent = dev;
2321 sdkp->dev.class = &sd_disk_class; 2329 sdkp->dev.class = &sd_disk_class;
2322 dev_set_name(&sdkp->dev, dev_name(&sdp->sdev_gendev)); 2330 dev_set_name(&sdkp->dev, dev_name(dev));
2323 2331
2324 if (device_add(&sdkp->dev)) 2332 if (device_add(&sdkp->dev))
2325 goto out_free_index; 2333 goto out_free_index;
2326 2334
2327 get_device(&sdp->sdev_gendev); 2335 get_device(dev);
2336 dev_set_drvdata(dev, sdkp);
2328 2337
2329 get_device(&sdkp->dev); /* prevent release before async_schedule */ 2338 get_device(&sdkp->dev); /* prevent release before async_schedule */
2330 async_schedule(sd_probe_async, sdkp); 2339 async_schedule(sd_probe_async, sdkp);
@@ -2358,8 +2367,10 @@ static int sd_remove(struct device *dev)
2358{ 2367{
2359 struct scsi_disk *sdkp; 2368 struct scsi_disk *sdkp;
2360 2369
2361 async_synchronize_full();
2362 sdkp = dev_get_drvdata(dev); 2370 sdkp = dev_get_drvdata(dev);
2371 scsi_autopm_get_device(sdkp->device);
2372
2373 async_synchronize_full();
2363 blk_queue_prep_rq(sdkp->device->request_queue, scsi_prep_fn); 2374 blk_queue_prep_rq(sdkp->device->request_queue, scsi_prep_fn);
2364 device_del(&sdkp->dev); 2375 device_del(&sdkp->dev);
2365 del_gendisk(sdkp->disk); 2376 del_gendisk(sdkp->disk);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index ef752b248c4d..2968c6b83ddb 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -245,6 +245,10 @@ sg_open(struct inode *inode, struct file *filp)
245 if (retval) 245 if (retval)
246 goto sg_put; 246 goto sg_put;
247 247
248 retval = scsi_autopm_get_device(sdp->device);
249 if (retval)
250 goto sdp_put;
251
248 if (!((flags & O_NONBLOCK) || 252 if (!((flags & O_NONBLOCK) ||
249 scsi_block_when_processing_errors(sdp->device))) { 253 scsi_block_when_processing_errors(sdp->device))) {
250 retval = -ENXIO; 254 retval = -ENXIO;
@@ -302,8 +306,11 @@ sg_open(struct inode *inode, struct file *filp)
302 } 306 }
303 retval = 0; 307 retval = 0;
304error_out: 308error_out:
305 if (retval) 309 if (retval) {
310 scsi_autopm_put_device(sdp->device);
311sdp_put:
306 scsi_device_put(sdp->device); 312 scsi_device_put(sdp->device);
313 }
307sg_put: 314sg_put:
308 if (sdp) 315 if (sdp)
309 sg_put_dev(sdp); 316 sg_put_dev(sdp);
@@ -327,6 +334,7 @@ sg_release(struct inode *inode, struct file *filp)
327 sdp->exclude = 0; 334 sdp->exclude = 0;
328 wake_up_interruptible(&sdp->o_excl_wait); 335 wake_up_interruptible(&sdp->o_excl_wait);
329 336
337 scsi_autopm_put_device(sdp->device);
330 kref_put(&sfp->f_ref, sg_remove_sfp); 338 kref_put(&sfp->f_ref, sg_remove_sfp);
331 return 0; 339 return 0;
332} 340}
@@ -729,6 +737,8 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
729 return k; /* probably out of space --> ENOMEM */ 737 return k; /* probably out of space --> ENOMEM */
730 } 738 }
731 if (sdp->detached) { 739 if (sdp->detached) {
740 if (srp->bio)
741 blk_end_request_all(srp->rq, -EIO);
732 sg_finish_rem_req(srp); 742 sg_finish_rem_req(srp);
733 return -ENODEV; 743 return -ENODEV;
734 } 744 }
diff --git a/include/scsi/fc/fc_els.h b/include/scsi/fc/fc_els.h
index f94328132a26..481abbd48e39 100644
--- a/include/scsi/fc/fc_els.h
+++ b/include/scsi/fc/fc_els.h
@@ -191,6 +191,7 @@ enum fc_els_rjt_reason {
191 ELS_RJT_UNAB = 0x09, /* unable to perform command request */ 191 ELS_RJT_UNAB = 0x09, /* unable to perform command request */
192 ELS_RJT_UNSUP = 0x0b, /* command not supported */ 192 ELS_RJT_UNSUP = 0x0b, /* command not supported */
193 ELS_RJT_INPROG = 0x0e, /* command already in progress */ 193 ELS_RJT_INPROG = 0x0e, /* command already in progress */
194 ELS_RJT_FIP = 0x20, /* FIP error */
194 ELS_RJT_VENDOR = 0xff, /* vendor specific error */ 195 ELS_RJT_VENDOR = 0xff, /* vendor specific error */
195}; 196};
196 197
@@ -212,6 +213,7 @@ enum fc_els_rjt_explan {
212 ELS_EXPL_UNAB_DATA = 0x2a, /* unable to supply requested data */ 213 ELS_EXPL_UNAB_DATA = 0x2a, /* unable to supply requested data */
213 ELS_EXPL_UNSUPR = 0x2c, /* Request not supported */ 214 ELS_EXPL_UNSUPR = 0x2c, /* Request not supported */
214 ELS_EXPL_INV_LEN = 0x2d, /* Invalid payload length */ 215 ELS_EXPL_INV_LEN = 0x2d, /* Invalid payload length */
216 ELS_EXPL_NOT_NEIGHBOR = 0x62, /* VN2VN_Port not in neighbor set */
215 /* TBD - above definitions incomplete */ 217 /* TBD - above definitions incomplete */
216}; 218};
217 219
@@ -405,6 +407,15 @@ struct fc_els_prli {
405}; 407};
406 408
407/* 409/*
410 * ELS_PRLO - Process logout request and response.
411 */
412struct fc_els_prlo {
413 __u8 prlo_cmd; /* command */
414 __u8 prlo_obs; /* obsolete, but shall be set to 10h */
415 __be16 prlo_len; /* payload length */
416};
417
418/*
408 * ELS_ADISC payload 419 * ELS_ADISC payload
409 */ 420 */
410struct fc_els_adisc { 421struct fc_els_adisc {
diff --git a/include/scsi/fc/fc_fcoe.h b/include/scsi/fc/fc_fcoe.h
index e6ad3d2ae475..d5dcd6062815 100644
--- a/include/scsi/fc/fc_fcoe.h
+++ b/include/scsi/fc/fc_fcoe.h
@@ -22,23 +22,18 @@
22 22
23/* 23/*
24 * FCoE - Fibre Channel over Ethernet. 24 * FCoE - Fibre Channel over Ethernet.
25 * See T11 FC-BB-5 Rev 2.00 (09-056v5.pdf)
25 */ 26 */
26 27
27/* 28/*
28 * FC_FCOE_OUI hasn't been standardized yet. XXX TBD. 29 * Default FC_FCOE_OUI / FC-MAP value.
29 */ 30 */
30#ifndef FC_FCOE_OUI 31#define FC_FCOE_OUI 0x0efc00 /* upper 24 bits of FCOE MAC */
31#define FC_FCOE_OUI 0x0efc00 /* upper 24 bits of FCOE dest MAC TBD */
32#endif
33 32
34/* 33/*
35 * The destination MAC address for the fabric login may get a different OUI. 34 * Fabric Login (FLOGI) MAC for non-FIP use. Non-FIP use is deprecated.
36 * This isn't standardized yet.
37 */ 35 */
38#ifndef FC_FCOE_FLOGI_MAC
39/* gateway MAC - TBD */
40#define FC_FCOE_FLOGI_MAC { 0x0e, 0xfc, 0x00, 0xff, 0xff, 0xfe } 36#define FC_FCOE_FLOGI_MAC { 0x0e, 0xfc, 0x00, 0xff, 0xff, 0xfe }
41#endif
42 37
43#define FC_FCOE_VER 0 /* version */ 38#define FC_FCOE_VER 0 /* version */
44 39
@@ -51,8 +46,6 @@
51 46
52/* 47/*
53 * FCoE frame header - 14 bytes 48 * FCoE frame header - 14 bytes
54 *
55 * This is the August 2007 version of the FCoE header as defined by T11.
56 * This follows the VLAN header, which includes the ethertype. 49 * This follows the VLAN header, which includes the ethertype.
57 */ 50 */
58struct fcoe_hdr { 51struct fcoe_hdr {
diff --git a/include/scsi/fc/fc_fip.h b/include/scsi/fc/fc_fip.h
index 17baa19380f0..ae25d4ab2548 100644
--- a/include/scsi/fc/fc_fip.h
+++ b/include/scsi/fc/fc_fip.h
@@ -17,9 +17,12 @@
17#ifndef _FC_FIP_H_ 17#ifndef _FC_FIP_H_
18#define _FC_FIP_H_ 18#define _FC_FIP_H_
19 19
20#include <scsi/fc/fc_ns.h>
21
20/* 22/*
21 * This version is based on: 23 * This version is based on:
22 * http://www.t11.org/ftp/t11/pub/fc/bb-5/08-543v1.pdf 24 * http://www.t11.org/ftp/t11/pub/fc/bb-5/08-543v1.pdf
25 * and T11 FC-BB-6 10-019v4.pdf (June 2010 VN2VN proposal)
23 */ 26 */
24 27
25#define FIP_DEF_PRI 128 /* default selection priority */ 28#define FIP_DEF_PRI 128 /* default selection priority */
@@ -29,11 +32,24 @@
29#define FIP_FCF_FUZZ 100 /* random time added by FCF (mS) */ 32#define FIP_FCF_FUZZ 100 /* random time added by FCF (mS) */
30 33
31/* 34/*
35 * VN2VN proposed-standard values.
36 */
37#define FIP_VN_FC_MAP 0x0efd00 /* MAC OUI for VN2VN use */
38#define FIP_VN_PROBE_WAIT 100 /* interval between VN2VN probes (ms) */
39#define FIP_VN_ANN_WAIT 400 /* interval between VN2VN announcements (ms) */
40#define FIP_VN_RLIM_INT 10000 /* interval between probes when rate limited */
41#define FIP_VN_RLIM_COUNT 10 /* number of probes before rate limiting */
42#define FIP_VN_BEACON_INT 8000 /* interval between VN2VN beacons */
43#define FIP_VN_BEACON_FUZZ 100 /* random time to add to beacon period (ms) */
44
45/*
32 * Multicast MAC addresses. T11-adopted. 46 * Multicast MAC addresses. T11-adopted.
33 */ 47 */
34#define FIP_ALL_FCOE_MACS ((u8[6]) { 1, 0x10, 0x18, 1, 0, 0 }) 48#define FIP_ALL_FCOE_MACS ((__u8[6]) { 1, 0x10, 0x18, 1, 0, 0 })
35#define FIP_ALL_ENODE_MACS ((u8[6]) { 1, 0x10, 0x18, 1, 0, 1 }) 49#define FIP_ALL_ENODE_MACS ((__u8[6]) { 1, 0x10, 0x18, 1, 0, 1 })
36#define FIP_ALL_FCF_MACS ((u8[6]) { 1, 0x10, 0x18, 1, 0, 2 }) 50#define FIP_ALL_FCF_MACS ((__u8[6]) { 1, 0x10, 0x18, 1, 0, 2 })
51#define FIP_ALL_VN2VN_MACS ((__u8[6]) { 1, 0x10, 0x18, 1, 0, 4 })
52#define FIP_ALL_P2P_MACS ((__u8[6]) { 1, 0x10, 0x18, 1, 0, 5 })
37 53
38#define FIP_VER 1 /* version for fip_header */ 54#define FIP_VER 1 /* version for fip_header */
39 55
@@ -60,6 +76,7 @@ enum fip_opcode {
60 FIP_OP_LS = 2, /* Link Service request or reply */ 76 FIP_OP_LS = 2, /* Link Service request or reply */
61 FIP_OP_CTRL = 3, /* Keep Alive / Link Reset */ 77 FIP_OP_CTRL = 3, /* Keep Alive / Link Reset */
62 FIP_OP_VLAN = 4, /* VLAN discovery */ 78 FIP_OP_VLAN = 4, /* VLAN discovery */
79 FIP_OP_VN2VN = 5, /* VN2VN operation */
63 FIP_OP_VENDOR_MIN = 0xfff8, /* min vendor-specific opcode */ 80 FIP_OP_VENDOR_MIN = 0xfff8, /* min vendor-specific opcode */
64 FIP_OP_VENDOR_MAX = 0xfffe, /* max vendor-specific opcode */ 81 FIP_OP_VENDOR_MAX = 0xfffe, /* max vendor-specific opcode */
65}; 82};
@@ -97,11 +114,23 @@ enum fip_vlan_subcode {
97}; 114};
98 115
99/* 116/*
117 * Subcodes for FIP_OP_VN2VN.
118 */
119enum fip_vn2vn_subcode {
120 FIP_SC_VN_PROBE_REQ = 1, /* probe request */
121 FIP_SC_VN_PROBE_REP = 2, /* probe reply */
122 FIP_SC_VN_CLAIM_NOTIFY = 3, /* claim notification */
123 FIP_SC_VN_CLAIM_REP = 4, /* claim response */
124 FIP_SC_VN_BEACON = 5, /* beacon */
125};
126
127/*
100 * flags in header fip_flags. 128 * flags in header fip_flags.
101 */ 129 */
102enum fip_flag { 130enum fip_flag {
103 FIP_FL_FPMA = 0x8000, /* supports FPMA fabric-provided MACs */ 131 FIP_FL_FPMA = 0x8000, /* supports FPMA fabric-provided MACs */
104 FIP_FL_SPMA = 0x4000, /* supports SPMA server-provided MACs */ 132 FIP_FL_SPMA = 0x4000, /* supports SPMA server-provided MACs */
133 FIP_FL_REC_OR_P2P = 0x0008, /* configured addr or point-to-point */
105 FIP_FL_AVAIL = 0x0004, /* available for FLOGI/ELP */ 134 FIP_FL_AVAIL = 0x0004, /* available for FLOGI/ELP */
106 FIP_FL_SOL = 0x0002, /* this is a solicited message */ 135 FIP_FL_SOL = 0x0002, /* this is a solicited message */
107 FIP_FL_FPORT = 0x0001, /* sent from an F port */ 136 FIP_FL_FPORT = 0x0001, /* sent from an F port */
@@ -130,6 +159,7 @@ enum fip_desc_type {
130 FIP_DT_FKA = 12, /* advertisement keep-alive period */ 159 FIP_DT_FKA = 12, /* advertisement keep-alive period */
131 FIP_DT_VENDOR = 13, /* vendor ID */ 160 FIP_DT_VENDOR = 13, /* vendor ID */
132 FIP_DT_VLAN = 14, /* vlan number */ 161 FIP_DT_VLAN = 14, /* vlan number */
162 FIP_DT_FC4F = 15, /* FC-4 features */
133 FIP_DT_LIMIT, /* max defined desc_type + 1 */ 163 FIP_DT_LIMIT, /* max defined desc_type + 1 */
134 FIP_DT_VENDOR_BASE = 128, /* first vendor-specific desc_type */ 164 FIP_DT_VENDOR_BASE = 128, /* first vendor-specific desc_type */
135}; 165};
@@ -229,6 +259,16 @@ enum fip_fka_flags {
229/* FIP_DT_FKA flags */ 259/* FIP_DT_FKA flags */
230 260
231/* 261/*
262 * FIP_DT_FC4F - FC-4 features.
263 */
264struct fip_fc4_feat {
265 struct fip_desc fd_desc;
266 __u8 fd_resvd[2];
267 struct fc_ns_fts fd_fts;
268 struct fc_ns_ff fd_ff;
269} __attribute__((packed));
270
271/*
232 * FIP_DT_VENDOR descriptor. 272 * FIP_DT_VENDOR descriptor.
233 */ 273 */
234struct fip_vendor_desc { 274struct fip_vendor_desc {
diff --git a/include/scsi/fc/fc_ns.h b/include/scsi/fc/fc_ns.h
index e7d3ac497d7d..185015dd1166 100644
--- a/include/scsi/fc/fc_ns.h
+++ b/include/scsi/fc/fc_ns.h
@@ -100,6 +100,13 @@ struct fc_ns_fts {
100}; 100};
101 101
102/* 102/*
103 * FC4-features object.
104 */
105struct fc_ns_ff {
106 __be32 fd_feat[FC_NS_TYPES * 4 / FC_NS_BPW]; /* 4-bits per FC-type */
107};
108
109/*
103 * GID_PT request. 110 * GID_PT request.
104 */ 111 */
105struct fc_ns_gid_pt { 112struct fc_ns_gid_pt {
diff --git a/include/scsi/fc_encode.h b/include/scsi/fc_encode.h
index 9b4867c9c2d2..6d293c846a46 100644
--- a/include/scsi/fc_encode.h
+++ b/include/scsi/fc_encode.h
@@ -21,6 +21,13 @@
21#define _FC_ENCODE_H_ 21#define _FC_ENCODE_H_
22#include <asm/unaligned.h> 22#include <asm/unaligned.h>
23 23
24/*
25 * F_CTL values for simple requests and responses.
26 */
27#define FC_FCTL_REQ (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)
28#define FC_FCTL_RESP (FC_FC_EX_CTX | FC_FC_LAST_SEQ | \
29 FC_FC_END_SEQ | FC_FC_SEQ_INIT)
30
24struct fc_ns_rft { 31struct fc_ns_rft {
25 struct fc_ns_fid fid; /* port ID object */ 32 struct fc_ns_fid fid; /* port ID object */
26 struct fc_ns_fts fts; /* FC4-types object */ 33 struct fc_ns_fts fts; /* FC4-types object */
diff --git a/include/scsi/fc_frame.h b/include/scsi/fc_frame.h
index 4d3e9c7b7c57..4ad02041b667 100644
--- a/include/scsi/fc_frame.h
+++ b/include/scsi/fc_frame.h
@@ -30,6 +30,23 @@
30 30
31#include <linux/if_ether.h> 31#include <linux/if_ether.h>
32 32
33/* some helpful macros */
34
35#define ntohll(x) be64_to_cpu(x)
36#define htonll(x) cpu_to_be64(x)
37
38static inline u32 ntoh24(const u8 *p)
39{
40 return (p[0] << 16) | (p[1] << 8) | p[2];
41}
42
43static inline void hton24(u8 *p, u32 v)
44{
45 p[0] = (v >> 16) & 0xff;
46 p[1] = (v >> 8) & 0xff;
47 p[2] = v & 0xff;
48}
49
33/* 50/*
34 * The fc_frame interface is used to pass frame data between functions. 51 * The fc_frame interface is used to pass frame data between functions.
35 * The frame includes the data buffer, length, and SOF / EOF delimiter types. 52 * The frame includes the data buffer, length, and SOF / EOF delimiter types.
@@ -51,6 +68,7 @@
51#define fr_sof(fp) (fr_cb(fp)->fr_sof) 68#define fr_sof(fp) (fr_cb(fp)->fr_sof)
52#define fr_eof(fp) (fr_cb(fp)->fr_eof) 69#define fr_eof(fp) (fr_cb(fp)->fr_eof)
53#define fr_flags(fp) (fr_cb(fp)->fr_flags) 70#define fr_flags(fp) (fr_cb(fp)->fr_flags)
71#define fr_encaps(fp) (fr_cb(fp)->fr_encaps)
54#define fr_max_payload(fp) (fr_cb(fp)->fr_max_payload) 72#define fr_max_payload(fp) (fr_cb(fp)->fr_max_payload)
55#define fr_fsp(fp) (fr_cb(fp)->fr_fsp) 73#define fr_fsp(fp) (fr_cb(fp)->fr_fsp)
56#define fr_crc(fp) (fr_cb(fp)->fr_crc) 74#define fr_crc(fp) (fr_cb(fp)->fr_crc)
@@ -66,9 +84,10 @@ struct fcoe_rcv_info {
66 struct fc_fcp_pkt *fr_fsp; /* for the corresponding fcp I/O */ 84 struct fc_fcp_pkt *fr_fsp; /* for the corresponding fcp I/O */
67 u32 fr_crc; 85 u32 fr_crc;
68 u16 fr_max_payload; /* max FC payload */ 86 u16 fr_max_payload; /* max FC payload */
69 enum fc_sof fr_sof; /* start of frame delimiter */ 87 u8 fr_sof; /* start of frame delimiter */
70 enum fc_eof fr_eof; /* end of frame delimiter */ 88 u8 fr_eof; /* end of frame delimiter */
71 u8 fr_flags; /* flags - see below */ 89 u8 fr_flags; /* flags - see below */
90 u8 fr_encaps; /* LLD encapsulation info (e.g. FIP) */
72 u8 granted_mac[ETH_ALEN]; /* FCoE MAC address */ 91 u8 granted_mac[ETH_ALEN]; /* FCoE MAC address */
73}; 92};
74 93
@@ -97,6 +116,7 @@ static inline void fc_frame_init(struct fc_frame *fp)
97 fr_dev(fp) = NULL; 116 fr_dev(fp) = NULL;
98 fr_seq(fp) = NULL; 117 fr_seq(fp) = NULL;
99 fr_flags(fp) = 0; 118 fr_flags(fp) = 0;
119 fr_encaps(fp) = 0;
100} 120}
101 121
102struct fc_frame *fc_frame_alloc_fill(struct fc_lport *, size_t payload_len); 122struct fc_frame *fc_frame_alloc_fill(struct fc_lport *, size_t payload_len);
@@ -136,13 +156,39 @@ static inline int fc_frame_is_linear(struct fc_frame *fp)
136 156
137/* 157/*
138 * Get frame header from message in fc_frame structure. 158 * Get frame header from message in fc_frame structure.
159 * This version doesn't do a length check.
160 */
161static inline
162struct fc_frame_header *__fc_frame_header_get(const struct fc_frame *fp)
163{
164 return (struct fc_frame_header *)fr_hdr(fp);
165}
166
167/*
168 * Get frame header from message in fc_frame structure.
139 * This hides a cast and provides a place to add some checking. 169 * This hides a cast and provides a place to add some checking.
140 */ 170 */
141static inline 171static inline
142struct fc_frame_header *fc_frame_header_get(const struct fc_frame *fp) 172struct fc_frame_header *fc_frame_header_get(const struct fc_frame *fp)
143{ 173{
144 WARN_ON(fr_len(fp) < sizeof(struct fc_frame_header)); 174 WARN_ON(fr_len(fp) < sizeof(struct fc_frame_header));
145 return (struct fc_frame_header *) fr_hdr(fp); 175 return __fc_frame_header_get(fp);
176}
177
178/*
179 * Get source FC_ID (S_ID) from frame header in message.
180 */
181static inline u32 fc_frame_sid(const struct fc_frame *fp)
182{
183 return ntoh24(__fc_frame_header_get(fp)->fh_s_id);
184}
185
186/*
187 * Get destination FC_ID (D_ID) from frame header in message.
188 */
189static inline u32 fc_frame_did(const struct fc_frame *fp)
190{
191 return ntoh24(__fc_frame_header_get(fp)->fh_d_id);
146} 192}
147 193
148/* 194/*
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
index 66d377b9c72b..a8631acd37c3 100644
--- a/include/scsi/iscsi_if.h
+++ b/include/scsi/iscsi_if.h
@@ -313,6 +313,7 @@ enum iscsi_param {
313 ISCSI_PARAM_INITIATOR_NAME, 313 ISCSI_PARAM_INITIATOR_NAME,
314 314
315 ISCSI_PARAM_TGT_RESET_TMO, 315 ISCSI_PARAM_TGT_RESET_TMO,
316 ISCSI_PARAM_TARGET_ALIAS,
316 /* must always be last */ 317 /* must always be last */
317 ISCSI_PARAM_MAX, 318 ISCSI_PARAM_MAX,
318}; 319};
@@ -353,6 +354,7 @@ enum iscsi_param {
353#define ISCSI_ISID (1ULL << ISCSI_PARAM_ISID) 354#define ISCSI_ISID (1ULL << ISCSI_PARAM_ISID)
354#define ISCSI_INITIATOR_NAME (1ULL << ISCSI_PARAM_INITIATOR_NAME) 355#define ISCSI_INITIATOR_NAME (1ULL << ISCSI_PARAM_INITIATOR_NAME)
355#define ISCSI_TGT_RESET_TMO (1ULL << ISCSI_PARAM_TGT_RESET_TMO) 356#define ISCSI_TGT_RESET_TMO (1ULL << ISCSI_PARAM_TGT_RESET_TMO)
357#define ISCSI_TARGET_ALIAS (1ULL << ISCSI_PARAM_TARGET_ALIAS)
356 358
357/* iSCSI HBA params */ 359/* iSCSI HBA params */
358enum iscsi_host_param { 360enum iscsi_host_param {
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 7495c0ba67ee..14be49b44e84 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -42,24 +42,6 @@
42#define FC_EX_TIMEOUT 1 /* Exchange timeout */ 42#define FC_EX_TIMEOUT 1 /* Exchange timeout */
43#define FC_EX_CLOSED 2 /* Exchange closed */ 43#define FC_EX_CLOSED 2 /* Exchange closed */
44 44
45/* some helpful macros */
46
47#define ntohll(x) be64_to_cpu(x)
48#define htonll(x) cpu_to_be64(x)
49
50
51static inline u32 ntoh24(const u8 *p)
52{
53 return (p[0] << 16) | (p[1] << 8) | p[2];
54}
55
56static inline void hton24(u8 *p, u32 v)
57{
58 p[0] = (v >> 16) & 0xff;
59 p[1] = (v >> 8) & 0xff;
60 p[2] = v & 0xff;
61}
62
63/** 45/**
64 * enum fc_lport_state - Local port states 46 * enum fc_lport_state - Local port states
65 * @LPORT_ST_DISABLED: Disabled 47 * @LPORT_ST_DISABLED: Disabled
@@ -97,25 +79,25 @@ enum fc_disc_event {
97/** 79/**
98 * enum fc_rport_state - Remote port states 80 * enum fc_rport_state - Remote port states
99 * @RPORT_ST_INIT: Initialized 81 * @RPORT_ST_INIT: Initialized
82 * @RPORT_ST_FLOGI: Waiting for FLOGI completion for point-to-multipoint
83 * @RPORT_ST_PLOGI_WAIT: Waiting for peer to login for point-to-multipoint
100 * @RPORT_ST_PLOGI: Waiting for PLOGI completion 84 * @RPORT_ST_PLOGI: Waiting for PLOGI completion
101 * @RPORT_ST_PRLI: Waiting for PRLI completion 85 * @RPORT_ST_PRLI: Waiting for PRLI completion
102 * @RPORT_ST_RTV: Waiting for RTV completion 86 * @RPORT_ST_RTV: Waiting for RTV completion
103 * @RPORT_ST_READY: Ready for use 87 * @RPORT_ST_READY: Ready for use
104 * @RPORT_ST_LOGO: Remote port logout (LOGO) sent
105 * @RPORT_ST_ADISC: Discover Address sent 88 * @RPORT_ST_ADISC: Discover Address sent
106 * @RPORT_ST_DELETE: Remote port being deleted 89 * @RPORT_ST_DELETE: Remote port being deleted
107 * @RPORT_ST_RESTART: Remote port being deleted and will restart
108*/ 90*/
109enum fc_rport_state { 91enum fc_rport_state {
110 RPORT_ST_INIT, 92 RPORT_ST_INIT,
93 RPORT_ST_FLOGI,
94 RPORT_ST_PLOGI_WAIT,
111 RPORT_ST_PLOGI, 95 RPORT_ST_PLOGI,
112 RPORT_ST_PRLI, 96 RPORT_ST_PRLI,
113 RPORT_ST_RTV, 97 RPORT_ST_RTV,
114 RPORT_ST_READY, 98 RPORT_ST_READY,
115 RPORT_ST_LOGO,
116 RPORT_ST_ADISC, 99 RPORT_ST_ADISC,
117 RPORT_ST_DELETE, 100 RPORT_ST_DELETE,
118 RPORT_ST_RESTART,
119}; 101};
120 102
121/** 103/**
@@ -173,6 +155,7 @@ struct fc_rport_libfc_priv {
173 u16 flags; 155 u16 flags;
174 #define FC_RP_FLAGS_REC_SUPPORTED (1 << 0) 156 #define FC_RP_FLAGS_REC_SUPPORTED (1 << 0)
175 #define FC_RP_FLAGS_RETRY (1 << 1) 157 #define FC_RP_FLAGS_RETRY (1 << 1)
158 #define FC_RP_STARTED (1 << 2)
176 unsigned int e_d_tov; 159 unsigned int e_d_tov;
177 unsigned int r_a_tov; 160 unsigned int r_a_tov;
178}; 161};
@@ -185,16 +168,18 @@ struct fc_rport_libfc_priv {
185 * @rp_state: Enumeration that tracks progress of PLOGI, PRLI, 168 * @rp_state: Enumeration that tracks progress of PLOGI, PRLI,
186 * and RTV exchanges 169 * and RTV exchanges
187 * @ids: The remote port identifiers and roles 170 * @ids: The remote port identifiers and roles
188 * @flags: REC and RETRY supported flags 171 * @flags: STARTED, REC and RETRY_SUPPORTED flags
189 * @max_seq: Maximum number of concurrent sequences 172 * @max_seq: Maximum number of concurrent sequences
190 * @disc_id: The discovery identifier 173 * @disc_id: The discovery identifier
191 * @maxframe_size: The maximum frame size 174 * @maxframe_size: The maximum frame size
192 * @retries: The retry count for the current state 175 * @retries: The retry count for the current state
176 * @major_retries: The retry count for the entire PLOGI/PRLI state machine
193 * @e_d_tov: Error detect timeout value (in msec) 177 * @e_d_tov: Error detect timeout value (in msec)
194 * @r_a_tov: Resource allocation timeout value (in msec) 178 * @r_a_tov: Resource allocation timeout value (in msec)
195 * @rp_mutex: The mutex that protects the remote port 179 * @rp_mutex: The mutex that protects the remote port
196 * @retry_work: Handle for retries 180 * @retry_work: Handle for retries
197 * @event_callback: Callback when READY, FAILED or LOGO states complete 181 * @event_callback: Callback when READY, FAILED or LOGO states complete
182 * @rcu: Structure used for freeing in an RCU-safe manner
198 */ 183 */
199struct fc_rport_priv { 184struct fc_rport_priv {
200 struct fc_lport *local_port; 185 struct fc_lport *local_port;
@@ -207,6 +192,7 @@ struct fc_rport_priv {
207 u16 disc_id; 192 u16 disc_id;
208 u16 maxframe_size; 193 u16 maxframe_size;
209 unsigned int retries; 194 unsigned int retries;
195 unsigned int major_retries;
210 unsigned int e_d_tov; 196 unsigned int e_d_tov;
211 unsigned int r_a_tov; 197 unsigned int r_a_tov;
212 struct mutex rp_mutex; 198 struct mutex rp_mutex;
@@ -216,6 +202,7 @@ struct fc_rport_priv {
216 struct list_head peers; 202 struct list_head peers;
217 struct work_struct event_work; 203 struct work_struct event_work;
218 u32 supported_classes; 204 u32 supported_classes;
205 struct rcu_head rcu;
219}; 206};
220 207
221/** 208/**
@@ -262,14 +249,12 @@ struct fcoe_dev_stats {
262 249
263/** 250/**
264 * struct fc_seq_els_data - ELS data used for passing ELS specific responses 251 * struct fc_seq_els_data - ELS data used for passing ELS specific responses
265 * @fp: The ELS frame
266 * @reason: The reason for rejection 252 * @reason: The reason for rejection
267 * @explan: The explaination of the rejection 253 * @explan: The explaination of the rejection
268 * 254 *
269 * Mainly used by the exchange manager layer. 255 * Mainly used by the exchange manager layer.
270 */ 256 */
271struct fc_seq_els_data { 257struct fc_seq_els_data {
272 struct fc_frame *fp;
273 enum fc_els_rjt_reason reason; 258 enum fc_els_rjt_reason reason;
274 enum fc_els_rjt_explan explan; 259 enum fc_els_rjt_explan explan;
275}; 260};
@@ -405,6 +390,7 @@ struct fc_seq {
405 * @esb_stat: ESB exchange status 390 * @esb_stat: ESB exchange status
406 * @r_a_tov: Resouce allocation time out value (in msecs) 391 * @r_a_tov: Resouce allocation time out value (in msecs)
407 * @seq_id: The next sequence ID to use 392 * @seq_id: The next sequence ID to use
393 * @encaps: encapsulation information for lower-level driver
408 * @f_ctl: F_CTL flags for the sequence 394 * @f_ctl: F_CTL flags for the sequence
409 * @fh_type: The frame type 395 * @fh_type: The frame type
410 * @class: The class of service 396 * @class: The class of service
@@ -436,6 +422,7 @@ struct fc_exch {
436 u32 esb_stat; 422 u32 esb_stat;
437 u32 r_a_tov; 423 u32 r_a_tov;
438 u8 seq_id; 424 u8 seq_id;
425 u8 encaps;
439 u32 f_ctl; 426 u32 f_ctl;
440 u8 fh_type; 427 u8 fh_type;
441 enum fc_class class; 428 enum fc_class class;
@@ -530,12 +517,11 @@ struct libfc_function_template {
530 struct fc_frame *); 517 struct fc_frame *);
531 518
532 /* 519 /*
533 * Send an ELS response using infomation from a previous 520 * Send an ELS response using infomation from the received frame.
534 * exchange and sequence.
535 * 521 *
536 * STATUS: OPTIONAL 522 * STATUS: OPTIONAL
537 */ 523 */
538 void (*seq_els_rsp_send)(struct fc_seq *, enum fc_els_cmd, 524 void (*seq_els_rsp_send)(struct fc_frame *, enum fc_els_cmd,
539 struct fc_seq_els_data *); 525 struct fc_seq_els_data *);
540 526
541 /* 527 /*
@@ -567,6 +553,13 @@ struct libfc_function_template {
567 struct fc_seq *(*seq_start_next)(struct fc_seq *); 553 struct fc_seq *(*seq_start_next)(struct fc_seq *);
568 554
569 /* 555 /*
556 * Assign a sequence for an incoming request frame.
557 *
558 * STATUS: OPTIONAL
559 */
560 struct fc_seq *(*seq_assign)(struct fc_lport *, struct fc_frame *);
561
562 /*
570 * Reset an exchange manager, completing all sequences and exchanges. 563 * Reset an exchange manager, completing all sequences and exchanges.
571 * If s_id is non-zero, reset only exchanges originating from that FID. 564 * If s_id is non-zero, reset only exchanges originating from that FID.
572 * If d_id is non-zero, reset only exchanges sending to that FID. 565 * If d_id is non-zero, reset only exchanges sending to that FID.
@@ -587,8 +580,7 @@ struct libfc_function_template {
587 * 580 *
588 * STATUS: OPTIONAL 581 * STATUS: OPTIONAL
589 */ 582 */
590 void (*lport_recv)(struct fc_lport *, struct fc_seq *, 583 void (*lport_recv)(struct fc_lport *, struct fc_frame *);
591 struct fc_frame *);
592 584
593 /* 585 /*
594 * Reset the local port. 586 * Reset the local port.
@@ -650,8 +642,7 @@ struct libfc_function_template {
650 * 642 *
651 * STATUS: OPTIONAL 643 * STATUS: OPTIONAL
652 */ 644 */
653 void (*rport_recv_req)(struct fc_seq *, struct fc_frame *, 645 void (*rport_recv_req)(struct fc_lport *, struct fc_frame *);
654 struct fc_lport *);
655 646
656 /* 647 /*
657 * lookup an rport by it's port ID. 648 * lookup an rport by it's port ID.
@@ -697,8 +688,7 @@ struct libfc_function_template {
697 * 688 *
698 * STATUS: OPTIONAL 689 * STATUS: OPTIONAL
699 */ 690 */
700 void (*disc_recv_req)(struct fc_seq *, struct fc_frame *, 691 void (*disc_recv_req)(struct fc_lport *, struct fc_frame *);
701 struct fc_lport *);
702 692
703 /* 693 /*
704 * Start discovery for a local port. 694 * Start discovery for a local port.
@@ -736,7 +726,7 @@ struct libfc_function_template {
736 * @buf_len: Length of the discovery buffer 726 * @buf_len: Length of the discovery buffer
737 * @disc_id: Discovery ID 727 * @disc_id: Discovery ID
738 * @rports: List of discovered remote ports 728 * @rports: List of discovered remote ports
739 * @lport: The local port that discovery is for 729 * @priv: Private pointer for use by discovery code
740 * @disc_mutex: Mutex that protects the discovery context 730 * @disc_mutex: Mutex that protects the discovery context
741 * @partial_buf: Partial name buffer (if names are returned 731 * @partial_buf: Partial name buffer (if names are returned
742 * in multiple frames) 732 * in multiple frames)
@@ -752,7 +742,7 @@ struct fc_disc {
752 u16 disc_id; 742 u16 disc_id;
753 743
754 struct list_head rports; 744 struct list_head rports;
755 struct fc_lport *lport; 745 void *priv;
756 struct mutex disc_mutex; 746 struct mutex disc_mutex;
757 struct fc_gpn_ft_resp partial_buf; 747 struct fc_gpn_ft_resp partial_buf;
758 struct delayed_work disc_work; 748 struct delayed_work disc_work;
@@ -796,6 +786,7 @@ struct fc_disc {
796 * @mfs: The maximum Fibre Channel payload size 786 * @mfs: The maximum Fibre Channel payload size
797 * @max_retry_count: The maximum retry attempts 787 * @max_retry_count: The maximum retry attempts
798 * @max_rport_retry_count: The maximum remote port retry attempts 788 * @max_rport_retry_count: The maximum remote port retry attempts
789 * @rport_priv_size: Size needed by driver after struct fc_rport_priv
799 * @lro_xid: The maximum XID for LRO 790 * @lro_xid: The maximum XID for LRO
800 * @lso_max: The maximum large offload send size 791 * @lso_max: The maximum large offload send size
801 * @fcts: FC-4 type mask 792 * @fcts: FC-4 type mask
@@ -842,9 +833,11 @@ struct fc_lport {
842 u32 lro_enabled:1; 833 u32 lro_enabled:1;
843 u32 does_npiv:1; 834 u32 does_npiv:1;
844 u32 npiv_enabled:1; 835 u32 npiv_enabled:1;
836 u32 point_to_multipoint:1;
845 u32 mfs; 837 u32 mfs;
846 u8 max_retry_count; 838 u8 max_retry_count;
847 u8 max_rport_retry_count; 839 u8 max_rport_retry_count;
840 u16 rport_priv_size;
848 u16 link_speed; 841 u16 link_speed;
849 u16 link_supported_speeds; 842 u16 link_supported_speeds;
850 u16 lro_xid; 843 u16 lro_xid;
@@ -986,6 +979,7 @@ int fc_set_mfs(struct fc_lport *, u32 mfs);
986struct fc_lport *libfc_vport_create(struct fc_vport *, int privsize); 979struct fc_lport *libfc_vport_create(struct fc_vport *, int privsize);
987struct fc_lport *fc_vport_id_lookup(struct fc_lport *, u32 port_id); 980struct fc_lport *fc_vport_id_lookup(struct fc_lport *, u32 port_id);
988int fc_lport_bsg_request(struct fc_bsg_job *); 981int fc_lport_bsg_request(struct fc_bsg_job *);
982void fc_lport_set_local_id(struct fc_lport *, u32 port_id);
989 983
990/* 984/*
991 * REMOTE PORT LAYER 985 * REMOTE PORT LAYER
@@ -998,6 +992,11 @@ void fc_rport_terminate_io(struct fc_rport *);
998 *****************************/ 992 *****************************/
999int fc_disc_init(struct fc_lport *); 993int fc_disc_init(struct fc_lport *);
1000 994
995static inline struct fc_lport *fc_disc_lport(struct fc_disc *disc)
996{
997 return container_of(disc, struct fc_lport, disc);
998}
999
1001/* 1000/*
1002 * FCP LAYER 1001 * FCP LAYER
1003 *****************************/ 1002 *****************************/
@@ -1029,6 +1028,10 @@ struct fc_seq *fc_elsct_send(struct fc_lport *, u32 did,
1029 void *arg, u32 timer_msec); 1028 void *arg, u32 timer_msec);
1030void fc_lport_flogi_resp(struct fc_seq *, struct fc_frame *, void *); 1029void fc_lport_flogi_resp(struct fc_seq *, struct fc_frame *, void *);
1031void fc_lport_logo_resp(struct fc_seq *, struct fc_frame *, void *); 1030void fc_lport_logo_resp(struct fc_seq *, struct fc_frame *, void *);
1031void fc_fill_reply_hdr(struct fc_frame *, const struct fc_frame *,
1032 enum fc_rctl, u32 parm_offset);
1033void fc_fill_hdr(struct fc_frame *, const struct fc_frame *,
1034 enum fc_rctl, u32 f_ctl, u16 seq_cnt, u32 parm_offset);
1032 1035
1033 1036
1034/* 1037/*
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index ec13f51531f8..06f1b5a8ed19 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -26,6 +26,7 @@
26#include <linux/netdevice.h> 26#include <linux/netdevice.h>
27#include <linux/skbuff.h> 27#include <linux/skbuff.h>
28#include <linux/workqueue.h> 28#include <linux/workqueue.h>
29#include <linux/random.h>
29#include <scsi/fc/fc_fcoe.h> 30#include <scsi/fc/fc_fcoe.h>
30#include <scsi/libfc.h> 31#include <scsi/libfc.h>
31 32
@@ -37,6 +38,7 @@
37#define FCOE_CTLR_START_DELAY 2000 /* mS after first adv. to choose FCF */ 38#define FCOE_CTLR_START_DELAY 2000 /* mS after first adv. to choose FCF */
38#define FCOE_CTRL_SOL_TOV 2000 /* min. solicitation interval (mS) */ 39#define FCOE_CTRL_SOL_TOV 2000 /* min. solicitation interval (mS) */
39#define FCOE_CTLR_FCF_LIMIT 20 /* max. number of FCF entries */ 40#define FCOE_CTLR_FCF_LIMIT 20 /* max. number of FCF entries */
41#define FCOE_CTLR_VN2VN_LOGIN_LIMIT 3 /* max. VN2VN rport login retries */
40 42
41/** 43/**
42 * enum fip_state - internal state of FCoE controller. 44 * enum fip_state - internal state of FCoE controller.
@@ -45,6 +47,11 @@
45 * @FIP_ST_AUTO: determining whether to use FIP or non-FIP mode. 47 * @FIP_ST_AUTO: determining whether to use FIP or non-FIP mode.
46 * @FIP_ST_NON_FIP: non-FIP mode selected. 48 * @FIP_ST_NON_FIP: non-FIP mode selected.
47 * @FIP_ST_ENABLED: FIP mode selected. 49 * @FIP_ST_ENABLED: FIP mode selected.
50 * @FIP_ST_VNMP_START: VN2VN multipath mode start, wait
51 * @FIP_ST_VNMP_PROBE1: VN2VN sent first probe, listening
52 * @FIP_ST_VNMP_PROBE2: VN2VN sent second probe, listening
53 * @FIP_ST_VNMP_CLAIM: VN2VN sent claim, waiting for responses
54 * @FIP_ST_VNMP_UP: VN2VN multipath mode operation
48 */ 55 */
49enum fip_state { 56enum fip_state {
50 FIP_ST_DISABLED, 57 FIP_ST_DISABLED,
@@ -52,8 +59,23 @@ enum fip_state {
52 FIP_ST_AUTO, 59 FIP_ST_AUTO,
53 FIP_ST_NON_FIP, 60 FIP_ST_NON_FIP,
54 FIP_ST_ENABLED, 61 FIP_ST_ENABLED,
62 FIP_ST_VNMP_START,
63 FIP_ST_VNMP_PROBE1,
64 FIP_ST_VNMP_PROBE2,
65 FIP_ST_VNMP_CLAIM,
66 FIP_ST_VNMP_UP,
55}; 67};
56 68
69/*
70 * Modes:
71 * The mode is the state that is to be entered after link up.
72 * It must not change after fcoe_ctlr_init() sets it.
73 */
74#define FIP_MODE_AUTO FIP_ST_AUTO
75#define FIP_MODE_NON_FIP FIP_ST_NON_FIP
76#define FIP_MODE_FABRIC FIP_ST_ENABLED
77#define FIP_MODE_VN2VN FIP_ST_VNMP_START
78
57/** 79/**
58 * struct fcoe_ctlr - FCoE Controller and FIP state 80 * struct fcoe_ctlr - FCoE Controller and FIP state
59 * @state: internal FIP state for network link and FIP or non-FIP mode. 81 * @state: internal FIP state for network link and FIP or non-FIP mode.
@@ -70,19 +92,20 @@ enum fip_state {
70 * @timer_work: &work_struct for doing keep-alives and resets. 92 * @timer_work: &work_struct for doing keep-alives and resets.
71 * @recv_work: &work_struct for receiving FIP frames. 93 * @recv_work: &work_struct for receiving FIP frames.
72 * @fip_recv_list: list of received FIP frames. 94 * @fip_recv_list: list of received FIP frames.
95 * @rnd_state: state for pseudo-random number generator.
96 * @port_id: proposed or selected local-port ID.
73 * @user_mfs: configured maximum FC frame size, including FC header. 97 * @user_mfs: configured maximum FC frame size, including FC header.
74 * @flogi_oxid: exchange ID of most recent fabric login. 98 * @flogi_oxid: exchange ID of most recent fabric login.
75 * @flogi_count: number of FLOGI attempts in AUTO mode. 99 * @flogi_count: number of FLOGI attempts in AUTO mode.
76 * @map_dest: use the FC_MAP mode for destination MAC addresses. 100 * @map_dest: use the FC_MAP mode for destination MAC addresses.
77 * @spma: supports SPMA server-provided MACs mode 101 * @spma: supports SPMA server-provided MACs mode
78 * @send_ctlr_ka: need to send controller keep alive 102 * @probe_tries: number of FC_IDs probed
79 * @send_port_ka: need to send port keep alives
80 * @dest_addr: MAC address of the selected FC forwarder. 103 * @dest_addr: MAC address of the selected FC forwarder.
81 * @ctl_src_addr: the native MAC address of our local port. 104 * @ctl_src_addr: the native MAC address of our local port.
82 * @send: LLD-supplied function to handle sending FIP Ethernet frames 105 * @send: LLD-supplied function to handle sending FIP Ethernet frames
83 * @update_mac: LLD-supplied function to handle changes to MAC addresses. 106 * @update_mac: LLD-supplied function to handle changes to MAC addresses.
84 * @get_src_addr: LLD-supplied function to supply a source MAC address. 107 * @get_src_addr: LLD-supplied function to supply a source MAC address.
85 * @lock: lock protecting this structure. 108 * @ctlr_mutex: lock protecting this structure.
86 * 109 *
87 * This structure is used by all FCoE drivers. It contains information 110 * This structure is used by all FCoE drivers. It contains information
88 * needed by all FCoE low-level drivers (LLDs) as well as internal state 111 * needed by all FCoE low-level drivers (LLDs) as well as internal state
@@ -103,21 +126,23 @@ struct fcoe_ctlr {
103 struct work_struct timer_work; 126 struct work_struct timer_work;
104 struct work_struct recv_work; 127 struct work_struct recv_work;
105 struct sk_buff_head fip_recv_list; 128 struct sk_buff_head fip_recv_list;
129
130 struct rnd_state rnd_state;
131 u32 port_id;
132
106 u16 user_mfs; 133 u16 user_mfs;
107 u16 flogi_oxid; 134 u16 flogi_oxid;
108 u8 flogi_count; 135 u8 flogi_count;
109 u8 reset_req;
110 u8 map_dest; 136 u8 map_dest;
111 u8 spma; 137 u8 spma;
112 u8 send_ctlr_ka; 138 u8 probe_tries;
113 u8 send_port_ka;
114 u8 dest_addr[ETH_ALEN]; 139 u8 dest_addr[ETH_ALEN];
115 u8 ctl_src_addr[ETH_ALEN]; 140 u8 ctl_src_addr[ETH_ALEN];
116 141
117 void (*send)(struct fcoe_ctlr *, struct sk_buff *); 142 void (*send)(struct fcoe_ctlr *, struct sk_buff *);
118 void (*update_mac)(struct fc_lport *, u8 *addr); 143 void (*update_mac)(struct fc_lport *, u8 *addr);
119 u8 * (*get_src_addr)(struct fc_lport *); 144 u8 * (*get_src_addr)(struct fc_lport *);
120 spinlock_t lock; 145 struct mutex ctlr_mutex;
121}; 146};
122 147
123/** 148/**
@@ -156,8 +181,26 @@ struct fcoe_fcf {
156 u8 fd_flags:1; 181 u8 fd_flags:1;
157}; 182};
158 183
184/**
185 * struct fcoe_rport - VN2VN remote port
186 * @time: time of create or last beacon packet received from node
187 * @fcoe_len: max FCoE frame size, not including VLAN or Ethernet headers
188 * @flags: flags from probe or claim
189 * @login_count: number of unsuccessful rport logins to this port
190 * @enode_mac: E_Node control MAC address
191 * @vn_mac: VN_Node assigned MAC address for data
192 */
193struct fcoe_rport {
194 unsigned long time;
195 u16 fcoe_len;
196 u16 flags;
197 u8 login_count;
198 u8 enode_mac[ETH_ALEN];
199 u8 vn_mac[ETH_ALEN];
200};
201
159/* FIP API functions */ 202/* FIP API functions */
160void fcoe_ctlr_init(struct fcoe_ctlr *); 203void fcoe_ctlr_init(struct fcoe_ctlr *, enum fip_state);
161void fcoe_ctlr_destroy(struct fcoe_ctlr *); 204void fcoe_ctlr_destroy(struct fcoe_ctlr *);
162void fcoe_ctlr_link_up(struct fcoe_ctlr *); 205void fcoe_ctlr_link_up(struct fcoe_ctlr *);
163int fcoe_ctlr_link_down(struct fcoe_ctlr *); 206int fcoe_ctlr_link_down(struct fcoe_ctlr *);
@@ -168,6 +211,17 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *, struct fc_lport *,
168 211
169/* libfcoe funcs */ 212/* libfcoe funcs */
170u64 fcoe_wwn_from_mac(unsigned char mac[], unsigned int, unsigned int); 213u64 fcoe_wwn_from_mac(unsigned char mac[], unsigned int, unsigned int);
171int fcoe_libfc_config(struct fc_lport *, struct libfc_function_template *); 214int fcoe_libfc_config(struct fc_lport *, struct fcoe_ctlr *,
215 const struct libfc_function_template *, int init_fcp);
216
217/**
218 * is_fip_mode() - returns true if FIP mode selected.
219 * @fip: FCoE controller.
220 */
221static inline bool is_fip_mode(struct fcoe_ctlr *fip)
222{
223 return fip->state == FIP_ST_ENABLED;
224}
225
172 226
173#endif /* _LIBFCOE_H */ 227#endif /* _LIBFCOE_H */
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 3b586859669c..d06e13be717b 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -422,16 +422,7 @@ enum service_response {
422}; 422};
423 423
424enum exec_status { 424enum exec_status {
425 SAM_GOOD = 0, 425 /* The SAM_STAT_.. codes fit in the lower 6 bits */
426 SAM_CHECK_COND = 2,
427 SAM_COND_MET = 4,
428 SAM_BUSY = 8,
429 SAM_INTERMEDIATE = 0x10,
430 SAM_IM_COND_MET = 0x12,
431 SAM_RESV_CONFLICT= 0x14,
432 SAM_TASK_SET_FULL= 0x28,
433 SAM_ACA_ACTIVE = 0x30,
434 SAM_TASK_ABORTED = 0x40,
435 426
436 SAS_DEV_NO_RESPONSE = 0x80, 427 SAS_DEV_NO_RESPONSE = 0x80,
437 SAS_DATA_UNDERRUN, 428 SAS_DATA_UNDERRUN,
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index d80b6dbed1ca..50cb34ffef11 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -381,6 +381,14 @@ extern int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
381 struct scsi_sense_hdr *, int timeout, int retries, 381 struct scsi_sense_hdr *, int timeout, int retries,
382 int *resid); 382 int *resid);
383 383
384#ifdef CONFIG_PM_RUNTIME
385extern int scsi_autopm_get_device(struct scsi_device *);
386extern void scsi_autopm_put_device(struct scsi_device *);
387#else
388static inline int scsi_autopm_get_device(struct scsi_device *d) { return 0; }
389static inline void scsi_autopm_put_device(struct scsi_device *d) {}
390#endif /* CONFIG_PM_RUNTIME */
391
384static inline int __must_check scsi_device_reprobe(struct scsi_device *sdev) 392static inline int __must_check scsi_device_reprobe(struct scsi_device *sdev)
385{ 393{
386 return device_reprobe(&sdev->sdev_gendev); 394 return device_reprobe(&sdev->sdev_gendev);
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 349c7f30720d..7fff94b3b2a8 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -32,6 +32,7 @@ struct scsi_transport_template;
32struct iscsi_transport; 32struct iscsi_transport;
33struct iscsi_endpoint; 33struct iscsi_endpoint;
34struct Scsi_Host; 34struct Scsi_Host;
35struct scsi_cmnd;
35struct iscsi_cls_conn; 36struct iscsi_cls_conn;
36struct iscsi_conn; 37struct iscsi_conn;
37struct iscsi_task; 38struct iscsi_task;
@@ -255,5 +256,6 @@ extern int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time);
255extern struct iscsi_endpoint *iscsi_create_endpoint(int dd_size); 256extern struct iscsi_endpoint *iscsi_create_endpoint(int dd_size);
256extern void iscsi_destroy_endpoint(struct iscsi_endpoint *ep); 257extern void iscsi_destroy_endpoint(struct iscsi_endpoint *ep);
257extern struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle); 258extern struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle);
259extern int iscsi_block_scsi_eh(struct scsi_cmnd *cmd);
258 260
259#endif 261#endif