aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2010-10-22 20:34:15 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2010-10-22 20:34:15 -0400
commitc70b5296e775cde46cfcb2d860ba160108a5ec7a (patch)
tree30419cb982acca44499236adcca65f2f87698c74 /drivers
parent80c226fbef56576946c9655fcb2ab62e63404d12 (diff)
parent58ff4bd042adf8013c8f70fd03c2c0f8d022e387 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (84 commits) [SCSI] be2iscsi: SGE Len == 64K [SCSI] be2iscsi: Remove premature free of cid [SCSI] be2iscsi: More time for FW [SCSI] libsas: fix bug for vacant phy [SCSI] sd: Fix overflow with big physical blocks [SCSI] st: add MTWEOFI to write filemarks without flushing drive buffer [SCSI] libsas: Don't issue commands to devices that have been hot-removed [SCSI] megaraid_sas: Add Online Controller Reset to MegaRAID SAS drive [SCSI] lpfc 8.3.17: Update lpfc driver version to 8.3.17 [SCSI] lpfc 8.3.17: Replace function reset methodology [SCSI] lpfc 8.3.17: SCSI fixes [SCSI] lpfc 8.3.17: BSG fixes [SCSI] lpfc 8.3.17: SLI Additions and Fixes [SCSI] lpfc 8.3.17: Code Cleanup and Locking fixes [SCSI] zfcp: Remove scsi_cmnd->serial_number from debug traces [SCSI] ipr: fix array error logging [SCSI] aha152x: enable PCMCIA on 64bit [SCSI] scsi_dh_alua: Handle all states correctly [SCSI] cxgb4i: connection and ddp setting update [SCSI] cxgb3i: fixed connection over vlan ...
Diffstat (limited to 'drivers')
-rw-r--r--drivers/message/fusion/mptbase.c4
-rw-r--r--drivers/s390/scsi/Makefile5
-rw-r--r--drivers/s390/scsi/zfcp_aux.c126
-rw-r--r--drivers/s390/scsi/zfcp_ccw.c17
-rw-r--r--drivers/s390/scsi/zfcp_cfdc.c186
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c32
-rw-r--r--drivers/s390/scsi/zfcp_dbf.h14
-rw-r--r--drivers/s390/scsi/zfcp_def.h78
-rw-r--r--drivers/s390/scsi/zfcp_erp.c631
-rw-r--r--drivers/s390/scsi/zfcp_ext.h63
-rw-r--r--drivers/s390/scsi/zfcp_fc.c2
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c609
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c18
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c158
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c221
-rw-r--r--drivers/s390/scsi/zfcp_unit.c244
-rw-r--r--drivers/scsi/Kconfig5
-rw-r--r--drivers/scsi/Makefile3
-rw-r--r--drivers/scsi/aacraid/commctrl.c2
-rw-r--r--drivers/scsi/aacraid/commsup.c2
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c4
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c2
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c3
-rw-r--r--drivers/scsi/be2iscsi/be_main.c2
-rw-r--r--drivers/scsi/bfa/Makefile17
-rw-r--r--drivers/scsi/bfa/bfa.h438
-rw-r--r--drivers/scsi/bfa/bfa_callback_priv.h57
-rw-r--r--drivers/scsi/bfa/bfa_cb_ioim.h (renamed from drivers/scsi/bfa/bfa_cb_ioim_macros.h)30
-rw-r--r--drivers/scsi/bfa/bfa_cee.c492
-rw-r--r--drivers/scsi/bfa/bfa_core.c1131
-rw-r--r--drivers/scsi/bfa/bfa_cs.h364
-rw-r--r--drivers/scsi/bfa/bfa_csdebug.c58
-rw-r--r--drivers/scsi/bfa/bfa_defs.h466
-rw-r--r--drivers/scsi/bfa/bfa_defs_fcs.h457
-rw-r--r--drivers/scsi/bfa/bfa_defs_svc.h1081
-rw-r--r--drivers/scsi/bfa/bfa_drv.c (renamed from drivers/scsi/bfa/bfa_module.c)41
-rw-r--r--drivers/scsi/bfa/bfa_fc.h (renamed from drivers/scsi/bfa/include/protocol/fc.h)1011
-rw-r--r--drivers/scsi/bfa/bfa_fcbuild.c (renamed from drivers/scsi/bfa/fcbuild.c)293
-rw-r--r--drivers/scsi/bfa/bfa_fcbuild.h316
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c3460
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.h401
-rw-r--r--drivers/scsi/bfa/bfa_fcpim_priv.h192
-rw-r--r--drivers/scsi/bfa/bfa_fcport.c1962
-rw-r--r--drivers/scsi/bfa/bfa_fcs.c1609
-rw-r--r--drivers/scsi/bfa/bfa_fcs.h779
-rw-r--r--drivers/scsi/bfa/bfa_fcs_fcpim.c (renamed from drivers/scsi/bfa/fcpim.c)237
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c5411
-rw-r--r--drivers/scsi/bfa/bfa_fcs_port.c61
-rw-r--r--drivers/scsi/bfa/bfa_fcs_rport.c (renamed from drivers/scsi/bfa/rport.c)1662
-rw-r--r--drivers/scsi/bfa/bfa_fcs_uf.c99
-rw-r--r--drivers/scsi/bfa/bfa_fcxp.c774
-rw-r--r--drivers/scsi/bfa/bfa_fcxp_priv.h138
-rw-r--r--drivers/scsi/bfa/bfa_fwimg_priv.h44
-rw-r--r--drivers/scsi/bfa/bfa_hw_cb.c8
-rw-r--r--drivers/scsi/bfa/bfa_hw_ct.c11
-rw-r--r--drivers/scsi/bfa/bfa_intr.c270
-rw-r--r--drivers/scsi/bfa/bfa_intr_priv.h117
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c1888
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h248
-rw-r--r--drivers/scsi/bfa/bfa_ioc_cb.c124
-rw-r--r--drivers/scsi/bfa/bfa_ioc_ct.c137
-rw-r--r--drivers/scsi/bfa/bfa_iocfc.c927
-rw-r--r--drivers/scsi/bfa/bfa_iocfc.h184
-rw-r--r--drivers/scsi/bfa/bfa_iocfc_q.c44
-rw-r--r--drivers/scsi/bfa/bfa_ioim.c1364
-rw-r--r--drivers/scsi/bfa/bfa_itnim.c1088
-rw-r--r--drivers/scsi/bfa/bfa_log.c346
-rw-r--r--drivers/scsi/bfa/bfa_log_module.c537
-rw-r--r--drivers/scsi/bfa/bfa_lps.c892
-rw-r--r--drivers/scsi/bfa/bfa_lps_priv.h38
-rw-r--r--drivers/scsi/bfa/bfa_modules.h (renamed from drivers/scsi/bfa/bfa_priv.h)64
-rw-r--r--drivers/scsi/bfa/bfa_modules_priv.h43
-rw-r--r--drivers/scsi/bfa/bfa_os_inc.h144
-rw-r--r--drivers/scsi/bfa/bfa_plog.h (renamed from drivers/scsi/bfa/include/cs/bfa_plog.h)120
-rw-r--r--drivers/scsi/bfa/bfa_port.c134
-rw-r--r--drivers/scsi/bfa/bfa_port.h66
-rw-r--r--drivers/scsi/bfa/bfa_port_priv.h94
-rw-r--r--drivers/scsi/bfa/bfa_rport.c906
-rw-r--r--drivers/scsi/bfa/bfa_rport_priv.h45
-rw-r--r--drivers/scsi/bfa/bfa_sgpg.c226
-rw-r--r--drivers/scsi/bfa/bfa_sgpg_priv.h79
-rw-r--r--drivers/scsi/bfa/bfa_sm.c38
-rw-r--r--drivers/scsi/bfa/bfa_svc.c5423
-rw-r--r--drivers/scsi/bfa/bfa_svc.h657
-rw-r--r--drivers/scsi/bfa/bfa_timer.c90
-rw-r--r--drivers/scsi/bfa/bfa_trcmod_priv.h64
-rw-r--r--drivers/scsi/bfa/bfa_tskim.c690
-rw-r--r--drivers/scsi/bfa/bfa_uf.c343
-rw-r--r--drivers/scsi/bfa/bfa_uf_priv.h47
-rw-r--r--drivers/scsi/bfa/bfad.c1355
-rw-r--r--drivers/scsi/bfa/bfad_attr.c241
-rw-r--r--drivers/scsi/bfa/bfad_attr.h56
-rw-r--r--drivers/scsi/bfa/bfad_debugfs.c10
-rw-r--r--drivers/scsi/bfa/bfad_drv.h254
-rw-r--r--drivers/scsi/bfa/bfad_fwimg.c131
-rw-r--r--drivers/scsi/bfa/bfad_im.c257
-rw-r--r--drivers/scsi/bfa/bfad_im.h56
-rw-r--r--drivers/scsi/bfa/bfad_im_compat.h45
-rw-r--r--drivers/scsi/bfa/bfad_intr.c222
-rw-r--r--drivers/scsi/bfa/bfad_ipfc.h42
-rw-r--r--drivers/scsi/bfa/bfad_os.c50
-rw-r--r--drivers/scsi/bfa/bfad_tm.h59
-rw-r--r--drivers/scsi/bfa/bfad_trcmod.h52
-rw-r--r--drivers/scsi/bfa/bfi.h579
-rw-r--r--drivers/scsi/bfa/bfi_cbreg.h (renamed from drivers/scsi/bfa/include/bfi/bfi_cbreg.h)25
-rw-r--r--drivers/scsi/bfa/bfi_ctreg.h627
-rw-r--r--drivers/scsi/bfa/bfi_ms.h765
-rw-r--r--drivers/scsi/bfa/fab.c62
-rw-r--r--drivers/scsi/bfa/fabric.c1323
-rw-r--r--drivers/scsi/bfa/fcbuild.h279
-rw-r--r--drivers/scsi/bfa/fcptm.c68
-rw-r--r--drivers/scsi/bfa/fcs.h30
-rw-r--r--drivers/scsi/bfa/fcs_auth.h37
-rw-r--r--drivers/scsi/bfa/fcs_fabric.h68
-rw-r--r--drivers/scsi/bfa/fcs_fcpim.h39
-rw-r--r--drivers/scsi/bfa/fcs_fcptm.h45
-rw-r--r--drivers/scsi/bfa/fcs_fcxp.h29
-rw-r--r--drivers/scsi/bfa/fcs_lport.h118
-rw-r--r--drivers/scsi/bfa/fcs_ms.h35
-rw-r--r--drivers/scsi/bfa/fcs_port.h31
-rw-r--r--drivers/scsi/bfa/fcs_rport.h61
-rw-r--r--drivers/scsi/bfa/fcs_trcmod.h56
-rw-r--r--drivers/scsi/bfa/fcs_uf.h31
-rw-r--r--drivers/scsi/bfa/fcs_vport.h32
-rw-r--r--drivers/scsi/bfa/fdmi.c1230
-rw-r--r--drivers/scsi/bfa/include/aen/bfa_aen.h96
-rw-r--r--drivers/scsi/bfa/include/aen/bfa_aen_adapter.h31
-rw-r--r--drivers/scsi/bfa/include/aen/bfa_aen_audit.h31
-rw-r--r--drivers/scsi/bfa/include/aen/bfa_aen_ethport.h35
-rw-r--r--drivers/scsi/bfa/include/aen/bfa_aen_ioc.h45
-rw-r--r--drivers/scsi/bfa/include/aen/bfa_aen_itnim.h33
-rw-r--r--drivers/scsi/bfa/include/aen/bfa_aen_lport.h51
-rw-r--r--drivers/scsi/bfa/include/aen/bfa_aen_port.h57
-rw-r--r--drivers/scsi/bfa/include/aen/bfa_aen_rport.h37
-rw-r--r--drivers/scsi/bfa/include/bfa.h203
-rw-r--r--drivers/scsi/bfa/include/bfa_fcpim.h177
-rw-r--r--drivers/scsi/bfa/include/bfa_fcptm.h47
-rw-r--r--drivers/scsi/bfa/include/bfa_svc.h338
-rw-r--r--drivers/scsi/bfa/include/bfa_timer.h53
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi.h174
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_boot.h34
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_cee.h119
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_ctreg.h640
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_fabric.h92
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_fcpim.h301
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_fcxp.h71
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_ioc.h208
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_iocfc.h179
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_lport.h89
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_lps.h104
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_pbc.h62
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_port.h115
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_pport.h118
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_rport.h104
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_uf.h52
-rw-r--r--drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h40
-rw-r--r--drivers/scsi/bfa/include/cna/cee/bfa_cee.h77
-rw-r--r--drivers/scsi/bfa/include/cna/port/bfa_port.h70
-rw-r--r--drivers/scsi/bfa/include/cna/pstats/ethport_defs.h36
-rw-r--r--drivers/scsi/bfa/include/cna/pstats/phyport_defs.h218
-rw-r--r--drivers/scsi/bfa/include/cs/bfa_checksum.h60
-rw-r--r--drivers/scsi/bfa/include/cs/bfa_debug.h45
-rw-r--r--drivers/scsi/bfa/include/cs/bfa_log.h184
-rw-r--r--drivers/scsi/bfa/include/cs/bfa_perf.h34
-rw-r--r--drivers/scsi/bfa/include/cs/bfa_q.h81
-rw-r--r--drivers/scsi/bfa/include/cs/bfa_sm.h77
-rw-r--r--drivers/scsi/bfa/include/cs/bfa_trc.h176
-rw-r--r--drivers/scsi/bfa/include/cs/bfa_wc.h68
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_adapter.h83
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_aen.h83
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_audit.h38
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_auth.h134
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_boot.h81
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_cee.h157
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_driver.h41
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_ethport.h99
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_fcpim.h45
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_fcport.h88
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_ioc.h158
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h322
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_ipfc.h70
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_itnim.h136
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_led.h35
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_lport.h68
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_mfg.h144
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_pci.h48
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_pm.h33
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_pom.h56
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_port.h248
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_pport.h393
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_qos.h99
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_rport.h199
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_status.h282
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_tin.h118
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_tsensor.h43
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_types.h30
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_version.h22
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_vf.h74
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_vport.h91
-rw-r--r--drivers/scsi/bfa/include/fcb/bfa_fcb.h33
-rw-r--r--drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h75
-rw-r--r--drivers/scsi/bfa/include/fcb/bfa_fcb_port.h113
-rw-r--r--drivers/scsi/bfa/include/fcb/bfa_fcb_rport.h80
-rw-r--r--drivers/scsi/bfa/include/fcb/bfa_fcb_vf.h47
-rw-r--r--drivers/scsi/bfa/include/fcb/bfa_fcb_vport.h48
-rw-r--r--drivers/scsi/bfa/include/fcs/bfa_fcs.h76
-rw-r--r--drivers/scsi/bfa/include/fcs/bfa_fcs_auth.h82
-rw-r--r--drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h112
-rw-r--r--drivers/scsi/bfa/include/fcs/bfa_fcs_fcpim.h132
-rw-r--r--drivers/scsi/bfa/include/fcs/bfa_fcs_fdmi.h63
-rw-r--r--drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h219
-rw-r--r--drivers/scsi/bfa/include/fcs/bfa_fcs_rport.h105
-rw-r--r--drivers/scsi/bfa/include/fcs/bfa_fcs_vport.h67
-rw-r--r--drivers/scsi/bfa/include/log/bfa_log_fcs.h28
-rw-r--r--drivers/scsi/bfa/include/log/bfa_log_hal.h36
-rw-r--r--drivers/scsi/bfa/include/log/bfa_log_linux.h62
-rw-r--r--drivers/scsi/bfa/include/log/bfa_log_wdrv.h36
-rw-r--r--drivers/scsi/bfa/include/protocol/ct.h492
-rw-r--r--drivers/scsi/bfa/include/protocol/fc_sp.h224
-rw-r--r--drivers/scsi/bfa/include/protocol/fcp.h184
-rw-r--r--drivers/scsi/bfa/include/protocol/fdmi.h163
-rw-r--r--drivers/scsi/bfa/include/protocol/scsi.h1648
-rw-r--r--drivers/scsi/bfa/include/protocol/types.h42
-rw-r--r--drivers/scsi/bfa/loop.c213
-rw-r--r--drivers/scsi/bfa/lport_api.c303
-rw-r--r--drivers/scsi/bfa/lport_priv.h82
-rw-r--r--drivers/scsi/bfa/ms.c759
-rw-r--r--drivers/scsi/bfa/n2n.c105
-rw-r--r--drivers/scsi/bfa/ns.c1242
-rw-r--r--drivers/scsi/bfa/plog.c184
-rw-r--r--drivers/scsi/bfa/rport_api.c185
-rw-r--r--drivers/scsi/bfa/rport_ftrs.c379
-rw-r--r--drivers/scsi/bfa/scn.c482
-rw-r--r--drivers/scsi/bfa/vfapi.c292
-rw-r--r--drivers/scsi/bfa/vport.c903
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c63
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c62
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c15
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i.h161
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_ddp.c773
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_ddp.h312
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_init.c132
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_iscsi.c1018
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_offload.c1944
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_offload.h243
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_pdu.c495
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_pdu.h59
-rw-r--r--drivers/scsi/cxgbi/Kconfig2
-rw-r--r--drivers/scsi/cxgbi/Makefile2
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/Kbuild (renamed from drivers/scsi/cxgb3i/Kbuild)1
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/Kconfig (renamed from drivers/scsi/cxgb3i/Kconfig)4
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c1465
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.h51
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/Kbuild3
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/Kconfig7
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c1604
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.h43
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c2612
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h745
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c65
-rw-r--r--drivers/scsi/fnic/fnic_main.c15
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c91
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h6
-rw-r--r--drivers/scsi/ipr.c116
-rw-r--r--drivers/scsi/ipr.h17
-rw-r--r--drivers/scsi/libsas/sas_ata.c5
-rw-r--r--drivers/scsi/libsas/sas_expander.c12
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c7
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c16
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c147
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c34
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c213
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c40
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c237
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c8
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.c756
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h88
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c2
-rw-r--r--drivers/scsi/pcmcia/Kconfig1
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c30
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h8
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h15
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c54
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c66
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c431
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c156
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c1
-rw-r--r--drivers/scsi/scsi_debug.c125
-rw-r--r--drivers/scsi/scsi_transport_fc.c163
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c2
-rw-r--r--drivers/scsi/sd.c78
-rw-r--r--drivers/scsi/sd.h6
-rw-r--r--drivers/scsi/st.c15
302 files changed, 39077 insertions, 44889 deletions
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 6837a8ef9371..3e57b61ca446 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -5945,8 +5945,10 @@ mpt_findImVolumes(MPT_ADAPTER *ioc)
5945 goto out; 5945 goto out;
5946 5946
5947 mem = kmalloc(iocpage2sz, GFP_KERNEL); 5947 mem = kmalloc(iocpage2sz, GFP_KERNEL);
5948 if (!mem) 5948 if (!mem) {
5949 rc = -ENOMEM;
5949 goto out; 5950 goto out;
5951 }
5950 5952
5951 memcpy(mem, (u8 *)pIoc2, iocpage2sz); 5953 memcpy(mem, (u8 *)pIoc2, iocpage2sz);
5952 ioc->raid_data.pIocPg2 = (IOCPage2_t *) mem; 5954 ioc->raid_data.pIocPg2 = (IOCPage2_t *) mem;
diff --git a/drivers/s390/scsi/Makefile b/drivers/s390/scsi/Makefile
index cb301cc6178c..c454ffebb63e 100644
--- a/drivers/s390/scsi/Makefile
+++ b/drivers/s390/scsi/Makefile
@@ -2,7 +2,8 @@
2# Makefile for the S/390 specific device drivers 2# Makefile for the S/390 specific device drivers
3# 3#
4 4
5zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_scsi.o zfcp_erp.o zfcp_qdio.o \ 5zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_cfdc.o zfcp_dbf.o zfcp_erp.o \
6 zfcp_fsf.o zfcp_dbf.o zfcp_sysfs.o zfcp_fc.o zfcp_cfdc.o 6 zfcp_fc.o zfcp_fsf.o zfcp_qdio.o zfcp_scsi.o zfcp_sysfs.o \
7 zfcp_unit.o
7 8
8obj-$(CONFIG_ZFCP) += zfcp.o 9obj-$(CONFIG_ZFCP) += zfcp.o
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 96fa1f536394..044fb22718d2 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -56,7 +56,6 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
56 struct ccw_device *cdev; 56 struct ccw_device *cdev;
57 struct zfcp_adapter *adapter; 57 struct zfcp_adapter *adapter;
58 struct zfcp_port *port; 58 struct zfcp_port *port;
59 struct zfcp_unit *unit;
60 59
61 cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid); 60 cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
62 if (!cdev) 61 if (!cdev)
@@ -72,17 +71,11 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
72 port = zfcp_get_port_by_wwpn(adapter, wwpn); 71 port = zfcp_get_port_by_wwpn(adapter, wwpn);
73 if (!port) 72 if (!port)
74 goto out_port; 73 goto out_port;
74 flush_work(&port->rport_work);
75 75
76 unit = zfcp_unit_enqueue(port, lun); 76 zfcp_unit_add(port, lun);
77 if (IS_ERR(unit))
78 goto out_unit;
79
80 zfcp_erp_unit_reopen(unit, 0, "auidc_1", NULL);
81 zfcp_erp_wait(adapter);
82 flush_work(&unit->scsi_work);
83
84out_unit:
85 put_device(&port->dev); 77 put_device(&port->dev);
78
86out_port: 79out_port:
87 zfcp_ccw_adapter_put(adapter); 80 zfcp_ccw_adapter_put(adapter);
88out_ccw_device: 81out_ccw_device:
@@ -158,6 +151,9 @@ static int __init zfcp_module_init(void)
158 fc_attach_transport(&zfcp_transport_functions); 151 fc_attach_transport(&zfcp_transport_functions);
159 if (!zfcp_data.scsi_transport_template) 152 if (!zfcp_data.scsi_transport_template)
160 goto out_transport; 153 goto out_transport;
154 scsi_transport_reserve_device(zfcp_data.scsi_transport_template,
155 sizeof(struct zfcp_scsi_dev));
156
161 157
162 retval = misc_register(&zfcp_cfdc_misc); 158 retval = misc_register(&zfcp_cfdc_misc);
163 if (retval) { 159 if (retval) {
@@ -211,30 +207,6 @@ static void __exit zfcp_module_exit(void)
211module_exit(zfcp_module_exit); 207module_exit(zfcp_module_exit);
212 208
213/** 209/**
214 * zfcp_get_unit_by_lun - find unit in unit list of port by FCP LUN
215 * @port: pointer to port to search for unit
216 * @fcp_lun: FCP LUN to search for
217 *
218 * Returns: pointer to zfcp_unit or NULL
219 */
220struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun)
221{
222 unsigned long flags;
223 struct zfcp_unit *unit;
224
225 read_lock_irqsave(&port->unit_list_lock, flags);
226 list_for_each_entry(unit, &port->unit_list, list)
227 if (unit->fcp_lun == fcp_lun) {
228 if (!get_device(&unit->dev))
229 unit = NULL;
230 read_unlock_irqrestore(&port->unit_list_lock, flags);
231 return unit;
232 }
233 read_unlock_irqrestore(&port->unit_list_lock, flags);
234 return NULL;
235}
236
237/**
238 * zfcp_get_port_by_wwpn - find port in port list of adapter by wwpn 210 * zfcp_get_port_by_wwpn - find port in port list of adapter by wwpn
239 * @adapter: pointer to adapter to search for port 211 * @adapter: pointer to adapter to search for port
240 * @wwpn: wwpn to search for 212 * @wwpn: wwpn to search for
@@ -259,92 +231,6 @@ struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
259 return NULL; 231 return NULL;
260} 232}
261 233
262/**
263 * zfcp_unit_release - dequeue unit
264 * @dev: pointer to device
265 *
266 * waits until all work is done on unit and removes it then from the unit->list
267 * of the associated port.
268 */
269static void zfcp_unit_release(struct device *dev)
270{
271 struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
272
273 put_device(&unit->port->dev);
274 kfree(unit);
275}
276
277/**
278 * zfcp_unit_enqueue - enqueue unit to unit list of a port.
279 * @port: pointer to port where unit is added
280 * @fcp_lun: FCP LUN of unit to be enqueued
281 * Returns: pointer to enqueued unit on success, ERR_PTR on error
282 *
283 * Sets up some unit internal structures and creates sysfs entry.
284 */
285struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
286{
287 struct zfcp_unit *unit;
288 int retval = -ENOMEM;
289
290 get_device(&port->dev);
291
292 unit = zfcp_get_unit_by_lun(port, fcp_lun);
293 if (unit) {
294 put_device(&unit->dev);
295 retval = -EEXIST;
296 goto err_out;
297 }
298
299 unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
300 if (!unit)
301 goto err_out;
302
303 unit->port = port;
304 unit->fcp_lun = fcp_lun;
305 unit->dev.parent = &port->dev;
306 unit->dev.release = zfcp_unit_release;
307
308 if (dev_set_name(&unit->dev, "0x%016llx",
309 (unsigned long long) fcp_lun)) {
310 kfree(unit);
311 goto err_out;
312 }
313 retval = -EINVAL;
314
315 INIT_WORK(&unit->scsi_work, zfcp_scsi_scan_work);
316
317 spin_lock_init(&unit->latencies.lock);
318 unit->latencies.write.channel.min = 0xFFFFFFFF;
319 unit->latencies.write.fabric.min = 0xFFFFFFFF;
320 unit->latencies.read.channel.min = 0xFFFFFFFF;
321 unit->latencies.read.fabric.min = 0xFFFFFFFF;
322 unit->latencies.cmd.channel.min = 0xFFFFFFFF;
323 unit->latencies.cmd.fabric.min = 0xFFFFFFFF;
324
325 if (device_register(&unit->dev)) {
326 put_device(&unit->dev);
327 goto err_out;
328 }
329
330 if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs))
331 goto err_out_put;
332
333 write_lock_irq(&port->unit_list_lock);
334 list_add_tail(&unit->list, &port->unit_list);
335 write_unlock_irq(&port->unit_list_lock);
336
337 atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status);
338
339 return unit;
340
341err_out_put:
342 device_unregister(&unit->dev);
343err_out:
344 put_device(&port->dev);
345 return ERR_PTR(retval);
346}
347
348static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) 234static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
349{ 235{
350 adapter->pool.erp_req = 236 adapter->pool.erp_req =
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index ce1cc7a11fb4..0833c2b51e39 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -46,8 +46,7 @@ static int zfcp_ccw_activate(struct ccw_device *cdev)
46 if (!adapter) 46 if (!adapter)
47 return 0; 47 return 0;
48 48
49 zfcp_erp_modify_adapter_status(adapter, "ccresu1", NULL, 49 zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
50 ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
51 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 50 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
52 "ccresu2", NULL); 51 "ccresu2", NULL);
53 zfcp_erp_wait(adapter); 52 zfcp_erp_wait(adapter);
@@ -164,14 +163,7 @@ static int zfcp_ccw_set_online(struct ccw_device *cdev)
164 BUG_ON(!zfcp_reqlist_isempty(adapter->req_list)); 163 BUG_ON(!zfcp_reqlist_isempty(adapter->req_list));
165 adapter->req_no = 0; 164 adapter->req_no = 0;
166 165
167 zfcp_erp_modify_adapter_status(adapter, "ccsonl1", NULL, 166 zfcp_ccw_activate(cdev);
168 ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
169 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
170 "ccsonl2", NULL);
171 zfcp_erp_wait(adapter);
172
173 flush_work(&adapter->scan_work);
174
175 zfcp_ccw_adapter_put(adapter); 167 zfcp_ccw_adapter_put(adapter);
176 return 0; 168 return 0;
177} 169}
@@ -224,9 +216,8 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
224 break; 216 break;
225 case CIO_OPER: 217 case CIO_OPER:
226 dev_info(&cdev->dev, "The FCP device is operational again\n"); 218 dev_info(&cdev->dev, "The FCP device is operational again\n");
227 zfcp_erp_modify_adapter_status(adapter, "ccnoti3", NULL, 219 zfcp_erp_set_adapter_status(adapter,
228 ZFCP_STATUS_COMMON_RUNNING, 220 ZFCP_STATUS_COMMON_RUNNING);
229 ZFCP_SET);
230 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 221 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
231 "ccnoti4", NULL); 222 "ccnoti4", NULL);
232 break; 223 break;
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index 1838cda68ba8..d692e229ecba 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -2,9 +2,10 @@
2 * zfcp device driver 2 * zfcp device driver
3 * 3 *
4 * Userspace interface for accessing the 4 * Userspace interface for accessing the
5 * Access Control Lists / Control File Data Channel 5 * Access Control Lists / Control File Data Channel;
6 * handling of response code and states for ports and LUNs.
6 * 7 *
7 * Copyright IBM Corporation 2008, 2009 8 * Copyright IBM Corporation 2008, 2010
8 */ 9 */
9 10
10#define KMSG_COMPONENT "zfcp" 11#define KMSG_COMPONENT "zfcp"
@@ -261,3 +262,184 @@ struct miscdevice zfcp_cfdc_misc = {
261 .name = "zfcp_cfdc", 262 .name = "zfcp_cfdc",
262 .fops = &zfcp_cfdc_fops, 263 .fops = &zfcp_cfdc_fops,
263}; 264};
265
266/**
267 * zfcp_cfdc_adapter_access_changed - Process change in adapter ACT
268 * @adapter: Adapter where the Access Control Table (ACT) changed
269 *
270 * After a change in the adapter ACT, check if access to any
271 * previously denied resources is now possible.
272 */
273void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *adapter)
274{
275 unsigned long flags;
276 struct zfcp_port *port;
277 struct scsi_device *sdev;
278 struct zfcp_scsi_dev *zfcp_sdev;
279 int status;
280
281 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
282 return;
283
284 read_lock_irqsave(&adapter->port_list_lock, flags);
285 list_for_each_entry(port, &adapter->port_list, list) {
286 status = atomic_read(&port->status);
287 if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) ||
288 (status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
289 zfcp_erp_port_reopen(port,
290 ZFCP_STATUS_COMMON_ERP_FAILED,
291 "cfaac_1", NULL);
292 }
293 read_unlock_irqrestore(&adapter->port_list_lock, flags);
294
295 shost_for_each_device(sdev, port->adapter->scsi_host) {
296 zfcp_sdev = sdev_to_zfcp(sdev);
297 status = atomic_read(&zfcp_sdev->status);
298 if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) ||
299 (status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
300 zfcp_erp_lun_reopen(sdev,
301 ZFCP_STATUS_COMMON_ERP_FAILED,
302 "cfaac_2", NULL);
303 }
304}
305
306static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table)
307{
308 u16 subtable = table >> 16;
309 u16 rule = table & 0xffff;
310 const char *act_type[] = { "unknown", "OS", "WWPN", "DID", "LUN" };
311
312 if (subtable && subtable < ARRAY_SIZE(act_type))
313 dev_warn(&adapter->ccw_device->dev,
314 "Access denied according to ACT rule type %s, "
315 "rule %d\n", act_type[subtable], rule);
316}
317
318/**
319 * zfcp_cfdc_port_denied - Process "access denied" for port
320 * @port: The port where the acces has been denied
321 * @qual: The FSF status qualifier for the access denied FSF status
322 */
323void zfcp_cfdc_port_denied(struct zfcp_port *port,
324 union fsf_status_qual *qual)
325{
326 dev_warn(&port->adapter->ccw_device->dev,
327 "Access denied to port 0x%016Lx\n",
328 (unsigned long long)port->wwpn);
329
330 zfcp_act_eval_err(port->adapter, qual->halfword[0]);
331 zfcp_act_eval_err(port->adapter, qual->halfword[1]);
332 zfcp_erp_set_port_status(port,
333 ZFCP_STATUS_COMMON_ERP_FAILED |
334 ZFCP_STATUS_COMMON_ACCESS_DENIED);
335}
336
337/**
338 * zfcp_cfdc_lun_denied - Process "access denied" for LUN
339 * @sdev: The SCSI device / LUN where the access has been denied
340 * @qual: The FSF status qualifier for the access denied FSF status
341 */
342void zfcp_cfdc_lun_denied(struct scsi_device *sdev,
343 union fsf_status_qual *qual)
344{
345 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
346
347 dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
348 "Access denied to LUN 0x%016Lx on port 0x%016Lx\n",
349 zfcp_scsi_dev_lun(sdev),
350 (unsigned long long)zfcp_sdev->port->wwpn);
351 zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->halfword[0]);
352 zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->halfword[1]);
353 zfcp_erp_set_lun_status(sdev,
354 ZFCP_STATUS_COMMON_ERP_FAILED |
355 ZFCP_STATUS_COMMON_ACCESS_DENIED);
356
357 atomic_clear_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status);
358 atomic_clear_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status);
359}
360
361/**
362 * zfcp_cfdc_lun_shrng_vltn - Evaluate LUN sharing violation status
363 * @sdev: The LUN / SCSI device where sharing violation occurred
364 * @qual: The FSF status qualifier from the LUN sharing violation
365 */
366void zfcp_cfdc_lun_shrng_vltn(struct scsi_device *sdev,
367 union fsf_status_qual *qual)
368{
369 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
370
371 if (qual->word[0])
372 dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
373 "LUN 0x%Lx on port 0x%Lx is already in "
374 "use by CSS%d, MIF Image ID %x\n",
375 zfcp_scsi_dev_lun(sdev),
376 (unsigned long long)zfcp_sdev->port->wwpn,
377 qual->fsf_queue_designator.cssid,
378 qual->fsf_queue_designator.hla);
379 else
380 zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->word[2]);
381
382 zfcp_erp_set_lun_status(sdev,
383 ZFCP_STATUS_COMMON_ERP_FAILED |
384 ZFCP_STATUS_COMMON_ACCESS_DENIED);
385 atomic_clear_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status);
386 atomic_clear_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status);
387}
388
389/**
390 * zfcp_cfdc_open_lun_eval - Eval access ctrl. status for successful "open lun"
391 * @sdev: The SCSI device / LUN where to evaluate the status
392 * @bottom: The qtcb bottom with the status from the "open lun"
393 *
394 * Returns: 0 if LUN is usable, -EACCES if the access control table
395 * reports an unsupported configuration.
396 */
397int zfcp_cfdc_open_lun_eval(struct scsi_device *sdev,
398 struct fsf_qtcb_bottom_support *bottom)
399{
400 int shared, rw;
401 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
402 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
403
404 if ((adapter->connection_features & FSF_FEATURE_NPIV_MODE) ||
405 !(adapter->adapter_features & FSF_FEATURE_LUN_SHARING) ||
406 zfcp_ccw_priv_sch(adapter))
407 return 0;
408
409 shared = !(bottom->lun_access_info & FSF_UNIT_ACCESS_EXCLUSIVE);
410 rw = (bottom->lun_access_info & FSF_UNIT_ACCESS_OUTBOUND_TRANSFER);
411
412 if (shared)
413 atomic_set_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status);
414
415 if (!rw) {
416 atomic_set_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status);
417 dev_info(&adapter->ccw_device->dev, "SCSI device at LUN "
418 "0x%016Lx on port 0x%016Lx opened read-only\n",
419 zfcp_scsi_dev_lun(sdev),
420 (unsigned long long)zfcp_sdev->port->wwpn);
421 }
422
423 if (!shared && !rw) {
424 dev_err(&adapter->ccw_device->dev, "Exclusive read-only access "
425 "not supported (LUN 0x%016Lx, port 0x%016Lx)\n",
426 zfcp_scsi_dev_lun(sdev),
427 (unsigned long long)zfcp_sdev->port->wwpn);
428 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
429 zfcp_erp_lun_shutdown(sdev, 0, "fsouh_6", NULL);
430 return -EACCES;
431 }
432
433 if (shared && rw) {
434 dev_err(&adapter->ccw_device->dev,
435 "Shared read-write access not supported "
436 "(LUN 0x%016Lx, port 0x%016Lx)\n",
437 zfcp_scsi_dev_lun(sdev),
438 (unsigned long long)zfcp_sdev->port->wwpn);
439 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
440 zfcp_erp_lun_shutdown(sdev, 0, "fsosh_8", NULL);
441 return -EACCES;
442 }
443
444 return 0;
445}
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index a86117b0d6e1..2cdd6b28ff7f 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -154,7 +154,6 @@ void _zfcp_dbf_hba_fsf_response(const char *tag2, int level,
154 scsi_cmnd = (struct scsi_cmnd *)fsf_req->data; 154 scsi_cmnd = (struct scsi_cmnd *)fsf_req->data;
155 if (scsi_cmnd) { 155 if (scsi_cmnd) {
156 response->u.fcp.cmnd = (unsigned long)scsi_cmnd; 156 response->u.fcp.cmnd = (unsigned long)scsi_cmnd;
157 response->u.fcp.serial = scsi_cmnd->serial_number;
158 response->u.fcp.data_dir = 157 response->u.fcp.data_dir =
159 qtcb->bottom.io.data_direction; 158 qtcb->bottom.io.data_direction;
160 } 159 }
@@ -330,7 +329,6 @@ static void zfcp_dbf_hba_view_response(char **p,
330 break; 329 break;
331 zfcp_dbf_out(p, "data_direction", "0x%04x", r->u.fcp.data_dir); 330 zfcp_dbf_out(p, "data_direction", "0x%04x", r->u.fcp.data_dir);
332 zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd); 331 zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd);
333 zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial);
334 *p += sprintf(*p, "\n"); 332 *p += sprintf(*p, "\n");
335 break; 333 break;
336 334
@@ -482,7 +480,7 @@ static int zfcp_dbf_rec_view_format(debug_info_t *id, struct debug_view *view,
482 zfcp_dbf_out(&p, "fcp_lun", "0x%016Lx", r->u.trigger.fcp_lun); 480 zfcp_dbf_out(&p, "fcp_lun", "0x%016Lx", r->u.trigger.fcp_lun);
483 zfcp_dbf_out(&p, "adapter_status", "0x%08x", r->u.trigger.as); 481 zfcp_dbf_out(&p, "adapter_status", "0x%08x", r->u.trigger.as);
484 zfcp_dbf_out(&p, "port_status", "0x%08x", r->u.trigger.ps); 482 zfcp_dbf_out(&p, "port_status", "0x%08x", r->u.trigger.ps);
485 zfcp_dbf_out(&p, "unit_status", "0x%08x", r->u.trigger.us); 483 zfcp_dbf_out(&p, "lun_status", "0x%08x", r->u.trigger.ls);
486 break; 484 break;
487 case ZFCP_REC_DBF_ID_ACTION: 485 case ZFCP_REC_DBF_ID_ACTION:
488 zfcp_dbf_out(&p, "erp_action", "0x%016Lx", r->u.action.action); 486 zfcp_dbf_out(&p, "erp_action", "0x%016Lx", r->u.action.action);
@@ -600,19 +598,20 @@ void zfcp_dbf_rec_port(char *id, void *ref, struct zfcp_port *port)
600} 598}
601 599
602/** 600/**
603 * zfcp_dbf_rec_unit - trace event for unit state change 601 * zfcp_dbf_rec_lun - trace event for LUN state change
604 * @id: identifier for trigger of state change 602 * @id: identifier for trigger of state change
605 * @ref: additional reference (e.g. request) 603 * @ref: additional reference (e.g. request)
606 * @unit: unit 604 * @sdev: SCSI device
607 */ 605 */
608void zfcp_dbf_rec_unit(char *id, void *ref, struct zfcp_unit *unit) 606void zfcp_dbf_rec_lun(char *id, void *ref, struct scsi_device *sdev)
609{ 607{
610 struct zfcp_port *port = unit->port; 608 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
609 struct zfcp_port *port = zfcp_sdev->port;
611 struct zfcp_dbf *dbf = port->adapter->dbf; 610 struct zfcp_dbf *dbf = port->adapter->dbf;
612 611
613 zfcp_dbf_rec_target(id, ref, dbf, &unit->status, 612 zfcp_dbf_rec_target(id, ref, dbf, &zfcp_sdev->status,
614 &unit->erp_counter, port->wwpn, port->d_id, 613 &zfcp_sdev->erp_counter, port->wwpn, port->d_id,
615 unit->fcp_lun); 614 zfcp_scsi_dev_lun(sdev));
616} 615}
617 616
618/** 617/**
@@ -624,11 +623,11 @@ void zfcp_dbf_rec_unit(char *id, void *ref, struct zfcp_unit *unit)
624 * @action: address of error recovery action struct 623 * @action: address of error recovery action struct
625 * @adapter: adapter 624 * @adapter: adapter
626 * @port: port 625 * @port: port
627 * @unit: unit 626 * @sdev: SCSI device
628 */ 627 */
629void zfcp_dbf_rec_trigger(char *id2, void *ref, u8 want, u8 need, void *action, 628void zfcp_dbf_rec_trigger(char *id2, void *ref, u8 want, u8 need, void *action,
630 struct zfcp_adapter *adapter, struct zfcp_port *port, 629 struct zfcp_adapter *adapter, struct zfcp_port *port,
631 struct zfcp_unit *unit) 630 struct scsi_device *sdev)
632{ 631{
633 struct zfcp_dbf *dbf = adapter->dbf; 632 struct zfcp_dbf *dbf = adapter->dbf;
634 struct zfcp_dbf_rec_record *r = &dbf->rec_buf; 633 struct zfcp_dbf_rec_record *r = &dbf->rec_buf;
@@ -647,9 +646,10 @@ void zfcp_dbf_rec_trigger(char *id2, void *ref, u8 want, u8 need, void *action,
647 r->u.trigger.ps = atomic_read(&port->status); 646 r->u.trigger.ps = atomic_read(&port->status);
648 r->u.trigger.wwpn = port->wwpn; 647 r->u.trigger.wwpn = port->wwpn;
649 } 648 }
650 if (unit) 649 if (sdev)
651 r->u.trigger.us = atomic_read(&unit->status); 650 r->u.trigger.ls = atomic_read(&sdev_to_zfcp(sdev)->status);
652 r->u.trigger.fcp_lun = unit ? unit->fcp_lun : ZFCP_DBF_INVALID_LUN; 651 r->u.trigger.fcp_lun = sdev ? zfcp_scsi_dev_lun(sdev) :
652 ZFCP_DBF_INVALID_LUN;
653 debug_event(dbf->rec, action ? 1 : 4, r, sizeof(*r)); 653 debug_event(dbf->rec, action ? 1 : 4, r, sizeof(*r));
654 spin_unlock_irqrestore(&dbf->rec_lock, flags); 654 spin_unlock_irqrestore(&dbf->rec_lock, flags);
655} 655}
@@ -879,7 +879,6 @@ void _zfcp_dbf_scsi(const char *tag, const char *tag2, int level,
879 } 879 }
880 rec->scsi_result = scsi_cmnd->result; 880 rec->scsi_result = scsi_cmnd->result;
881 rec->scsi_cmnd = (unsigned long)scsi_cmnd; 881 rec->scsi_cmnd = (unsigned long)scsi_cmnd;
882 rec->scsi_serial = scsi_cmnd->serial_number;
883 memcpy(rec->scsi_opcode, scsi_cmnd->cmnd, 882 memcpy(rec->scsi_opcode, scsi_cmnd->cmnd,
884 min((int)scsi_cmnd->cmd_len, 883 min((int)scsi_cmnd->cmd_len,
885 ZFCP_DBF_SCSI_OPCODE)); 884 ZFCP_DBF_SCSI_OPCODE));
@@ -948,7 +947,6 @@ static int zfcp_dbf_scsi_view_format(debug_info_t *id, struct debug_view *view,
948 zfcp_dbf_out(&p, "scsi_lun", "0x%08x", r->scsi_lun); 947 zfcp_dbf_out(&p, "scsi_lun", "0x%08x", r->scsi_lun);
949 zfcp_dbf_out(&p, "scsi_result", "0x%08x", r->scsi_result); 948 zfcp_dbf_out(&p, "scsi_result", "0x%08x", r->scsi_result);
950 zfcp_dbf_out(&p, "scsi_cmnd", "0x%0Lx", r->scsi_cmnd); 949 zfcp_dbf_out(&p, "scsi_cmnd", "0x%0Lx", r->scsi_cmnd);
951 zfcp_dbf_out(&p, "scsi_serial", "0x%016Lx", r->scsi_serial);
952 zfcp_dbf_outd(&p, "scsi_opcode", r->scsi_opcode, ZFCP_DBF_SCSI_OPCODE, 950 zfcp_dbf_outd(&p, "scsi_opcode", r->scsi_opcode, ZFCP_DBF_SCSI_OPCODE,
953 0, ZFCP_DBF_SCSI_OPCODE); 951 0, ZFCP_DBF_SCSI_OPCODE);
954 zfcp_dbf_out(&p, "scsi_retries", "0x%02x", r->scsi_retries); 952 zfcp_dbf_out(&p, "scsi_retries", "0x%02x", r->scsi_retries);
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 2bcc3403126a..04081b1b62b4 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -60,7 +60,7 @@ struct zfcp_dbf_rec_record_trigger {
60 u8 need; 60 u8 need;
61 u32 as; 61 u32 as;
62 u32 ps; 62 u32 ps;
63 u32 us; 63 u32 ls;
64 u64 ref; 64 u64 ref;
65 u64 action; 65 u64 action;
66 u64 wwpn; 66 u64 wwpn;
@@ -110,7 +110,6 @@ struct zfcp_dbf_hba_record_response {
110 union { 110 union {
111 struct { 111 struct {
112 u64 cmnd; 112 u64 cmnd;
113 u64 serial;
114 u32 data_dir; 113 u32 data_dir;
115 } fcp; 114 } fcp;
116 struct { 115 struct {
@@ -206,7 +205,6 @@ struct zfcp_dbf_scsi_record {
206 u32 scsi_lun; 205 u32 scsi_lun;
207 u32 scsi_result; 206 u32 scsi_result;
208 u64 scsi_cmnd; 207 u64 scsi_cmnd;
209 u64 scsi_serial;
210#define ZFCP_DBF_SCSI_OPCODE 16 208#define ZFCP_DBF_SCSI_OPCODE 16
211 u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE]; 209 u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE];
212 u8 scsi_retries; 210 u8 scsi_retries;
@@ -350,16 +348,16 @@ void zfcp_dbf_scsi_abort(const char *tag, struct zfcp_dbf *dbf,
350/** 348/**
351 * zfcp_dbf_scsi_devreset - trace event for Logical Unit or Target Reset 349 * zfcp_dbf_scsi_devreset - trace event for Logical Unit or Target Reset
352 * @tag: tag indicating success or failure of reset operation 350 * @tag: tag indicating success or failure of reset operation
351 * @scmnd: SCSI command which caused this error recovery
353 * @flag: indicates type of reset (Target Reset, Logical Unit Reset) 352 * @flag: indicates type of reset (Target Reset, Logical Unit Reset)
354 * @unit: unit that needs reset
355 * @scsi_cmnd: SCSI command which caused this error recovery
356 */ 353 */
357static inline 354static inline
358void zfcp_dbf_scsi_devreset(const char *tag, u8 flag, struct zfcp_unit *unit, 355void zfcp_dbf_scsi_devreset(const char *tag, struct scsi_cmnd *scmnd, u8 flag)
359 struct scsi_cmnd *scsi_cmnd)
360{ 356{
357 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scmnd->device);
358
361 zfcp_dbf_scsi(flag == FCP_TMF_TGT_RESET ? "trst" : "lrst", tag, 1, 359 zfcp_dbf_scsi(flag == FCP_TMF_TGT_RESET ? "trst" : "lrst", tag, 1,
362 unit->port->adapter->dbf, scsi_cmnd, NULL, 0); 360 zfcp_sdev->port->adapter->dbf, scmnd, NULL, 0);
363} 361}
364 362
365#endif /* ZFCP_DBF_H */ 363#endif /* ZFCP_DBF_H */
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index e1c6b6e05a75..9ae1d0a6f627 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -85,8 +85,8 @@ struct zfcp_reqlist;
85#define ZFCP_STATUS_PORT_LINK_TEST 0x00000002 85#define ZFCP_STATUS_PORT_LINK_TEST 0x00000002
86 86
87/* logical unit status */ 87/* logical unit status */
88#define ZFCP_STATUS_UNIT_SHARED 0x00000004 88#define ZFCP_STATUS_LUN_SHARED 0x00000004
89#define ZFCP_STATUS_UNIT_READONLY 0x00000008 89#define ZFCP_STATUS_LUN_READONLY 0x00000008
90 90
91/* FSF request status (this does not have a common part) */ 91/* FSF request status (this does not have a common part) */
92#define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002 92#define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002
@@ -118,7 +118,7 @@ struct zfcp_erp_action {
118 int action; /* requested action code */ 118 int action; /* requested action code */
119 struct zfcp_adapter *adapter; /* device which should be recovered */ 119 struct zfcp_adapter *adapter; /* device which should be recovered */
120 struct zfcp_port *port; 120 struct zfcp_port *port;
121 struct zfcp_unit *unit; 121 struct scsi_device *sdev;
122 u32 status; /* recovery status */ 122 u32 status; /* recovery status */
123 u32 step; /* active step of this erp action */ 123 u32 step; /* active step of this erp action */
124 unsigned long fsf_req_id; 124 unsigned long fsf_req_id;
@@ -219,21 +219,66 @@ struct zfcp_port {
219 unsigned int starget_id; 219 unsigned int starget_id;
220}; 220};
221 221
222/**
223 * struct zfcp_unit - LUN configured via zfcp sysfs
224 * @dev: struct device for sysfs representation and reference counting
225 * @list: entry in LUN/unit list per zfcp_port
226 * @port: reference to zfcp_port where this LUN is configured
227 * @fcp_lun: 64 bit LUN value
228 * @scsi_work: for running scsi_scan_target
229 *
230 * This is the representation of a LUN that has been configured for
231 * usage. The main data here is the 64 bit LUN value, data for
232 * running I/O and recovery is in struct zfcp_scsi_dev.
233 */
222struct zfcp_unit { 234struct zfcp_unit {
223 struct device dev; 235 struct device dev;
224 struct list_head list; /* list of logical units */ 236 struct list_head list;
225 struct zfcp_port *port; /* remote port of unit */ 237 struct zfcp_port *port;
226 atomic_t status; /* status of this logical unit */ 238 u64 fcp_lun;
227 u64 fcp_lun; /* own FCP_LUN */
228 u32 handle; /* handle assigned by FSF */
229 struct scsi_device *device; /* scsi device struct pointer */
230 struct zfcp_erp_action erp_action; /* pending error recovery */
231 atomic_t erp_counter;
232 struct zfcp_latencies latencies;
233 struct work_struct scsi_work; 239 struct work_struct scsi_work;
234}; 240};
235 241
236/** 242/**
243 * struct zfcp_scsi_dev - zfcp data per SCSI device
244 * @status: zfcp internal status flags
245 * @lun_handle: handle from "open lun" for issuing FSF requests
246 * @erp_action: zfcp erp data for opening and recovering this LUN
247 * @erp_counter: zfcp erp counter for this LUN
248 * @latencies: FSF channel and fabric latencies
249 * @port: zfcp_port where this LUN belongs to
250 */
251struct zfcp_scsi_dev {
252 atomic_t status;
253 u32 lun_handle;
254 struct zfcp_erp_action erp_action;
255 atomic_t erp_counter;
256 struct zfcp_latencies latencies;
257 struct zfcp_port *port;
258};
259
260/**
261 * sdev_to_zfcp - Access zfcp LUN data for SCSI device
262 * @sdev: scsi_device where to get the zfcp_scsi_dev pointer
263 */
264static inline struct zfcp_scsi_dev *sdev_to_zfcp(struct scsi_device *sdev)
265{
266 return scsi_transport_device_data(sdev);
267}
268
269/**
270 * zfcp_scsi_dev_lun - Return SCSI device LUN as 64 bit FCP LUN
271 * @sdev: SCSI device where to get the LUN from
272 */
273static inline u64 zfcp_scsi_dev_lun(struct scsi_device *sdev)
274{
275 u64 fcp_lun;
276
277 int_to_scsilun(sdev->lun, (struct scsi_lun *)&fcp_lun);
278 return fcp_lun;
279}
280
281/**
237 * struct zfcp_fsf_req - basic FSF request structure 282 * struct zfcp_fsf_req - basic FSF request structure
238 * @list: list of FSF requests 283 * @list: list of FSF requests
239 * @req_id: unique request ID 284 * @req_id: unique request ID
@@ -249,7 +294,6 @@ struct zfcp_unit {
249 * @erp_action: reference to erp action if request issued on behalf of ERP 294 * @erp_action: reference to erp action if request issued on behalf of ERP
250 * @pool: reference to memory pool if used for this request 295 * @pool: reference to memory pool if used for this request
251 * @issued: time when request was send (STCK) 296 * @issued: time when request was send (STCK)
252 * @unit: reference to unit if this request is a SCSI request
253 * @handler: handler which should be called to process response 297 * @handler: handler which should be called to process response
254 */ 298 */
255struct zfcp_fsf_req { 299struct zfcp_fsf_req {
@@ -267,7 +311,6 @@ struct zfcp_fsf_req {
267 struct zfcp_erp_action *erp_action; 311 struct zfcp_erp_action *erp_action;
268 mempool_t *pool; 312 mempool_t *pool;
269 unsigned long long issued; 313 unsigned long long issued;
270 struct zfcp_unit *unit;
271 void (*handler)(struct zfcp_fsf_req *); 314 void (*handler)(struct zfcp_fsf_req *);
272}; 315};
273 316
@@ -282,9 +325,4 @@ struct zfcp_data {
282 struct kmem_cache *adisc_cache; 325 struct kmem_cache *adisc_cache;
283}; 326};
284 327
285/********************** ZFCP SPECIFIC DEFINES ********************************/
286
287#define ZFCP_SET 0x00000100
288#define ZFCP_CLEAR 0x00000200
289
290#endif /* ZFCP_DEF_H */ 328#endif /* ZFCP_DEF_H */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 160b432c907f..d37c7331f244 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -21,6 +21,7 @@ enum zfcp_erp_act_flags {
21 ZFCP_STATUS_ERP_DISMISSING = 0x00100000, 21 ZFCP_STATUS_ERP_DISMISSING = 0x00100000,
22 ZFCP_STATUS_ERP_DISMISSED = 0x00200000, 22 ZFCP_STATUS_ERP_DISMISSED = 0x00200000,
23 ZFCP_STATUS_ERP_LOWMEM = 0x00400000, 23 ZFCP_STATUS_ERP_LOWMEM = 0x00400000,
24 ZFCP_STATUS_ERP_NO_REF = 0x00800000,
24}; 25};
25 26
26enum zfcp_erp_steps { 27enum zfcp_erp_steps {
@@ -29,12 +30,12 @@ enum zfcp_erp_steps {
29 ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010, 30 ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010,
30 ZFCP_ERP_STEP_PORT_CLOSING = 0x0100, 31 ZFCP_ERP_STEP_PORT_CLOSING = 0x0100,
31 ZFCP_ERP_STEP_PORT_OPENING = 0x0800, 32 ZFCP_ERP_STEP_PORT_OPENING = 0x0800,
32 ZFCP_ERP_STEP_UNIT_CLOSING = 0x1000, 33 ZFCP_ERP_STEP_LUN_CLOSING = 0x1000,
33 ZFCP_ERP_STEP_UNIT_OPENING = 0x2000, 34 ZFCP_ERP_STEP_LUN_OPENING = 0x2000,
34}; 35};
35 36
36enum zfcp_erp_act_type { 37enum zfcp_erp_act_type {
37 ZFCP_ERP_ACTION_REOPEN_UNIT = 1, 38 ZFCP_ERP_ACTION_REOPEN_LUN = 1,
38 ZFCP_ERP_ACTION_REOPEN_PORT = 2, 39 ZFCP_ERP_ACTION_REOPEN_PORT = 2,
39 ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3, 40 ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3,
40 ZFCP_ERP_ACTION_REOPEN_ADAPTER = 4, 41 ZFCP_ERP_ACTION_REOPEN_ADAPTER = 4,
@@ -56,9 +57,8 @@ enum zfcp_erp_act_result {
56 57
57static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int mask) 58static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int mask)
58{ 59{
59 zfcp_erp_modify_adapter_status(adapter, "erablk1", NULL, 60 zfcp_erp_clear_adapter_status(adapter,
60 ZFCP_STATUS_COMMON_UNBLOCKED | mask, 61 ZFCP_STATUS_COMMON_UNBLOCKED | mask);
61 ZFCP_CLEAR);
62} 62}
63 63
64static int zfcp_erp_action_exists(struct zfcp_erp_action *act) 64static int zfcp_erp_action_exists(struct zfcp_erp_action *act)
@@ -88,24 +88,24 @@ static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act)
88 zfcp_erp_action_ready(act); 88 zfcp_erp_action_ready(act);
89} 89}
90 90
91static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit) 91static void zfcp_erp_action_dismiss_lun(struct scsi_device *sdev)
92{ 92{
93 if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_INUSE) 93 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
94 zfcp_erp_action_dismiss(&unit->erp_action); 94
95 if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
96 zfcp_erp_action_dismiss(&zfcp_sdev->erp_action);
95} 97}
96 98
97static void zfcp_erp_action_dismiss_port(struct zfcp_port *port) 99static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
98{ 100{
99 struct zfcp_unit *unit; 101 struct scsi_device *sdev;
100 102
101 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE) 103 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
102 zfcp_erp_action_dismiss(&port->erp_action); 104 zfcp_erp_action_dismiss(&port->erp_action);
103 else { 105 else
104 read_lock(&port->unit_list_lock); 106 shost_for_each_device(sdev, port->adapter->scsi_host)
105 list_for_each_entry(unit, &port->unit_list, list) 107 if (sdev_to_zfcp(sdev)->port == port)
106 zfcp_erp_action_dismiss_unit(unit); 108 zfcp_erp_action_dismiss_lun(sdev);
107 read_unlock(&port->unit_list_lock);
108 }
109} 109}
110 110
111static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) 111static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
@@ -124,15 +124,17 @@ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
124 124
125static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter, 125static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
126 struct zfcp_port *port, 126 struct zfcp_port *port,
127 struct zfcp_unit *unit) 127 struct scsi_device *sdev)
128{ 128{
129 int need = want; 129 int need = want;
130 int u_status, p_status, a_status; 130 int l_status, p_status, a_status;
131 struct zfcp_scsi_dev *zfcp_sdev;
131 132
132 switch (want) { 133 switch (want) {
133 case ZFCP_ERP_ACTION_REOPEN_UNIT: 134 case ZFCP_ERP_ACTION_REOPEN_LUN:
134 u_status = atomic_read(&unit->status); 135 zfcp_sdev = sdev_to_zfcp(sdev);
135 if (u_status & ZFCP_STATUS_COMMON_ERP_INUSE) 136 l_status = atomic_read(&zfcp_sdev->status);
137 if (l_status & ZFCP_STATUS_COMMON_ERP_INUSE)
136 return 0; 138 return 0;
137 p_status = atomic_read(&port->status); 139 p_status = atomic_read(&port->status);
138 if (!(p_status & ZFCP_STATUS_COMMON_RUNNING) || 140 if (!(p_status & ZFCP_STATUS_COMMON_RUNNING) ||
@@ -169,22 +171,26 @@ static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
169 return need; 171 return need;
170} 172}
171 173
172static struct zfcp_erp_action *zfcp_erp_setup_act(int need, 174static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
173 struct zfcp_adapter *adapter, 175 struct zfcp_adapter *adapter,
174 struct zfcp_port *port, 176 struct zfcp_port *port,
175 struct zfcp_unit *unit) 177 struct scsi_device *sdev)
176{ 178{
177 struct zfcp_erp_action *erp_action; 179 struct zfcp_erp_action *erp_action;
178 u32 status = 0; 180 struct zfcp_scsi_dev *zfcp_sdev;
179 181
180 switch (need) { 182 switch (need) {
181 case ZFCP_ERP_ACTION_REOPEN_UNIT: 183 case ZFCP_ERP_ACTION_REOPEN_LUN:
182 if (!get_device(&unit->dev)) 184 zfcp_sdev = sdev_to_zfcp(sdev);
183 return NULL; 185 if (!(act_status & ZFCP_STATUS_ERP_NO_REF))
184 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status); 186 if (scsi_device_get(sdev))
185 erp_action = &unit->erp_action; 187 return NULL;
186 if (!(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_RUNNING)) 188 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
187 status = ZFCP_STATUS_ERP_CLOSE_ONLY; 189 &zfcp_sdev->status);
190 erp_action = &zfcp_sdev->erp_action;
191 if (!(atomic_read(&zfcp_sdev->status) &
192 ZFCP_STATUS_COMMON_RUNNING))
193 act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
188 break; 194 break;
189 195
190 case ZFCP_ERP_ACTION_REOPEN_PORT: 196 case ZFCP_ERP_ACTION_REOPEN_PORT:
@@ -195,7 +201,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
195 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status); 201 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
196 erp_action = &port->erp_action; 202 erp_action = &port->erp_action;
197 if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING)) 203 if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING))
198 status = ZFCP_STATUS_ERP_CLOSE_ONLY; 204 act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
199 break; 205 break;
200 206
201 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 207 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
@@ -205,7 +211,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
205 erp_action = &adapter->erp_action; 211 erp_action = &adapter->erp_action;
206 if (!(atomic_read(&adapter->status) & 212 if (!(atomic_read(&adapter->status) &
207 ZFCP_STATUS_COMMON_RUNNING)) 213 ZFCP_STATUS_COMMON_RUNNING))
208 status = ZFCP_STATUS_ERP_CLOSE_ONLY; 214 act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
209 break; 215 break;
210 216
211 default: 217 default:
@@ -215,16 +221,17 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
215 memset(erp_action, 0, sizeof(struct zfcp_erp_action)); 221 memset(erp_action, 0, sizeof(struct zfcp_erp_action));
216 erp_action->adapter = adapter; 222 erp_action->adapter = adapter;
217 erp_action->port = port; 223 erp_action->port = port;
218 erp_action->unit = unit; 224 erp_action->sdev = sdev;
219 erp_action->action = need; 225 erp_action->action = need;
220 erp_action->status = status; 226 erp_action->status = act_status;
221 227
222 return erp_action; 228 return erp_action;
223} 229}
224 230
225static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, 231static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
226 struct zfcp_port *port, 232 struct zfcp_port *port,
227 struct zfcp_unit *unit, char *id, void *ref) 233 struct scsi_device *sdev,
234 char *id, void *ref, u32 act_status)
228{ 235{
229 int retval = 1, need; 236 int retval = 1, need;
230 struct zfcp_erp_action *act = NULL; 237 struct zfcp_erp_action *act = NULL;
@@ -232,21 +239,21 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
232 if (!adapter->erp_thread) 239 if (!adapter->erp_thread)
233 return -EIO; 240 return -EIO;
234 241
235 need = zfcp_erp_required_act(want, adapter, port, unit); 242 need = zfcp_erp_required_act(want, adapter, port, sdev);
236 if (!need) 243 if (!need)
237 goto out; 244 goto out;
238 245
239 atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status); 246 act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev);
240 act = zfcp_erp_setup_act(need, adapter, port, unit);
241 if (!act) 247 if (!act)
242 goto out; 248 goto out;
249 atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
243 ++adapter->erp_total_count; 250 ++adapter->erp_total_count;
244 list_add_tail(&act->list, &adapter->erp_ready_head); 251 list_add_tail(&act->list, &adapter->erp_ready_head);
245 wake_up(&adapter->erp_ready_wq); 252 wake_up(&adapter->erp_ready_wq);
246 zfcp_dbf_rec_thread("eracte1", adapter->dbf); 253 zfcp_dbf_rec_thread("eracte1", adapter->dbf);
247 retval = 0; 254 retval = 0;
248 out: 255 out:
249 zfcp_dbf_rec_trigger(id, ref, want, need, act, adapter, port, unit); 256 zfcp_dbf_rec_trigger(id, ref, want, need, act, adapter, port, sdev);
250 return retval; 257 return retval;
251} 258}
252 259
@@ -258,11 +265,12 @@ static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
258 265
259 /* ensure propagation of failed status to new devices */ 266 /* ensure propagation of failed status to new devices */
260 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { 267 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
261 zfcp_erp_adapter_failed(adapter, "erareo1", NULL); 268 zfcp_erp_set_adapter_status(adapter,
269 ZFCP_STATUS_COMMON_ERP_FAILED);
262 return -EIO; 270 return -EIO;
263 } 271 }
264 return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, 272 return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
265 adapter, NULL, NULL, id, ref); 273 adapter, NULL, NULL, id, ref, 0);
266} 274}
267 275
268/** 276/**
@@ -282,10 +290,11 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
282 290
283 write_lock_irqsave(&adapter->erp_lock, flags); 291 write_lock_irqsave(&adapter->erp_lock, flags);
284 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) 292 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
285 zfcp_erp_adapter_failed(adapter, "erareo1", NULL); 293 zfcp_erp_set_adapter_status(adapter,
294 ZFCP_STATUS_COMMON_ERP_FAILED);
286 else 295 else
287 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter, 296 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
288 NULL, NULL, id, ref); 297 NULL, NULL, id, ref, 0);
289 write_unlock_irqrestore(&adapter->erp_lock, flags); 298 write_unlock_irqrestore(&adapter->erp_lock, flags);
290} 299}
291 300
@@ -317,25 +326,10 @@ void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *id,
317 zfcp_erp_port_reopen(port, clear | flags, id, ref); 326 zfcp_erp_port_reopen(port, clear | flags, id, ref);
318} 327}
319 328
320/**
321 * zfcp_erp_unit_shutdown - Shutdown unit
322 * @unit: Unit to shut down.
323 * @clear: Status flags to clear.
324 * @id: Id for debug trace event.
325 * @ref: Reference for debug trace event.
326 */
327void zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear, char *id,
328 void *ref)
329{
330 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
331 zfcp_erp_unit_reopen(unit, clear | flags, id, ref);
332}
333
334static void zfcp_erp_port_block(struct zfcp_port *port, int clear) 329static void zfcp_erp_port_block(struct zfcp_port *port, int clear)
335{ 330{
336 zfcp_erp_modify_port_status(port, "erpblk1", NULL, 331 zfcp_erp_clear_port_status(port,
337 ZFCP_STATUS_COMMON_UNBLOCKED | clear, 332 ZFCP_STATUS_COMMON_UNBLOCKED | clear);
338 ZFCP_CLEAR);
339} 333}
340 334
341static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, 335static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port,
@@ -348,7 +342,7 @@ static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port,
348 return; 342 return;
349 343
350 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED, 344 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
351 port->adapter, port, NULL, id, ref); 345 port->adapter, port, NULL, id, ref, 0);
352} 346}
353 347
354/** 348/**
@@ -376,12 +370,12 @@ static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id,
376 370
377 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { 371 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
378 /* ensure propagation of failed status to new devices */ 372 /* ensure propagation of failed status to new devices */
379 zfcp_erp_port_failed(port, "erpreo1", NULL); 373 zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
380 return -EIO; 374 return -EIO;
381 } 375 }
382 376
383 return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT, 377 return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
384 port->adapter, port, NULL, id, ref); 378 port->adapter, port, NULL, id, ref, 0);
385} 379}
386 380
387/** 381/**
@@ -404,53 +398,88 @@ int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, void *ref)
404 return retval; 398 return retval;
405} 399}
406 400
407static void zfcp_erp_unit_block(struct zfcp_unit *unit, int clear_mask) 401static void zfcp_erp_lun_block(struct scsi_device *sdev, int clear_mask)
408{ 402{
409 zfcp_erp_modify_unit_status(unit, "erublk1", NULL, 403 zfcp_erp_clear_lun_status(sdev,
410 ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask, 404 ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask);
411 ZFCP_CLEAR);
412} 405}
413 406
414static void _zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, char *id, 407static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
415 void *ref) 408 void *ref, u32 act_status)
416{ 409{
417 struct zfcp_adapter *adapter = unit->port->adapter; 410 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
411 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
418 412
419 zfcp_erp_unit_block(unit, clear); 413 zfcp_erp_lun_block(sdev, clear);
420 414
421 if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_FAILED) 415 if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
422 return; 416 return;
423 417
424 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_UNIT, 418 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter,
425 adapter, unit->port, unit, id, ref); 419 zfcp_sdev->port, sdev, id, ref, act_status);
426} 420}
427 421
428/** 422/**
429 * zfcp_erp_unit_reopen - initiate reopen of a unit 423 * zfcp_erp_lun_reopen - initiate reopen of a LUN
430 * @unit: unit to be reopened 424 * @sdev: SCSI device / LUN to be reopened
431 * @clear_mask: specifies flags in unit status to be cleared 425 * @clear_mask: specifies flags in LUN status to be cleared
432 * Return: 0 on success, < 0 on error 426 * Return: 0 on success, < 0 on error
433 */ 427 */
434void zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, char *id, 428void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
435 void *ref) 429 void *ref)
436{ 430{
437 unsigned long flags; 431 unsigned long flags;
438 struct zfcp_port *port = unit->port; 432 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
433 struct zfcp_port *port = zfcp_sdev->port;
439 struct zfcp_adapter *adapter = port->adapter; 434 struct zfcp_adapter *adapter = port->adapter;
440 435
441 write_lock_irqsave(&adapter->erp_lock, flags); 436 write_lock_irqsave(&adapter->erp_lock, flags);
442 _zfcp_erp_unit_reopen(unit, clear, id, ref); 437 _zfcp_erp_lun_reopen(sdev, clear, id, ref, 0);
443 write_unlock_irqrestore(&adapter->erp_lock, flags); 438 write_unlock_irqrestore(&adapter->erp_lock, flags);
444} 439}
445 440
446static int status_change_set(unsigned long mask, atomic_t *status) 441/**
442 * zfcp_erp_lun_shutdown - Shutdown LUN
443 * @sdev: SCSI device / LUN to shut down.
444 * @clear: Status flags to clear.
445 * @id: Id for debug trace event.
446 * @ref: Reference for debug trace event.
447 */
448void zfcp_erp_lun_shutdown(struct scsi_device *sdev, int clear, char *id,
449 void *ref)
447{ 450{
448 return (atomic_read(status) ^ mask) & mask; 451 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
452 zfcp_erp_lun_reopen(sdev, clear | flags, id, ref);
449} 453}
450 454
451static int status_change_clear(unsigned long mask, atomic_t *status) 455/**
456 * zfcp_erp_lun_shutdown_wait - Shutdown LUN and wait for erp completion
457 * @sdev: SCSI device / LUN to shut down.
458 * @id: Id for debug trace event.
459 *
460 * Do not acquire a reference for the LUN when creating the ERP
461 * action. It is safe, because this function waits for the ERP to
462 * complete first. This allows to shutdown the LUN, even when the SCSI
463 * device is in the state SDEV_DEL when scsi_device_get will fail.
464 */
465void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *id)
452{ 466{
453 return atomic_read(status) & mask; 467 unsigned long flags;
468 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
469 struct zfcp_port *port = zfcp_sdev->port;
470 struct zfcp_adapter *adapter = port->adapter;
471 int clear = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
472
473 write_lock_irqsave(&adapter->erp_lock, flags);
474 _zfcp_erp_lun_reopen(sdev, clear, id, NULL, ZFCP_STATUS_ERP_NO_REF);
475 write_unlock_irqrestore(&adapter->erp_lock, flags);
476
477 zfcp_erp_wait(adapter);
478}
479
480static int status_change_set(unsigned long mask, atomic_t *status)
481{
482 return (atomic_read(status) ^ mask) & mask;
454} 483}
455 484
456static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) 485static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
@@ -467,11 +496,13 @@ static void zfcp_erp_port_unblock(struct zfcp_port *port)
467 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status); 496 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
468} 497}
469 498
470static void zfcp_erp_unit_unblock(struct zfcp_unit *unit) 499static void zfcp_erp_lun_unblock(struct scsi_device *sdev)
471{ 500{
472 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status)) 501 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
473 zfcp_dbf_rec_unit("eruubl1", NULL, unit); 502
474 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status); 503 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status))
504 zfcp_dbf_rec_lun("erlubl1", NULL, sdev);
505 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status);
475} 506}
476 507
477static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action) 508static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
@@ -559,15 +590,14 @@ static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
559 read_unlock(&adapter->port_list_lock); 590 read_unlock(&adapter->port_list_lock);
560} 591}
561 592
562static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear, 593static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
563 char *id, void *ref) 594 char *id, void *ref)
564{ 595{
565 struct zfcp_unit *unit; 596 struct scsi_device *sdev;
566 597
567 read_lock(&port->unit_list_lock); 598 shost_for_each_device(sdev, port->adapter->scsi_host)
568 list_for_each_entry(unit, &port->unit_list, list) 599 if (sdev_to_zfcp(sdev)->port == port)
569 _zfcp_erp_unit_reopen(unit, clear, id, ref); 600 _zfcp_erp_lun_reopen(sdev, clear, id, ref, 0);
570 read_unlock(&port->unit_list_lock);
571} 601}
572 602
573static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) 603static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
@@ -582,8 +612,8 @@ static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
582 case ZFCP_ERP_ACTION_REOPEN_PORT: 612 case ZFCP_ERP_ACTION_REOPEN_PORT:
583 _zfcp_erp_port_reopen(act->port, 0, "ersff_3", NULL); 613 _zfcp_erp_port_reopen(act->port, 0, "ersff_3", NULL);
584 break; 614 break;
585 case ZFCP_ERP_ACTION_REOPEN_UNIT: 615 case ZFCP_ERP_ACTION_REOPEN_LUN:
586 _zfcp_erp_unit_reopen(act->unit, 0, "ersff_4", NULL); 616 _zfcp_erp_lun_reopen(act->sdev, 0, "ersff_4", NULL, 0);
587 break; 617 break;
588 } 618 }
589} 619}
@@ -598,7 +628,7 @@ static void zfcp_erp_strategy_followup_success(struct zfcp_erp_action *act)
598 _zfcp_erp_port_reopen(act->port, 0, "ersfs_2", NULL); 628 _zfcp_erp_port_reopen(act->port, 0, "ersfs_2", NULL);
599 break; 629 break;
600 case ZFCP_ERP_ACTION_REOPEN_PORT: 630 case ZFCP_ERP_ACTION_REOPEN_PORT:
601 _zfcp_erp_unit_reopen_all(act->port, 0, "ersfs_3", NULL); 631 _zfcp_erp_lun_reopen_all(act->port, 0, "ersfs_3", NULL);
602 break; 632 break;
603 } 633 }
604} 634}
@@ -742,9 +772,8 @@ static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act)
742 zfcp_fsf_req_dismiss_all(adapter); 772 zfcp_fsf_req_dismiss_all(adapter);
743 adapter->fsf_req_seq_no = 0; 773 adapter->fsf_req_seq_no = 0;
744 zfcp_fc_wka_ports_force_offline(adapter->gs); 774 zfcp_fc_wka_ports_force_offline(adapter->gs);
745 /* all ports and units are closed */ 775 /* all ports and LUNs are closed */
746 zfcp_erp_modify_adapter_status(adapter, "erascl1", NULL, 776 zfcp_erp_clear_adapter_status(adapter, ZFCP_STATUS_COMMON_OPEN);
747 ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
748 777
749 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK | 778 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
750 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); 779 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
@@ -861,7 +890,7 @@ static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
861 struct zfcp_port *port = act->port; 890 struct zfcp_port *port = act->port;
862 891
863 if (port->wwpn != adapter->peer_wwpn) { 892 if (port->wwpn != adapter->peer_wwpn) {
864 zfcp_erp_port_failed(port, "eroptp1", NULL); 893 zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
865 return ZFCP_ERP_FAILED; 894 return ZFCP_ERP_FAILED;
866 } 895 }
867 port->d_id = adapter->peer_d_id; 896 port->d_id = adapter->peer_d_id;
@@ -933,82 +962,87 @@ close_init_done:
933 return zfcp_erp_port_strategy_open_common(erp_action); 962 return zfcp_erp_port_strategy_open_common(erp_action);
934} 963}
935 964
936static void zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *unit) 965static void zfcp_erp_lun_strategy_clearstati(struct scsi_device *sdev)
937{ 966{
967 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
968
938 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | 969 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
939 ZFCP_STATUS_UNIT_SHARED | 970 ZFCP_STATUS_LUN_SHARED | ZFCP_STATUS_LUN_READONLY,
940 ZFCP_STATUS_UNIT_READONLY, 971 &zfcp_sdev->status);
941 &unit->status);
942} 972}
943 973
944static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *erp_action) 974static int zfcp_erp_lun_strategy_close(struct zfcp_erp_action *erp_action)
945{ 975{
946 int retval = zfcp_fsf_close_unit(erp_action); 976 int retval = zfcp_fsf_close_lun(erp_action);
947 if (retval == -ENOMEM) 977 if (retval == -ENOMEM)
948 return ZFCP_ERP_NOMEM; 978 return ZFCP_ERP_NOMEM;
949 erp_action->step = ZFCP_ERP_STEP_UNIT_CLOSING; 979 erp_action->step = ZFCP_ERP_STEP_LUN_CLOSING;
950 if (retval) 980 if (retval)
951 return ZFCP_ERP_FAILED; 981 return ZFCP_ERP_FAILED;
952 return ZFCP_ERP_CONTINUES; 982 return ZFCP_ERP_CONTINUES;
953} 983}
954 984
955static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *erp_action) 985static int zfcp_erp_lun_strategy_open(struct zfcp_erp_action *erp_action)
956{ 986{
957 int retval = zfcp_fsf_open_unit(erp_action); 987 int retval = zfcp_fsf_open_lun(erp_action);
958 if (retval == -ENOMEM) 988 if (retval == -ENOMEM)
959 return ZFCP_ERP_NOMEM; 989 return ZFCP_ERP_NOMEM;
960 erp_action->step = ZFCP_ERP_STEP_UNIT_OPENING; 990 erp_action->step = ZFCP_ERP_STEP_LUN_OPENING;
961 if (retval) 991 if (retval)
962 return ZFCP_ERP_FAILED; 992 return ZFCP_ERP_FAILED;
963 return ZFCP_ERP_CONTINUES; 993 return ZFCP_ERP_CONTINUES;
964} 994}
965 995
966static int zfcp_erp_unit_strategy(struct zfcp_erp_action *erp_action) 996static int zfcp_erp_lun_strategy(struct zfcp_erp_action *erp_action)
967{ 997{
968 struct zfcp_unit *unit = erp_action->unit; 998 struct scsi_device *sdev = erp_action->sdev;
999 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
969 1000
970 switch (erp_action->step) { 1001 switch (erp_action->step) {
971 case ZFCP_ERP_STEP_UNINITIALIZED: 1002 case ZFCP_ERP_STEP_UNINITIALIZED:
972 zfcp_erp_unit_strategy_clearstati(unit); 1003 zfcp_erp_lun_strategy_clearstati(sdev);
973 if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN) 1004 if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
974 return zfcp_erp_unit_strategy_close(erp_action); 1005 return zfcp_erp_lun_strategy_close(erp_action);
975 /* already closed, fall through */ 1006 /* already closed, fall through */
976 case ZFCP_ERP_STEP_UNIT_CLOSING: 1007 case ZFCP_ERP_STEP_LUN_CLOSING:
977 if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN) 1008 if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
978 return ZFCP_ERP_FAILED; 1009 return ZFCP_ERP_FAILED;
979 if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY) 1010 if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
980 return ZFCP_ERP_EXIT; 1011 return ZFCP_ERP_EXIT;
981 return zfcp_erp_unit_strategy_open(erp_action); 1012 return zfcp_erp_lun_strategy_open(erp_action);
982 1013
983 case ZFCP_ERP_STEP_UNIT_OPENING: 1014 case ZFCP_ERP_STEP_LUN_OPENING:
984 if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN) 1015 if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
985 return ZFCP_ERP_SUCCEEDED; 1016 return ZFCP_ERP_SUCCEEDED;
986 } 1017 }
987 return ZFCP_ERP_FAILED; 1018 return ZFCP_ERP_FAILED;
988} 1019}
989 1020
990static int zfcp_erp_strategy_check_unit(struct zfcp_unit *unit, int result) 1021static int zfcp_erp_strategy_check_lun(struct scsi_device *sdev, int result)
991{ 1022{
1023 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1024
992 switch (result) { 1025 switch (result) {
993 case ZFCP_ERP_SUCCEEDED : 1026 case ZFCP_ERP_SUCCEEDED :
994 atomic_set(&unit->erp_counter, 0); 1027 atomic_set(&zfcp_sdev->erp_counter, 0);
995 zfcp_erp_unit_unblock(unit); 1028 zfcp_erp_lun_unblock(sdev);
996 break; 1029 break;
997 case ZFCP_ERP_FAILED : 1030 case ZFCP_ERP_FAILED :
998 atomic_inc(&unit->erp_counter); 1031 atomic_inc(&zfcp_sdev->erp_counter);
999 if (atomic_read(&unit->erp_counter) > ZFCP_MAX_ERPS) { 1032 if (atomic_read(&zfcp_sdev->erp_counter) > ZFCP_MAX_ERPS) {
1000 dev_err(&unit->port->adapter->ccw_device->dev, 1033 dev_err(&zfcp_sdev->port->adapter->ccw_device->dev,
1001 "ERP failed for unit 0x%016Lx on " 1034 "ERP failed for LUN 0x%016Lx on "
1002 "port 0x%016Lx\n", 1035 "port 0x%016Lx\n",
1003 (unsigned long long)unit->fcp_lun, 1036 (unsigned long long)zfcp_scsi_dev_lun(sdev),
1004 (unsigned long long)unit->port->wwpn); 1037 (unsigned long long)zfcp_sdev->port->wwpn);
1005 zfcp_erp_unit_failed(unit, "erusck1", NULL); 1038 zfcp_erp_set_lun_status(sdev,
1039 ZFCP_STATUS_COMMON_ERP_FAILED);
1006 } 1040 }
1007 break; 1041 break;
1008 } 1042 }
1009 1043
1010 if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { 1044 if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
1011 zfcp_erp_unit_block(unit, 0); 1045 zfcp_erp_lun_block(sdev, 0);
1012 result = ZFCP_ERP_EXIT; 1046 result = ZFCP_ERP_EXIT;
1013 } 1047 }
1014 return result; 1048 return result;
@@ -1032,7 +1066,8 @@ static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
1032 dev_err(&port->adapter->ccw_device->dev, 1066 dev_err(&port->adapter->ccw_device->dev,
1033 "ERP failed for remote port 0x%016Lx\n", 1067 "ERP failed for remote port 0x%016Lx\n",
1034 (unsigned long long)port->wwpn); 1068 (unsigned long long)port->wwpn);
1035 zfcp_erp_port_failed(port, "erpsck1", NULL); 1069 zfcp_erp_set_port_status(port,
1070 ZFCP_STATUS_COMMON_ERP_FAILED);
1036 } 1071 }
1037 break; 1072 break;
1038 } 1073 }
@@ -1059,7 +1094,8 @@ static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter,
1059 dev_err(&adapter->ccw_device->dev, 1094 dev_err(&adapter->ccw_device->dev,
1060 "ERP cannot recover an error " 1095 "ERP cannot recover an error "
1061 "on the FCP device\n"); 1096 "on the FCP device\n");
1062 zfcp_erp_adapter_failed(adapter, "erasck1", NULL); 1097 zfcp_erp_set_adapter_status(adapter,
1098 ZFCP_STATUS_COMMON_ERP_FAILED);
1063 } 1099 }
1064 break; 1100 break;
1065 } 1101 }
@@ -1076,12 +1112,12 @@ static int zfcp_erp_strategy_check_target(struct zfcp_erp_action *erp_action,
1076{ 1112{
1077 struct zfcp_adapter *adapter = erp_action->adapter; 1113 struct zfcp_adapter *adapter = erp_action->adapter;
1078 struct zfcp_port *port = erp_action->port; 1114 struct zfcp_port *port = erp_action->port;
1079 struct zfcp_unit *unit = erp_action->unit; 1115 struct scsi_device *sdev = erp_action->sdev;
1080 1116
1081 switch (erp_action->action) { 1117 switch (erp_action->action) {
1082 1118
1083 case ZFCP_ERP_ACTION_REOPEN_UNIT: 1119 case ZFCP_ERP_ACTION_REOPEN_LUN:
1084 result = zfcp_erp_strategy_check_unit(unit, result); 1120 result = zfcp_erp_strategy_check_lun(sdev, result);
1085 break; 1121 break;
1086 1122
1087 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 1123 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
@@ -1116,7 +1152,8 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
1116 int action = act->action; 1152 int action = act->action;
1117 struct zfcp_adapter *adapter = act->adapter; 1153 struct zfcp_adapter *adapter = act->adapter;
1118 struct zfcp_port *port = act->port; 1154 struct zfcp_port *port = act->port;
1119 struct zfcp_unit *unit = act->unit; 1155 struct scsi_device *sdev = act->sdev;
1156 struct zfcp_scsi_dev *zfcp_sdev;
1120 u32 erp_status = act->status; 1157 u32 erp_status = act->status;
1121 1158
1122 switch (action) { 1159 switch (action) {
@@ -1139,11 +1176,12 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
1139 } 1176 }
1140 break; 1177 break;
1141 1178
1142 case ZFCP_ERP_ACTION_REOPEN_UNIT: 1179 case ZFCP_ERP_ACTION_REOPEN_LUN:
1143 if (zfcp_erp_strat_change_det(&unit->status, erp_status)) { 1180 zfcp_sdev = sdev_to_zfcp(sdev);
1144 _zfcp_erp_unit_reopen(unit, 1181 if (zfcp_erp_strat_change_det(&zfcp_sdev->status, erp_status)) {
1145 ZFCP_STATUS_COMMON_ERP_FAILED, 1182 _zfcp_erp_lun_reopen(sdev,
1146 "ersscg3", NULL); 1183 ZFCP_STATUS_COMMON_ERP_FAILED,
1184 "ersscg3", NULL, 0);
1147 return ZFCP_ERP_EXIT; 1185 return ZFCP_ERP_EXIT;
1148 } 1186 }
1149 break; 1187 break;
@@ -1154,6 +1192,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
1154static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action) 1192static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
1155{ 1193{
1156 struct zfcp_adapter *adapter = erp_action->adapter; 1194 struct zfcp_adapter *adapter = erp_action->adapter;
1195 struct zfcp_scsi_dev *zfcp_sdev;
1157 1196
1158 adapter->erp_total_count--; 1197 adapter->erp_total_count--;
1159 if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) { 1198 if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
@@ -1165,9 +1204,10 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
1165 zfcp_dbf_rec_action("eractd1", erp_action); 1204 zfcp_dbf_rec_action("eractd1", erp_action);
1166 1205
1167 switch (erp_action->action) { 1206 switch (erp_action->action) {
1168 case ZFCP_ERP_ACTION_REOPEN_UNIT: 1207 case ZFCP_ERP_ACTION_REOPEN_LUN:
1208 zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
1169 atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE, 1209 atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
1170 &erp_action->unit->status); 1210 &zfcp_sdev->status);
1171 break; 1211 break;
1172 1212
1173 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 1213 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
@@ -1187,11 +1227,12 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
1187{ 1227{
1188 struct zfcp_adapter *adapter = act->adapter; 1228 struct zfcp_adapter *adapter = act->adapter;
1189 struct zfcp_port *port = act->port; 1229 struct zfcp_port *port = act->port;
1190 struct zfcp_unit *unit = act->unit; 1230 struct scsi_device *sdev = act->sdev;
1191 1231
1192 switch (act->action) { 1232 switch (act->action) {
1193 case ZFCP_ERP_ACTION_REOPEN_UNIT: 1233 case ZFCP_ERP_ACTION_REOPEN_LUN:
1194 put_device(&unit->dev); 1234 if (!(act->status & ZFCP_STATUS_ERP_NO_REF))
1235 scsi_device_put(sdev);
1195 break; 1236 break;
1196 1237
1197 case ZFCP_ERP_ACTION_REOPEN_PORT: 1238 case ZFCP_ERP_ACTION_REOPEN_PORT:
@@ -1222,8 +1263,8 @@ static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action)
1222 return zfcp_erp_port_forced_strategy(erp_action); 1263 return zfcp_erp_port_forced_strategy(erp_action);
1223 case ZFCP_ERP_ACTION_REOPEN_PORT: 1264 case ZFCP_ERP_ACTION_REOPEN_PORT:
1224 return zfcp_erp_port_strategy(erp_action); 1265 return zfcp_erp_port_strategy(erp_action);
1225 case ZFCP_ERP_ACTION_REOPEN_UNIT: 1266 case ZFCP_ERP_ACTION_REOPEN_LUN:
1226 return zfcp_erp_unit_strategy(erp_action); 1267 return zfcp_erp_lun_strategy(erp_action);
1227 } 1268 }
1228 return ZFCP_ERP_FAILED; 1269 return ZFCP_ERP_FAILED;
1229} 1270}
@@ -1376,42 +1417,6 @@ void zfcp_erp_thread_kill(struct zfcp_adapter *adapter)
1376} 1417}
1377 1418
1378/** 1419/**
1379 * zfcp_erp_adapter_failed - Set adapter status to failed.
1380 * @adapter: Failed adapter.
1381 * @id: Event id for debug trace.
1382 * @ref: Reference for debug trace.
1383 */
1384void zfcp_erp_adapter_failed(struct zfcp_adapter *adapter, char *id, void *ref)
1385{
1386 zfcp_erp_modify_adapter_status(adapter, id, ref,
1387 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
1388}
1389
1390/**
1391 * zfcp_erp_port_failed - Set port status to failed.
1392 * @port: Failed port.
1393 * @id: Event id for debug trace.
1394 * @ref: Reference for debug trace.
1395 */
1396void zfcp_erp_port_failed(struct zfcp_port *port, char *id, void *ref)
1397{
1398 zfcp_erp_modify_port_status(port, id, ref,
1399 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
1400}
1401
1402/**
1403 * zfcp_erp_unit_failed - Set unit status to failed.
1404 * @unit: Failed unit.
1405 * @id: Event id for debug trace.
1406 * @ref: Reference for debug trace.
1407 */
1408void zfcp_erp_unit_failed(struct zfcp_unit *unit, char *id, void *ref)
1409{
1410 zfcp_erp_modify_unit_status(unit, id, ref,
1411 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
1412}
1413
1414/**
1415 * zfcp_erp_wait - wait for completion of error recovery on an adapter 1420 * zfcp_erp_wait - wait for completion of error recovery on an adapter
1416 * @adapter: adapter for which to wait for completion of its error recovery 1421 * @adapter: adapter for which to wait for completion of its error recovery
1417 */ 1422 */
@@ -1423,210 +1428,148 @@ void zfcp_erp_wait(struct zfcp_adapter *adapter)
1423} 1428}
1424 1429
1425/** 1430/**
1426 * zfcp_erp_modify_adapter_status - change adapter status bits 1431 * zfcp_erp_set_adapter_status - set adapter status bits
1427 * @adapter: adapter to change the status 1432 * @adapter: adapter to change the status
1428 * @id: id for the debug trace
1429 * @ref: reference for the debug trace
1430 * @mask: status bits to change 1433 * @mask: status bits to change
1431 * @set_or_clear: ZFCP_SET or ZFCP_CLEAR
1432 * 1434 *
1433 * Changes in common status bits are propagated to attached ports and units. 1435 * Changes in common status bits are propagated to attached ports and LUNs.
1434 */ 1436 */
1435void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, char *id, 1437void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
1436 void *ref, u32 mask, int set_or_clear)
1437{ 1438{
1438 struct zfcp_port *port; 1439 struct zfcp_port *port;
1440 struct scsi_device *sdev;
1439 unsigned long flags; 1441 unsigned long flags;
1440 u32 common_mask = mask & ZFCP_COMMON_FLAGS; 1442 u32 common_mask = mask & ZFCP_COMMON_FLAGS;
1441 1443
1442 if (set_or_clear == ZFCP_SET) { 1444 atomic_set_mask(mask, &adapter->status);
1443 if (status_change_set(mask, &adapter->status))
1444 zfcp_dbf_rec_adapter(id, ref, adapter->dbf);
1445 atomic_set_mask(mask, &adapter->status);
1446 } else {
1447 if (status_change_clear(mask, &adapter->status))
1448 zfcp_dbf_rec_adapter(id, ref, adapter->dbf);
1449 atomic_clear_mask(mask, &adapter->status);
1450 if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
1451 atomic_set(&adapter->erp_counter, 0);
1452 }
1453 1445
1454 if (common_mask) { 1446 if (!common_mask)
1455 read_lock_irqsave(&adapter->port_list_lock, flags); 1447 return;
1456 list_for_each_entry(port, &adapter->port_list, list) 1448
1457 zfcp_erp_modify_port_status(port, id, ref, common_mask, 1449 read_lock_irqsave(&adapter->port_list_lock, flags);
1458 set_or_clear); 1450 list_for_each_entry(port, &adapter->port_list, list)
1459 read_unlock_irqrestore(&adapter->port_list_lock, flags); 1451 atomic_set_mask(common_mask, &port->status);
1460 } 1452 read_unlock_irqrestore(&adapter->port_list_lock, flags);
1453
1454 shost_for_each_device(sdev, adapter->scsi_host)
1455 atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
1461} 1456}
1462 1457
1463/** 1458/**
1464 * zfcp_erp_modify_port_status - change port status bits 1459 * zfcp_erp_clear_adapter_status - clear adapter status bits
1465 * @port: port to change the status bits 1460 * @adapter: adapter to change the status
1466 * @id: id for the debug trace
1467 * @ref: reference for the debug trace
1468 * @mask: status bits to change 1461 * @mask: status bits to change
1469 * @set_or_clear: ZFCP_SET or ZFCP_CLEAR
1470 * 1462 *
1471 * Changes in common status bits are propagated to attached units. 1463 * Changes in common status bits are propagated to attached ports and LUNs.
1472 */ 1464 */
1473void zfcp_erp_modify_port_status(struct zfcp_port *port, char *id, void *ref, 1465void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
1474 u32 mask, int set_or_clear)
1475{ 1466{
1476 struct zfcp_unit *unit; 1467 struct zfcp_port *port;
1468 struct scsi_device *sdev;
1477 unsigned long flags; 1469 unsigned long flags;
1478 u32 common_mask = mask & ZFCP_COMMON_FLAGS; 1470 u32 common_mask = mask & ZFCP_COMMON_FLAGS;
1471 u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
1472
1473 atomic_clear_mask(mask, &adapter->status);
1474
1475 if (!common_mask)
1476 return;
1477
1478 if (clear_counter)
1479 atomic_set(&adapter->erp_counter, 0);
1479 1480
1480 if (set_or_clear == ZFCP_SET) { 1481 read_lock_irqsave(&adapter->port_list_lock, flags);
1481 if (status_change_set(mask, &port->status)) 1482 list_for_each_entry(port, &adapter->port_list, list) {
1482 zfcp_dbf_rec_port(id, ref, port); 1483 atomic_clear_mask(common_mask, &port->status);
1483 atomic_set_mask(mask, &port->status); 1484 if (clear_counter)
1484 } else {
1485 if (status_change_clear(mask, &port->status))
1486 zfcp_dbf_rec_port(id, ref, port);
1487 atomic_clear_mask(mask, &port->status);
1488 if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
1489 atomic_set(&port->erp_counter, 0); 1485 atomic_set(&port->erp_counter, 0);
1490 } 1486 }
1487 read_unlock_irqrestore(&adapter->port_list_lock, flags);
1491 1488
1492 if (common_mask) { 1489 shost_for_each_device(sdev, adapter->scsi_host) {
1493 read_lock_irqsave(&port->unit_list_lock, flags); 1490 atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
1494 list_for_each_entry(unit, &port->unit_list, list) 1491 if (clear_counter)
1495 zfcp_erp_modify_unit_status(unit, id, ref, common_mask, 1492 atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
1496 set_or_clear);
1497 read_unlock_irqrestore(&port->unit_list_lock, flags);
1498 } 1493 }
1499} 1494}
1500 1495
1501/** 1496/**
1502 * zfcp_erp_modify_unit_status - change unit status bits 1497 * zfcp_erp_set_port_status - set port status bits
1503 * @unit: unit to change the status bits 1498 * @port: port to change the status
1504 * @id: id for the debug trace
1505 * @ref: reference for the debug trace
1506 * @mask: status bits to change 1499 * @mask: status bits to change
1507 * @set_or_clear: ZFCP_SET or ZFCP_CLEAR 1500 *
1508 */ 1501 * Changes in common status bits are propagated to attached LUNs.
1509void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, char *id, void *ref,
1510 u32 mask, int set_or_clear)
1511{
1512 if (set_or_clear == ZFCP_SET) {
1513 if (status_change_set(mask, &unit->status))
1514 zfcp_dbf_rec_unit(id, ref, unit);
1515 atomic_set_mask(mask, &unit->status);
1516 } else {
1517 if (status_change_clear(mask, &unit->status))
1518 zfcp_dbf_rec_unit(id, ref, unit);
1519 atomic_clear_mask(mask, &unit->status);
1520 if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) {
1521 atomic_set(&unit->erp_counter, 0);
1522 }
1523 }
1524}
1525
1526/**
1527 * zfcp_erp_port_boxed - Mark port as "boxed" and start ERP
1528 * @port: The "boxed" port.
1529 * @id: The debug trace id.
1530 * @id: Reference for the debug trace.
1531 */ 1502 */
1532void zfcp_erp_port_boxed(struct zfcp_port *port, char *id, void *ref) 1503void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
1533{ 1504{
1534 zfcp_erp_modify_port_status(port, id, ref, 1505 struct scsi_device *sdev;
1535 ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET); 1506 u32 common_mask = mask & ZFCP_COMMON_FLAGS;
1536 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
1537}
1538 1507
1539/** 1508 atomic_set_mask(mask, &port->status);
1540 * zfcp_erp_unit_boxed - Mark unit as "boxed" and start ERP
1541 * @port: The "boxed" unit.
1542 * @id: The debug trace id.
1543 * @id: Reference for the debug trace.
1544 */
1545void zfcp_erp_unit_boxed(struct zfcp_unit *unit, char *id, void *ref)
1546{
1547 zfcp_erp_modify_unit_status(unit, id, ref,
1548 ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET);
1549 zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
1550}
1551 1509
1552/** 1510 if (!common_mask)
1553 * zfcp_erp_port_access_denied - Adapter denied access to port. 1511 return;
1554 * @port: port where access has been denied 1512
1555 * @id: id for debug trace 1513 shost_for_each_device(sdev, port->adapter->scsi_host)
1556 * @ref: reference for debug trace 1514 if (sdev_to_zfcp(sdev)->port == port)
1557 * 1515 atomic_set_mask(common_mask,
1558 * Since the adapter has denied access, stop using the port and the 1516 &sdev_to_zfcp(sdev)->status);
1559 * attached units.
1560 */
1561void zfcp_erp_port_access_denied(struct zfcp_port *port, char *id, void *ref)
1562{
1563 zfcp_erp_modify_port_status(port, id, ref,
1564 ZFCP_STATUS_COMMON_ERP_FAILED |
1565 ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET);
1566} 1517}
1567 1518
1568/** 1519/**
1569 * zfcp_erp_unit_access_denied - Adapter denied access to unit. 1520 * zfcp_erp_clear_port_status - clear port status bits
1570 * @unit: unit where access has been denied 1521 * @port: adapter to change the status
1571 * @id: id for debug trace 1522 * @mask: status bits to change
1572 * @ref: reference for debug trace
1573 * 1523 *
1574 * Since the adapter has denied access, stop using the unit. 1524 * Changes in common status bits are propagated to attached LUNs.
1575 */ 1525 */
1576void zfcp_erp_unit_access_denied(struct zfcp_unit *unit, char *id, void *ref) 1526void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
1577{ 1527{
1578 zfcp_erp_modify_unit_status(unit, id, ref, 1528 struct scsi_device *sdev;
1579 ZFCP_STATUS_COMMON_ERP_FAILED | 1529 u32 common_mask = mask & ZFCP_COMMON_FLAGS;
1580 ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET); 1530 u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
1581}
1582 1531
1583static void zfcp_erp_unit_access_changed(struct zfcp_unit *unit, char *id, 1532 atomic_clear_mask(mask, &port->status);
1584 void *ref) 1533
1585{ 1534 if (!common_mask)
1586 int status = atomic_read(&unit->status);
1587 if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED |
1588 ZFCP_STATUS_COMMON_ACCESS_BOXED)))
1589 return; 1535 return;
1590 1536
1591 zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); 1537 if (clear_counter)
1538 atomic_set(&port->erp_counter, 0);
1539
1540 shost_for_each_device(sdev, port->adapter->scsi_host)
1541 if (sdev_to_zfcp(sdev)->port == port) {
1542 atomic_clear_mask(common_mask,
1543 &sdev_to_zfcp(sdev)->status);
1544 if (clear_counter)
1545 atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
1546 }
1592} 1547}
1593 1548
1594static void zfcp_erp_port_access_changed(struct zfcp_port *port, char *id, 1549/**
1595 void *ref) 1550 * zfcp_erp_set_lun_status - set lun status bits
1551 * @sdev: SCSI device / lun to set the status bits
1552 * @mask: status bits to change
1553 */
1554void zfcp_erp_set_lun_status(struct scsi_device *sdev, u32 mask)
1596{ 1555{
1597 struct zfcp_unit *unit; 1556 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1598 unsigned long flags;
1599 int status = atomic_read(&port->status);
1600 1557
1601 if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED | 1558 atomic_set_mask(mask, &zfcp_sdev->status);
1602 ZFCP_STATUS_COMMON_ACCESS_BOXED))) {
1603 read_lock_irqsave(&port->unit_list_lock, flags);
1604 list_for_each_entry(unit, &port->unit_list, list)
1605 zfcp_erp_unit_access_changed(unit, id, ref);
1606 read_unlock_irqrestore(&port->unit_list_lock, flags);
1607 return;
1608 }
1609
1610 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
1611} 1559}
1612 1560
1613/** 1561/**
1614 * zfcp_erp_adapter_access_changed - Process change in adapter ACT 1562 * zfcp_erp_clear_lun_status - clear lun status bits
1615 * @adapter: Adapter where the Access Control Table (ACT) changed 1563 * @sdev: SCSi device / lun to clear the status bits
1616 * @id: Id for debug trace 1564 * @mask: status bits to change
1617 * @ref: Reference for debug trace
1618 */ 1565 */
1619void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, char *id, 1566void zfcp_erp_clear_lun_status(struct scsi_device *sdev, u32 mask)
1620 void *ref)
1621{ 1567{
1622 unsigned long flags; 1568 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1623 struct zfcp_port *port;
1624 1569
1625 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) 1570 atomic_clear_mask(mask, &zfcp_sdev->status);
1626 return;
1627 1571
1628 read_lock_irqsave(&adapter->port_list_lock, flags); 1572 if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
1629 list_for_each_entry(port, &adapter->port_list, list) 1573 atomic_set(&zfcp_sdev->erp_counter, 0);
1630 zfcp_erp_port_access_changed(port, id, ref);
1631 read_unlock_irqrestore(&adapter->port_list_lock, flags);
1632} 1574}
1575
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 3b93239c6f69..bf8f3e514839 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -15,12 +15,10 @@
15#include "zfcp_fc.h" 15#include "zfcp_fc.h"
16 16
17/* zfcp_aux.c */ 17/* zfcp_aux.c */
18extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, u64);
19extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64); 18extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64);
20extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *); 19extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *);
21extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32, 20extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32,
22 u32); 21 u32);
23extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, u64);
24extern void zfcp_sg_free_table(struct scatterlist *, int); 22extern void zfcp_sg_free_table(struct scatterlist *, int);
25extern int zfcp_sg_setup_table(struct scatterlist *, int); 23extern int zfcp_sg_setup_table(struct scatterlist *, int);
26extern void zfcp_device_unregister(struct device *, 24extern void zfcp_device_unregister(struct device *,
@@ -36,6 +34,14 @@ extern void zfcp_ccw_adapter_put(struct zfcp_adapter *);
36 34
37/* zfcp_cfdc.c */ 35/* zfcp_cfdc.c */
38extern struct miscdevice zfcp_cfdc_misc; 36extern struct miscdevice zfcp_cfdc_misc;
37extern void zfcp_cfdc_port_denied(struct zfcp_port *, union fsf_status_qual *);
38extern void zfcp_cfdc_lun_denied(struct scsi_device *, union fsf_status_qual *);
39extern void zfcp_cfdc_lun_shrng_vltn(struct scsi_device *,
40 union fsf_status_qual *);
41extern int zfcp_cfdc_open_lun_eval(struct scsi_device *,
42 struct fsf_qtcb_bottom_support *);
43extern void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *);
44
39 45
40/* zfcp_dbf.c */ 46/* zfcp_dbf.c */
41extern int zfcp_dbf_adapter_register(struct zfcp_adapter *); 47extern int zfcp_dbf_adapter_register(struct zfcp_adapter *);
@@ -44,10 +50,10 @@ extern void zfcp_dbf_rec_thread(char *, struct zfcp_dbf *);
44extern void zfcp_dbf_rec_thread_lock(char *, struct zfcp_dbf *); 50extern void zfcp_dbf_rec_thread_lock(char *, struct zfcp_dbf *);
45extern void zfcp_dbf_rec_adapter(char *, void *, struct zfcp_dbf *); 51extern void zfcp_dbf_rec_adapter(char *, void *, struct zfcp_dbf *);
46extern void zfcp_dbf_rec_port(char *, void *, struct zfcp_port *); 52extern void zfcp_dbf_rec_port(char *, void *, struct zfcp_port *);
47extern void zfcp_dbf_rec_unit(char *, void *, struct zfcp_unit *); 53extern void zfcp_dbf_rec_lun(char *, void *, struct scsi_device *);
48extern void zfcp_dbf_rec_trigger(char *, void *, u8, u8, void *, 54extern void zfcp_dbf_rec_trigger(char *, void *, u8, u8, void *,
49 struct zfcp_adapter *, struct zfcp_port *, 55 struct zfcp_adapter *, struct zfcp_port *,
50 struct zfcp_unit *); 56 struct scsi_device *);
51extern void zfcp_dbf_rec_action(char *, struct zfcp_erp_action *); 57extern void zfcp_dbf_rec_action(char *, struct zfcp_erp_action *);
52extern void _zfcp_dbf_hba_fsf_response(const char *, int, struct zfcp_fsf_req *, 58extern void _zfcp_dbf_hba_fsf_response(const char *, int, struct zfcp_fsf_req *,
53 struct zfcp_dbf *); 59 struct zfcp_dbf *);
@@ -65,34 +71,26 @@ extern void _zfcp_dbf_scsi(const char *, const char *, int, struct zfcp_dbf *,
65 unsigned long); 71 unsigned long);
66 72
67/* zfcp_erp.c */ 73/* zfcp_erp.c */
68extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, char *, 74extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
69 void *, u32, int); 75extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32);
70extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *, void *); 76extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *, void *);
71extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *, 77extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *,
72 void *); 78 void *);
73extern void zfcp_erp_adapter_failed(struct zfcp_adapter *, char *, void *); 79extern void zfcp_erp_set_port_status(struct zfcp_port *, u32);
74extern void zfcp_erp_modify_port_status(struct zfcp_port *, char *, void *, u32, 80extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32);
75 int);
76extern int zfcp_erp_port_reopen(struct zfcp_port *, int, char *, void *); 81extern int zfcp_erp_port_reopen(struct zfcp_port *, int, char *, void *);
77extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *, void *); 82extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *, void *);
78extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *, 83extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *,
79 void *); 84 void *);
80extern void zfcp_erp_port_failed(struct zfcp_port *, char *, void *); 85extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
81extern void zfcp_erp_modify_unit_status(struct zfcp_unit *, char *, void *, u32, 86extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
82 int); 87extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *, void *);
83extern void zfcp_erp_unit_reopen(struct zfcp_unit *, int, char *, void *); 88extern void zfcp_erp_lun_shutdown(struct scsi_device *, int, char *, void *);
84extern void zfcp_erp_unit_shutdown(struct zfcp_unit *, int, char *, void *); 89extern void zfcp_erp_lun_shutdown_wait(struct scsi_device *, char *);
85extern void zfcp_erp_unit_failed(struct zfcp_unit *, char *, void *);
86extern int zfcp_erp_thread_setup(struct zfcp_adapter *); 90extern int zfcp_erp_thread_setup(struct zfcp_adapter *);
87extern void zfcp_erp_thread_kill(struct zfcp_adapter *); 91extern void zfcp_erp_thread_kill(struct zfcp_adapter *);
88extern void zfcp_erp_wait(struct zfcp_adapter *); 92extern void zfcp_erp_wait(struct zfcp_adapter *);
89extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long); 93extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long);
90extern void zfcp_erp_port_boxed(struct zfcp_port *, char *, void *);
91extern void zfcp_erp_unit_boxed(struct zfcp_unit *, char *, void *);
92extern void zfcp_erp_port_access_denied(struct zfcp_port *, char *, void *);
93extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, char *, void *);
94extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *,
95 void *);
96extern void zfcp_erp_timeout_handler(unsigned long); 94extern void zfcp_erp_timeout_handler(unsigned long);
97 95
98/* zfcp_fc.c */ 96/* zfcp_fc.c */
@@ -118,8 +116,8 @@ extern int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *);
118extern int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *); 116extern int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *);
119extern int zfcp_fsf_close_port(struct zfcp_erp_action *); 117extern int zfcp_fsf_close_port(struct zfcp_erp_action *);
120extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *); 118extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
121extern int zfcp_fsf_open_unit(struct zfcp_erp_action *); 119extern int zfcp_fsf_open_lun(struct zfcp_erp_action *);
122extern int zfcp_fsf_close_unit(struct zfcp_erp_action *); 120extern int zfcp_fsf_close_lun(struct zfcp_erp_action *);
123extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *); 121extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *);
124extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *, 122extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *,
125 struct fsf_qtcb_bottom_config *); 123 struct fsf_qtcb_bottom_config *);
@@ -135,12 +133,10 @@ extern int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *, struct zfcp_fsf_ct_els *,
135 mempool_t *, unsigned int); 133 mempool_t *, unsigned int);
136extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32, 134extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32,
137 struct zfcp_fsf_ct_els *, unsigned int); 135 struct zfcp_fsf_ct_els *, unsigned int);
138extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *, 136extern int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *);
139 struct scsi_cmnd *);
140extern void zfcp_fsf_req_free(struct zfcp_fsf_req *); 137extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
141extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *, u8); 138extern struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *, u8);
142extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long, 139extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *);
143 struct zfcp_unit *);
144extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int); 140extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int);
145 141
146/* zfcp_qdio.c */ 142/* zfcp_qdio.c */
@@ -163,8 +159,6 @@ extern void zfcp_scsi_rport_work(struct work_struct *);
163extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *); 159extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *);
164extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *); 160extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *);
165extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *); 161extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *);
166extern void zfcp_scsi_scan(struct zfcp_unit *);
167extern void zfcp_scsi_scan_work(struct work_struct *);
168extern void zfcp_scsi_set_prot(struct zfcp_adapter *); 162extern void zfcp_scsi_set_prot(struct zfcp_adapter *);
169extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int); 163extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);
170 164
@@ -175,4 +169,13 @@ extern struct attribute_group zfcp_sysfs_port_attrs;
175extern struct device_attribute *zfcp_sysfs_sdev_attrs[]; 169extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
176extern struct device_attribute *zfcp_sysfs_shost_attrs[]; 170extern struct device_attribute *zfcp_sysfs_shost_attrs[];
177 171
172/* zfcp_unit.c */
173extern int zfcp_unit_add(struct zfcp_port *, u64);
174extern int zfcp_unit_remove(struct zfcp_port *, u64);
175extern struct zfcp_unit *zfcp_unit_find(struct zfcp_port *, u64);
176extern struct scsi_device *zfcp_unit_sdev(struct zfcp_unit *unit);
177extern void zfcp_unit_scsi_scan(struct zfcp_unit *);
178extern void zfcp_unit_queue_scsi_scan(struct zfcp_port *);
179extern unsigned int zfcp_unit_sdev_status(struct zfcp_unit *);
180
178#endif /* ZFCP_EXT_H */ 181#endif /* ZFCP_EXT_H */
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 6f3ed2b9a349..86fd905df48b 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -365,7 +365,7 @@ void zfcp_fc_port_did_lookup(struct work_struct *work)
365 } 365 }
366 366
367 if (!port->d_id) { 367 if (!port->d_id) {
368 zfcp_erp_port_failed(port, "fcgpn_2", NULL); 368 zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
369 goto out; 369 goto out;
370 } 370 }
371 371
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 9d1d7d1842ce..beaf0916ceab 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -61,45 +61,6 @@ static u32 fsf_qtcb_type[] = {
61 [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND 61 [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND
62}; 62};
63 63
64static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table)
65{
66 u16 subtable = table >> 16;
67 u16 rule = table & 0xffff;
68 const char *act_type[] = { "unknown", "OS", "WWPN", "DID", "LUN" };
69
70 if (subtable && subtable < ARRAY_SIZE(act_type))
71 dev_warn(&adapter->ccw_device->dev,
72 "Access denied according to ACT rule type %s, "
73 "rule %d\n", act_type[subtable], rule);
74}
75
76static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req,
77 struct zfcp_port *port)
78{
79 struct fsf_qtcb_header *header = &req->qtcb->header;
80 dev_warn(&req->adapter->ccw_device->dev,
81 "Access denied to port 0x%016Lx\n",
82 (unsigned long long)port->wwpn);
83 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
84 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
85 zfcp_erp_port_access_denied(port, "fspad_1", req);
86 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
87}
88
89static void zfcp_fsf_access_denied_unit(struct zfcp_fsf_req *req,
90 struct zfcp_unit *unit)
91{
92 struct fsf_qtcb_header *header = &req->qtcb->header;
93 dev_warn(&req->adapter->ccw_device->dev,
94 "Access denied to unit 0x%016Lx on port 0x%016Lx\n",
95 (unsigned long long)unit->fcp_lun,
96 (unsigned long long)unit->port->wwpn);
97 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
98 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
99 zfcp_erp_unit_access_denied(unit, "fsuad_1", req);
100 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
101}
102
103static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req) 64static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
104{ 65{
105 dev_err(&req->adapter->ccw_device->dev, "FCP device not " 66 dev_err(&req->adapter->ccw_device->dev, "FCP device not "
@@ -143,7 +104,7 @@ static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
143 read_unlock_irqrestore(&adapter->port_list_lock, flags); 104 read_unlock_irqrestore(&adapter->port_list_lock, flags);
144} 105}
145 106
146static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, char *id, 107static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
147 struct fsf_link_down_info *link_down) 108 struct fsf_link_down_info *link_down)
148{ 109{
149 struct zfcp_adapter *adapter = req->adapter; 110 struct zfcp_adapter *adapter = req->adapter;
@@ -223,7 +184,7 @@ static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, char *id,
223 "the FC fabric is down\n"); 184 "the FC fabric is down\n");
224 } 185 }
225out: 186out:
226 zfcp_erp_adapter_failed(adapter, id, req); 187 zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
227} 188}
228 189
229static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req) 190static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
@@ -234,13 +195,13 @@ static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
234 195
235 switch (sr_buf->status_subtype) { 196 switch (sr_buf->status_subtype) {
236 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK: 197 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
237 zfcp_fsf_link_down_info_eval(req, "fssrld1", ldi); 198 zfcp_fsf_link_down_info_eval(req, ldi);
238 break; 199 break;
239 case FSF_STATUS_READ_SUB_FDISC_FAILED: 200 case FSF_STATUS_READ_SUB_FDISC_FAILED:
240 zfcp_fsf_link_down_info_eval(req, "fssrld2", ldi); 201 zfcp_fsf_link_down_info_eval(req, ldi);
241 break; 202 break;
242 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE: 203 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
243 zfcp_fsf_link_down_info_eval(req, "fssrld3", NULL); 204 zfcp_fsf_link_down_info_eval(req, NULL);
244 }; 205 };
245} 206}
246 207
@@ -281,9 +242,8 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
281 dev_info(&adapter->ccw_device->dev, 242 dev_info(&adapter->ccw_device->dev,
282 "The local link has been restored\n"); 243 "The local link has been restored\n");
283 /* All ports should be marked as ready to run again */ 244 /* All ports should be marked as ready to run again */
284 zfcp_erp_modify_adapter_status(adapter, "fssrh_1", NULL, 245 zfcp_erp_set_adapter_status(adapter,
285 ZFCP_STATUS_COMMON_RUNNING, 246 ZFCP_STATUS_COMMON_RUNNING);
286 ZFCP_SET);
287 zfcp_erp_adapter_reopen(adapter, 247 zfcp_erp_adapter_reopen(adapter,
288 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 248 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
289 ZFCP_STATUS_COMMON_ERP_FAILED, 249 ZFCP_STATUS_COMMON_ERP_FAILED,
@@ -293,13 +253,12 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
293 break; 253 break;
294 case FSF_STATUS_READ_NOTIFICATION_LOST: 254 case FSF_STATUS_READ_NOTIFICATION_LOST:
295 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED) 255 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED)
296 zfcp_erp_adapter_access_changed(adapter, "fssrh_3", 256 zfcp_cfdc_adapter_access_changed(adapter);
297 req);
298 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS) 257 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
299 queue_work(adapter->work_queue, &adapter->scan_work); 258 queue_work(adapter->work_queue, &adapter->scan_work);
300 break; 259 break;
301 case FSF_STATUS_READ_CFDC_UPDATED: 260 case FSF_STATUS_READ_CFDC_UPDATED:
302 zfcp_erp_adapter_access_changed(adapter, "fssrh_4", req); 261 zfcp_cfdc_adapter_access_changed(adapter);
303 break; 262 break;
304 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT: 263 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
305 adapter->adapter_features = sr_buf->payload.word[0]; 264 adapter->adapter_features = sr_buf->payload.word[0];
@@ -399,16 +358,14 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
399 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4", req); 358 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4", req);
400 break; 359 break;
401 case FSF_PROT_LINK_DOWN: 360 case FSF_PROT_LINK_DOWN:
402 zfcp_fsf_link_down_info_eval(req, "fspse_5", 361 zfcp_fsf_link_down_info_eval(req, &psq->link_down_info);
403 &psq->link_down_info);
404 /* go through reopen to flush pending requests */ 362 /* go through reopen to flush pending requests */
405 zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req); 363 zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req);
406 break; 364 break;
407 case FSF_PROT_REEST_QUEUE: 365 case FSF_PROT_REEST_QUEUE:
408 /* All ports should be marked as ready to run again */ 366 /* All ports should be marked as ready to run again */
409 zfcp_erp_modify_adapter_status(adapter, "fspse_7", NULL, 367 zfcp_erp_set_adapter_status(adapter,
410 ZFCP_STATUS_COMMON_RUNNING, 368 ZFCP_STATUS_COMMON_RUNNING);
411 ZFCP_SET);
412 zfcp_erp_adapter_reopen(adapter, 369 zfcp_erp_adapter_reopen(adapter,
413 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 370 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
414 ZFCP_STATUS_COMMON_ERP_FAILED, 371 ZFCP_STATUS_COMMON_ERP_FAILED,
@@ -578,7 +535,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
578 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, 535 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
579 &adapter->status); 536 &adapter->status);
580 537
581 zfcp_fsf_link_down_info_eval(req, "fsecdh2", 538 zfcp_fsf_link_down_info_eval(req,
582 &qtcb->header.fsf_status_qual.link_down_info); 539 &qtcb->header.fsf_status_qual.link_down_info);
583 break; 540 break;
584 default: 541 default:
@@ -644,7 +601,7 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
644 break; 601 break;
645 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: 602 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
646 zfcp_fsf_exchange_port_evaluate(req); 603 zfcp_fsf_exchange_port_evaluate(req);
647 zfcp_fsf_link_down_info_eval(req, "fsepdh1", 604 zfcp_fsf_link_down_info_eval(req,
648 &qtcb->header.fsf_status_qual.link_down_info); 605 &qtcb->header.fsf_status_qual.link_down_info);
649 break; 606 break;
650 } 607 }
@@ -771,7 +728,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
771 struct fsf_status_read_buffer *sr_buf; 728 struct fsf_status_read_buffer *sr_buf;
772 int retval = -EIO; 729 int retval = -EIO;
773 730
774 spin_lock_bh(&qdio->req_q_lock); 731 spin_lock_irq(&qdio->req_q_lock);
775 if (zfcp_qdio_sbal_get(qdio)) 732 if (zfcp_qdio_sbal_get(qdio))
776 goto out; 733 goto out;
777 734
@@ -805,13 +762,14 @@ failed_buf:
805 zfcp_fsf_req_free(req); 762 zfcp_fsf_req_free(req);
806 zfcp_dbf_hba_fsf_unsol("fail", adapter->dbf, NULL); 763 zfcp_dbf_hba_fsf_unsol("fail", adapter->dbf, NULL);
807out: 764out:
808 spin_unlock_bh(&qdio->req_q_lock); 765 spin_unlock_irq(&qdio->req_q_lock);
809 return retval; 766 return retval;
810} 767}
811 768
812static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) 769static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
813{ 770{
814 struct zfcp_unit *unit = req->data; 771 struct scsi_device *sdev = req->data;
772 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
815 union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual; 773 union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
816 774
817 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 775 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
@@ -820,14 +778,15 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
820 switch (req->qtcb->header.fsf_status) { 778 switch (req->qtcb->header.fsf_status) {
821 case FSF_PORT_HANDLE_NOT_VALID: 779 case FSF_PORT_HANDLE_NOT_VALID:
822 if (fsq->word[0] == fsq->word[1]) { 780 if (fsq->word[0] == fsq->word[1]) {
823 zfcp_erp_adapter_reopen(unit->port->adapter, 0, 781 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0,
824 "fsafch1", req); 782 "fsafch1", req);
825 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 783 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
826 } 784 }
827 break; 785 break;
828 case FSF_LUN_HANDLE_NOT_VALID: 786 case FSF_LUN_HANDLE_NOT_VALID:
829 if (fsq->word[0] == fsq->word[1]) { 787 if (fsq->word[0] == fsq->word[1]) {
830 zfcp_erp_port_reopen(unit->port, 0, "fsafch2", req); 788 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2",
789 req);
831 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 790 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
832 } 791 }
833 break; 792 break;
@@ -835,17 +794,23 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
835 req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED; 794 req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
836 break; 795 break;
837 case FSF_PORT_BOXED: 796 case FSF_PORT_BOXED:
838 zfcp_erp_port_boxed(unit->port, "fsafch3", req); 797 zfcp_erp_set_port_status(zfcp_sdev->port,
798 ZFCP_STATUS_COMMON_ACCESS_BOXED);
799 zfcp_erp_port_reopen(zfcp_sdev->port,
800 ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3",
801 req);
839 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 802 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
840 break; 803 break;
841 case FSF_LUN_BOXED: 804 case FSF_LUN_BOXED:
842 zfcp_erp_unit_boxed(unit, "fsafch4", req); 805 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
806 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
807 "fsafch4", req);
843 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 808 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
844 break; 809 break;
845 case FSF_ADAPTER_STATUS_AVAILABLE: 810 case FSF_ADAPTER_STATUS_AVAILABLE:
846 switch (fsq->word[0]) { 811 switch (fsq->word[0]) {
847 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 812 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
848 zfcp_fc_test_link(unit->port); 813 zfcp_fc_test_link(zfcp_sdev->port);
849 /* fall through */ 814 /* fall through */
850 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 815 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
851 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 816 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -859,19 +824,20 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
859} 824}
860 825
861/** 826/**
862 * zfcp_fsf_abort_fcp_command - abort running SCSI command 827 * zfcp_fsf_abort_fcp_cmnd - abort running SCSI command
863 * @old_req_id: unsigned long 828 * @scmnd: The SCSI command to abort
864 * @unit: pointer to struct zfcp_unit
865 * Returns: pointer to struct zfcp_fsf_req 829 * Returns: pointer to struct zfcp_fsf_req
866 */ 830 */
867 831
868struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id, 832struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
869 struct zfcp_unit *unit)
870{ 833{
871 struct zfcp_fsf_req *req = NULL; 834 struct zfcp_fsf_req *req = NULL;
872 struct zfcp_qdio *qdio = unit->port->adapter->qdio; 835 struct scsi_device *sdev = scmnd->device;
836 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
837 struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
838 unsigned long old_req_id = (unsigned long) scmnd->host_scribble;
873 839
874 spin_lock_bh(&qdio->req_q_lock); 840 spin_lock_irq(&qdio->req_q_lock);
875 if (zfcp_qdio_sbal_get(qdio)) 841 if (zfcp_qdio_sbal_get(qdio))
876 goto out; 842 goto out;
877 req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND, 843 req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
@@ -882,16 +848,16 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
882 goto out; 848 goto out;
883 } 849 }
884 850
885 if (unlikely(!(atomic_read(&unit->status) & 851 if (unlikely(!(atomic_read(&zfcp_sdev->status) &
886 ZFCP_STATUS_COMMON_UNBLOCKED))) 852 ZFCP_STATUS_COMMON_UNBLOCKED)))
887 goto out_error_free; 853 goto out_error_free;
888 854
889 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 855 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
890 856
891 req->data = unit; 857 req->data = zfcp_sdev;
892 req->handler = zfcp_fsf_abort_fcp_command_handler; 858 req->handler = zfcp_fsf_abort_fcp_command_handler;
893 req->qtcb->header.lun_handle = unit->handle; 859 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
894 req->qtcb->header.port_handle = unit->port->handle; 860 req->qtcb->header.port_handle = zfcp_sdev->port->handle;
895 req->qtcb->bottom.support.req_handle = (u64) old_req_id; 861 req->qtcb->bottom.support.req_handle = (u64) old_req_id;
896 862
897 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT); 863 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
@@ -902,7 +868,7 @@ out_error_free:
902 zfcp_fsf_req_free(req); 868 zfcp_fsf_req_free(req);
903 req = NULL; 869 req = NULL;
904out: 870out:
905 spin_unlock_bh(&qdio->req_q_lock); 871 spin_unlock_irq(&qdio->req_q_lock);
906 return req; 872 return req;
907} 873}
908 874
@@ -1041,7 +1007,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1041 struct zfcp_fsf_req *req; 1007 struct zfcp_fsf_req *req;
1042 int ret = -EIO; 1008 int ret = -EIO;
1043 1009
1044 spin_lock_bh(&qdio->req_q_lock); 1010 spin_lock_irq(&qdio->req_q_lock);
1045 if (zfcp_qdio_sbal_get(qdio)) 1011 if (zfcp_qdio_sbal_get(qdio))
1046 goto out; 1012 goto out;
1047 1013
@@ -1073,7 +1039,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1073failed_send: 1039failed_send:
1074 zfcp_fsf_req_free(req); 1040 zfcp_fsf_req_free(req);
1075out: 1041out:
1076 spin_unlock_bh(&qdio->req_q_lock); 1042 spin_unlock_irq(&qdio->req_q_lock);
1077 return ret; 1043 return ret;
1078} 1044}
1079 1045
@@ -1111,8 +1077,10 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
1111 case FSF_RESPONSE_SIZE_TOO_LARGE: 1077 case FSF_RESPONSE_SIZE_TOO_LARGE:
1112 break; 1078 break;
1113 case FSF_ACCESS_DENIED: 1079 case FSF_ACCESS_DENIED:
1114 if (port) 1080 if (port) {
1115 zfcp_fsf_access_denied_port(req, port); 1081 zfcp_cfdc_port_denied(port, &header->fsf_status_qual);
1082 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1083 }
1116 break; 1084 break;
1117 case FSF_SBAL_MISMATCH: 1085 case FSF_SBAL_MISMATCH:
1118 /* should never occure, avoided in zfcp_fsf_send_els */ 1086 /* should never occure, avoided in zfcp_fsf_send_els */
@@ -1137,7 +1105,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1137 struct zfcp_qdio *qdio = adapter->qdio; 1105 struct zfcp_qdio *qdio = adapter->qdio;
1138 int ret = -EIO; 1106 int ret = -EIO;
1139 1107
1140 spin_lock_bh(&qdio->req_q_lock); 1108 spin_lock_irq(&qdio->req_q_lock);
1141 if (zfcp_qdio_sbal_get(qdio)) 1109 if (zfcp_qdio_sbal_get(qdio))
1142 goto out; 1110 goto out;
1143 1111
@@ -1173,7 +1141,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1173failed_send: 1141failed_send:
1174 zfcp_fsf_req_free(req); 1142 zfcp_fsf_req_free(req);
1175out: 1143out:
1176 spin_unlock_bh(&qdio->req_q_lock); 1144 spin_unlock_irq(&qdio->req_q_lock);
1177 return ret; 1145 return ret;
1178} 1146}
1179 1147
@@ -1183,7 +1151,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1183 struct zfcp_qdio *qdio = erp_action->adapter->qdio; 1151 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1184 int retval = -EIO; 1152 int retval = -EIO;
1185 1153
1186 spin_lock_bh(&qdio->req_q_lock); 1154 spin_lock_irq(&qdio->req_q_lock);
1187 if (zfcp_qdio_sbal_get(qdio)) 1155 if (zfcp_qdio_sbal_get(qdio))
1188 goto out; 1156 goto out;
1189 1157
@@ -1215,7 +1183,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1215 erp_action->fsf_req_id = 0; 1183 erp_action->fsf_req_id = 0;
1216 } 1184 }
1217out: 1185out:
1218 spin_unlock_bh(&qdio->req_q_lock); 1186 spin_unlock_irq(&qdio->req_q_lock);
1219 return retval; 1187 return retval;
1220} 1188}
1221 1189
@@ -1225,7 +1193,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
1225 struct zfcp_fsf_req *req = NULL; 1193 struct zfcp_fsf_req *req = NULL;
1226 int retval = -EIO; 1194 int retval = -EIO;
1227 1195
1228 spin_lock_bh(&qdio->req_q_lock); 1196 spin_lock_irq(&qdio->req_q_lock);
1229 if (zfcp_qdio_sbal_get(qdio)) 1197 if (zfcp_qdio_sbal_get(qdio))
1230 goto out_unlock; 1198 goto out_unlock;
1231 1199
@@ -1251,7 +1219,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
1251 1219
1252 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 1220 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1253 retval = zfcp_fsf_req_send(req); 1221 retval = zfcp_fsf_req_send(req);
1254 spin_unlock_bh(&qdio->req_q_lock); 1222 spin_unlock_irq(&qdio->req_q_lock);
1255 if (!retval) 1223 if (!retval)
1256 wait_for_completion(&req->completion); 1224 wait_for_completion(&req->completion);
1257 1225
@@ -1259,7 +1227,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
1259 return retval; 1227 return retval;
1260 1228
1261out_unlock: 1229out_unlock:
1262 spin_unlock_bh(&qdio->req_q_lock); 1230 spin_unlock_irq(&qdio->req_q_lock);
1263 return retval; 1231 return retval;
1264} 1232}
1265 1233
@@ -1277,7 +1245,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1277 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) 1245 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1278 return -EOPNOTSUPP; 1246 return -EOPNOTSUPP;
1279 1247
1280 spin_lock_bh(&qdio->req_q_lock); 1248 spin_lock_irq(&qdio->req_q_lock);
1281 if (zfcp_qdio_sbal_get(qdio)) 1249 if (zfcp_qdio_sbal_get(qdio))
1282 goto out; 1250 goto out;
1283 1251
@@ -1304,7 +1272,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1304 erp_action->fsf_req_id = 0; 1272 erp_action->fsf_req_id = 0;
1305 } 1273 }
1306out: 1274out:
1307 spin_unlock_bh(&qdio->req_q_lock); 1275 spin_unlock_irq(&qdio->req_q_lock);
1308 return retval; 1276 return retval;
1309} 1277}
1310 1278
@@ -1323,7 +1291,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
1323 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) 1291 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1324 return -EOPNOTSUPP; 1292 return -EOPNOTSUPP;
1325 1293
1326 spin_lock_bh(&qdio->req_q_lock); 1294 spin_lock_irq(&qdio->req_q_lock);
1327 if (zfcp_qdio_sbal_get(qdio)) 1295 if (zfcp_qdio_sbal_get(qdio))
1328 goto out_unlock; 1296 goto out_unlock;
1329 1297
@@ -1343,7 +1311,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
1343 req->handler = zfcp_fsf_exchange_port_data_handler; 1311 req->handler = zfcp_fsf_exchange_port_data_handler;
1344 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 1312 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1345 retval = zfcp_fsf_req_send(req); 1313 retval = zfcp_fsf_req_send(req);
1346 spin_unlock_bh(&qdio->req_q_lock); 1314 spin_unlock_irq(&qdio->req_q_lock);
1347 1315
1348 if (!retval) 1316 if (!retval)
1349 wait_for_completion(&req->completion); 1317 wait_for_completion(&req->completion);
@@ -1353,7 +1321,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
1353 return retval; 1321 return retval;
1354 1322
1355out_unlock: 1323out_unlock:
1356 spin_unlock_bh(&qdio->req_q_lock); 1324 spin_unlock_irq(&qdio->req_q_lock);
1357 return retval; 1325 return retval;
1358} 1326}
1359 1327
@@ -1370,14 +1338,16 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1370 case FSF_PORT_ALREADY_OPEN: 1338 case FSF_PORT_ALREADY_OPEN:
1371 break; 1339 break;
1372 case FSF_ACCESS_DENIED: 1340 case FSF_ACCESS_DENIED:
1373 zfcp_fsf_access_denied_port(req, port); 1341 zfcp_cfdc_port_denied(port, &header->fsf_status_qual);
1342 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1374 break; 1343 break;
1375 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED: 1344 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1376 dev_warn(&req->adapter->ccw_device->dev, 1345 dev_warn(&req->adapter->ccw_device->dev,
1377 "Not enough FCP adapter resources to open " 1346 "Not enough FCP adapter resources to open "
1378 "remote port 0x%016Lx\n", 1347 "remote port 0x%016Lx\n",
1379 (unsigned long long)port->wwpn); 1348 (unsigned long long)port->wwpn);
1380 zfcp_erp_port_failed(port, "fsoph_1", req); 1349 zfcp_erp_set_port_status(port,
1350 ZFCP_STATUS_COMMON_ERP_FAILED);
1381 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1351 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1382 break; 1352 break;
1383 case FSF_ADAPTER_STATUS_AVAILABLE: 1353 case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -1437,7 +1407,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1437 struct zfcp_fsf_req *req; 1407 struct zfcp_fsf_req *req;
1438 int retval = -EIO; 1408 int retval = -EIO;
1439 1409
1440 spin_lock_bh(&qdio->req_q_lock); 1410 spin_lock_irq(&qdio->req_q_lock);
1441 if (zfcp_qdio_sbal_get(qdio)) 1411 if (zfcp_qdio_sbal_get(qdio))
1442 goto out; 1412 goto out;
1443 1413
@@ -1468,7 +1438,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1468 put_device(&port->dev); 1438 put_device(&port->dev);
1469 } 1439 }
1470out: 1440out:
1471 spin_unlock_bh(&qdio->req_q_lock); 1441 spin_unlock_irq(&qdio->req_q_lock);
1472 return retval; 1442 return retval;
1473} 1443}
1474 1444
@@ -1487,9 +1457,7 @@ static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
1487 case FSF_ADAPTER_STATUS_AVAILABLE: 1457 case FSF_ADAPTER_STATUS_AVAILABLE:
1488 break; 1458 break;
1489 case FSF_GOOD: 1459 case FSF_GOOD:
1490 zfcp_erp_modify_port_status(port, "fscph_2", req, 1460 zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_OPEN);
1491 ZFCP_STATUS_COMMON_OPEN,
1492 ZFCP_CLEAR);
1493 break; 1461 break;
1494 } 1462 }
1495} 1463}
@@ -1505,7 +1473,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1505 struct zfcp_fsf_req *req; 1473 struct zfcp_fsf_req *req;
1506 int retval = -EIO; 1474 int retval = -EIO;
1507 1475
1508 spin_lock_bh(&qdio->req_q_lock); 1476 spin_lock_irq(&qdio->req_q_lock);
1509 if (zfcp_qdio_sbal_get(qdio)) 1477 if (zfcp_qdio_sbal_get(qdio))
1510 goto out; 1478 goto out;
1511 1479
@@ -1534,7 +1502,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1534 erp_action->fsf_req_id = 0; 1502 erp_action->fsf_req_id = 0;
1535 } 1503 }
1536out: 1504out:
1537 spin_unlock_bh(&qdio->req_q_lock); 1505 spin_unlock_irq(&qdio->req_q_lock);
1538 return retval; 1506 return retval;
1539} 1507}
1540 1508
@@ -1580,7 +1548,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1580 struct zfcp_fsf_req *req; 1548 struct zfcp_fsf_req *req;
1581 int retval = -EIO; 1549 int retval = -EIO;
1582 1550
1583 spin_lock_bh(&qdio->req_q_lock); 1551 spin_lock_irq(&qdio->req_q_lock);
1584 if (zfcp_qdio_sbal_get(qdio)) 1552 if (zfcp_qdio_sbal_get(qdio))
1585 goto out; 1553 goto out;
1586 1554
@@ -1605,7 +1573,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1605 if (retval) 1573 if (retval)
1606 zfcp_fsf_req_free(req); 1574 zfcp_fsf_req_free(req);
1607out: 1575out:
1608 spin_unlock_bh(&qdio->req_q_lock); 1576 spin_unlock_irq(&qdio->req_q_lock);
1609 return retval; 1577 return retval;
1610} 1578}
1611 1579
@@ -1633,7 +1601,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1633 struct zfcp_fsf_req *req; 1601 struct zfcp_fsf_req *req;
1634 int retval = -EIO; 1602 int retval = -EIO;
1635 1603
1636 spin_lock_bh(&qdio->req_q_lock); 1604 spin_lock_irq(&qdio->req_q_lock);
1637 if (zfcp_qdio_sbal_get(qdio)) 1605 if (zfcp_qdio_sbal_get(qdio))
1638 goto out; 1606 goto out;
1639 1607
@@ -1658,7 +1626,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1658 if (retval) 1626 if (retval)
1659 zfcp_fsf_req_free(req); 1627 zfcp_fsf_req_free(req);
1660out: 1628out:
1661 spin_unlock_bh(&qdio->req_q_lock); 1629 spin_unlock_irq(&qdio->req_q_lock);
1662 return retval; 1630 return retval;
1663} 1631}
1664 1632
@@ -1666,7 +1634,7 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1666{ 1634{
1667 struct zfcp_port *port = req->data; 1635 struct zfcp_port *port = req->data;
1668 struct fsf_qtcb_header *header = &req->qtcb->header; 1636 struct fsf_qtcb_header *header = &req->qtcb->header;
1669 struct zfcp_unit *unit; 1637 struct scsi_device *sdev;
1670 1638
1671 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1639 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1672 return; 1640 return;
@@ -1677,18 +1645,19 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1677 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1645 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1678 break; 1646 break;
1679 case FSF_ACCESS_DENIED: 1647 case FSF_ACCESS_DENIED:
1680 zfcp_fsf_access_denied_port(req, port); 1648 zfcp_cfdc_port_denied(port, &header->fsf_status_qual);
1681 break; 1649 break;
1682 case FSF_PORT_BOXED: 1650 case FSF_PORT_BOXED:
1683 /* can't use generic zfcp_erp_modify_port_status because 1651 /* can't use generic zfcp_erp_modify_port_status because
1684 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */ 1652 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
1685 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); 1653 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1686 read_lock(&port->unit_list_lock); 1654 shost_for_each_device(sdev, port->adapter->scsi_host)
1687 list_for_each_entry(unit, &port->unit_list, list) 1655 if (sdev_to_zfcp(sdev)->port == port)
1688 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, 1656 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1689 &unit->status); 1657 &sdev_to_zfcp(sdev)->status);
1690 read_unlock(&port->unit_list_lock); 1658 zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
1691 zfcp_erp_port_boxed(port, "fscpph2", req); 1659 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
1660 "fscpph2", req);
1692 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1661 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1693 break; 1662 break;
1694 case FSF_ADAPTER_STATUS_AVAILABLE: 1663 case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -1705,11 +1674,10 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1705 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port 1674 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
1706 */ 1675 */
1707 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); 1676 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1708 read_lock(&port->unit_list_lock); 1677 shost_for_each_device(sdev, port->adapter->scsi_host)
1709 list_for_each_entry(unit, &port->unit_list, list) 1678 if (sdev_to_zfcp(sdev)->port == port)
1710 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, 1679 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1711 &unit->status); 1680 &sdev_to_zfcp(sdev)->status);
1712 read_unlock(&port->unit_list_lock);
1713 break; 1681 break;
1714 } 1682 }
1715} 1683}
@@ -1725,7 +1693,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1725 struct zfcp_fsf_req *req; 1693 struct zfcp_fsf_req *req;
1726 int retval = -EIO; 1694 int retval = -EIO;
1727 1695
1728 spin_lock_bh(&qdio->req_q_lock); 1696 spin_lock_irq(&qdio->req_q_lock);
1729 if (zfcp_qdio_sbal_get(qdio)) 1697 if (zfcp_qdio_sbal_get(qdio))
1730 goto out; 1698 goto out;
1731 1699
@@ -1754,69 +1722,57 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1754 erp_action->fsf_req_id = 0; 1722 erp_action->fsf_req_id = 0;
1755 } 1723 }
1756out: 1724out:
1757 spin_unlock_bh(&qdio->req_q_lock); 1725 spin_unlock_irq(&qdio->req_q_lock);
1758 return retval; 1726 return retval;
1759} 1727}
1760 1728
1761static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req) 1729static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
1762{ 1730{
1763 struct zfcp_adapter *adapter = req->adapter; 1731 struct zfcp_adapter *adapter = req->adapter;
1764 struct zfcp_unit *unit = req->data; 1732 struct scsi_device *sdev = req->data;
1733 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1765 struct fsf_qtcb_header *header = &req->qtcb->header; 1734 struct fsf_qtcb_header *header = &req->qtcb->header;
1766 struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support; 1735 struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
1767 struct fsf_queue_designator *queue_designator =
1768 &header->fsf_status_qual.fsf_queue_designator;
1769 int exclusive, readwrite;
1770 1736
1771 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1737 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1772 return; 1738 return;
1773 1739
1774 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | 1740 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1775 ZFCP_STATUS_COMMON_ACCESS_BOXED | 1741 ZFCP_STATUS_COMMON_ACCESS_BOXED |
1776 ZFCP_STATUS_UNIT_SHARED | 1742 ZFCP_STATUS_LUN_SHARED |
1777 ZFCP_STATUS_UNIT_READONLY, 1743 ZFCP_STATUS_LUN_READONLY,
1778 &unit->status); 1744 &zfcp_sdev->status);
1779 1745
1780 switch (header->fsf_status) { 1746 switch (header->fsf_status) {
1781 1747
1782 case FSF_PORT_HANDLE_NOT_VALID: 1748 case FSF_PORT_HANDLE_NOT_VALID:
1783 zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fsouh_1", req); 1749 zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1", req);
1784 /* fall through */ 1750 /* fall through */
1785 case FSF_LUN_ALREADY_OPEN: 1751 case FSF_LUN_ALREADY_OPEN:
1786 break; 1752 break;
1787 case FSF_ACCESS_DENIED: 1753 case FSF_ACCESS_DENIED:
1788 zfcp_fsf_access_denied_unit(req, unit); 1754 zfcp_cfdc_lun_denied(sdev, &header->fsf_status_qual);
1789 atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status); 1755 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1790 atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
1791 break; 1756 break;
1792 case FSF_PORT_BOXED: 1757 case FSF_PORT_BOXED:
1793 zfcp_erp_port_boxed(unit->port, "fsouh_2", req); 1758 zfcp_erp_set_port_status(zfcp_sdev->port,
1759 ZFCP_STATUS_COMMON_ACCESS_BOXED);
1760 zfcp_erp_port_reopen(zfcp_sdev->port,
1761 ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2",
1762 req);
1794 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1763 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1795 break; 1764 break;
1796 case FSF_LUN_SHARING_VIOLATION: 1765 case FSF_LUN_SHARING_VIOLATION:
1797 if (header->fsf_status_qual.word[0]) 1766 zfcp_cfdc_lun_shrng_vltn(sdev, &header->fsf_status_qual);
1798 dev_warn(&adapter->ccw_device->dev,
1799 "LUN 0x%Lx on port 0x%Lx is already in "
1800 "use by CSS%d, MIF Image ID %x\n",
1801 (unsigned long long)unit->fcp_lun,
1802 (unsigned long long)unit->port->wwpn,
1803 queue_designator->cssid,
1804 queue_designator->hla);
1805 else
1806 zfcp_act_eval_err(adapter,
1807 header->fsf_status_qual.word[2]);
1808 zfcp_erp_unit_access_denied(unit, "fsouh_3", req);
1809 atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
1810 atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
1811 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1767 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1812 break; 1768 break;
1813 case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED: 1769 case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
1814 dev_warn(&adapter->ccw_device->dev, 1770 dev_warn(&adapter->ccw_device->dev,
1815 "No handle is available for LUN " 1771 "No handle is available for LUN "
1816 "0x%016Lx on port 0x%016Lx\n", 1772 "0x%016Lx on port 0x%016Lx\n",
1817 (unsigned long long)unit->fcp_lun, 1773 (unsigned long long)zfcp_scsi_dev_lun(sdev),
1818 (unsigned long long)unit->port->wwpn); 1774 (unsigned long long)zfcp_sdev->port->wwpn);
1819 zfcp_erp_unit_failed(unit, "fsouh_4", req); 1775 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
1820 /* fall through */ 1776 /* fall through */
1821 case FSF_INVALID_COMMAND_OPTION: 1777 case FSF_INVALID_COMMAND_OPTION:
1822 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1778 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1824,7 +1780,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1824 case FSF_ADAPTER_STATUS_AVAILABLE: 1780 case FSF_ADAPTER_STATUS_AVAILABLE:
1825 switch (header->fsf_status_qual.word[0]) { 1781 switch (header->fsf_status_qual.word[0]) {
1826 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 1782 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1827 zfcp_fc_test_link(unit->port); 1783 zfcp_fc_test_link(zfcp_sdev->port);
1828 /* fall through */ 1784 /* fall through */
1829 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 1785 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1830 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1786 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1833,70 +1789,26 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1833 break; 1789 break;
1834 1790
1835 case FSF_GOOD: 1791 case FSF_GOOD:
1836 unit->handle = header->lun_handle; 1792 zfcp_sdev->lun_handle = header->lun_handle;
1837 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status); 1793 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
1838 1794 zfcp_cfdc_open_lun_eval(sdev, bottom);
1839 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) &&
1840 (adapter->adapter_features & FSF_FEATURE_LUN_SHARING) &&
1841 !zfcp_ccw_priv_sch(adapter)) {
1842 exclusive = (bottom->lun_access_info &
1843 FSF_UNIT_ACCESS_EXCLUSIVE);
1844 readwrite = (bottom->lun_access_info &
1845 FSF_UNIT_ACCESS_OUTBOUND_TRANSFER);
1846
1847 if (!exclusive)
1848 atomic_set_mask(ZFCP_STATUS_UNIT_SHARED,
1849 &unit->status);
1850
1851 if (!readwrite) {
1852 atomic_set_mask(ZFCP_STATUS_UNIT_READONLY,
1853 &unit->status);
1854 dev_info(&adapter->ccw_device->dev,
1855 "SCSI device at LUN 0x%016Lx on port "
1856 "0x%016Lx opened read-only\n",
1857 (unsigned long long)unit->fcp_lun,
1858 (unsigned long long)unit->port->wwpn);
1859 }
1860
1861 if (exclusive && !readwrite) {
1862 dev_err(&adapter->ccw_device->dev,
1863 "Exclusive read-only access not "
1864 "supported (unit 0x%016Lx, "
1865 "port 0x%016Lx)\n",
1866 (unsigned long long)unit->fcp_lun,
1867 (unsigned long long)unit->port->wwpn);
1868 zfcp_erp_unit_failed(unit, "fsouh_5", req);
1869 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1870 zfcp_erp_unit_shutdown(unit, 0, "fsouh_6", req);
1871 } else if (!exclusive && readwrite) {
1872 dev_err(&adapter->ccw_device->dev,
1873 "Shared read-write access not "
1874 "supported (unit 0x%016Lx, port "
1875 "0x%016Lx)\n",
1876 (unsigned long long)unit->fcp_lun,
1877 (unsigned long long)unit->port->wwpn);
1878 zfcp_erp_unit_failed(unit, "fsouh_7", req);
1879 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1880 zfcp_erp_unit_shutdown(unit, 0, "fsouh_8", req);
1881 }
1882 }
1883 break; 1795 break;
1884 } 1796 }
1885} 1797}
1886 1798
1887/** 1799/**
1888 * zfcp_fsf_open_unit - open unit 1800 * zfcp_fsf_open_lun - open LUN
1889 * @erp_action: pointer to struct zfcp_erp_action 1801 * @erp_action: pointer to struct zfcp_erp_action
1890 * Returns: 0 on success, error otherwise 1802 * Returns: 0 on success, error otherwise
1891 */ 1803 */
1892int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) 1804int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
1893{ 1805{
1894 struct zfcp_adapter *adapter = erp_action->adapter; 1806 struct zfcp_adapter *adapter = erp_action->adapter;
1895 struct zfcp_qdio *qdio = adapter->qdio; 1807 struct zfcp_qdio *qdio = adapter->qdio;
1896 struct zfcp_fsf_req *req; 1808 struct zfcp_fsf_req *req;
1897 int retval = -EIO; 1809 int retval = -EIO;
1898 1810
1899 spin_lock_bh(&qdio->req_q_lock); 1811 spin_lock_irq(&qdio->req_q_lock);
1900 if (zfcp_qdio_sbal_get(qdio)) 1812 if (zfcp_qdio_sbal_get(qdio))
1901 goto out; 1813 goto out;
1902 1814
@@ -1913,9 +1825,9 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
1913 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1825 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1914 1826
1915 req->qtcb->header.port_handle = erp_action->port->handle; 1827 req->qtcb->header.port_handle = erp_action->port->handle;
1916 req->qtcb->bottom.support.fcp_lun = erp_action->unit->fcp_lun; 1828 req->qtcb->bottom.support.fcp_lun = zfcp_scsi_dev_lun(erp_action->sdev);
1917 req->handler = zfcp_fsf_open_unit_handler; 1829 req->handler = zfcp_fsf_open_lun_handler;
1918 req->data = erp_action->unit; 1830 req->data = erp_action->sdev;
1919 req->erp_action = erp_action; 1831 req->erp_action = erp_action;
1920 erp_action->fsf_req_id = req->req_id; 1832 erp_action->fsf_req_id = req->req_id;
1921 1833
@@ -1929,34 +1841,40 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
1929 erp_action->fsf_req_id = 0; 1841 erp_action->fsf_req_id = 0;
1930 } 1842 }
1931out: 1843out:
1932 spin_unlock_bh(&qdio->req_q_lock); 1844 spin_unlock_irq(&qdio->req_q_lock);
1933 return retval; 1845 return retval;
1934} 1846}
1935 1847
1936static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req) 1848static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
1937{ 1849{
1938 struct zfcp_unit *unit = req->data; 1850 struct scsi_device *sdev = req->data;
1851 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1939 1852
1940 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1853 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1941 return; 1854 return;
1942 1855
1943 switch (req->qtcb->header.fsf_status) { 1856 switch (req->qtcb->header.fsf_status) {
1944 case FSF_PORT_HANDLE_NOT_VALID: 1857 case FSF_PORT_HANDLE_NOT_VALID:
1945 zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fscuh_1", req); 1858 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1",
1859 req);
1946 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1860 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1947 break; 1861 break;
1948 case FSF_LUN_HANDLE_NOT_VALID: 1862 case FSF_LUN_HANDLE_NOT_VALID:
1949 zfcp_erp_port_reopen(unit->port, 0, "fscuh_2", req); 1863 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2", req);
1950 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1864 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1951 break; 1865 break;
1952 case FSF_PORT_BOXED: 1866 case FSF_PORT_BOXED:
1953 zfcp_erp_port_boxed(unit->port, "fscuh_3", req); 1867 zfcp_erp_set_port_status(zfcp_sdev->port,
1868 ZFCP_STATUS_COMMON_ACCESS_BOXED);
1869 zfcp_erp_port_reopen(zfcp_sdev->port,
1870 ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3",
1871 req);
1954 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1872 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1955 break; 1873 break;
1956 case FSF_ADAPTER_STATUS_AVAILABLE: 1874 case FSF_ADAPTER_STATUS_AVAILABLE:
1957 switch (req->qtcb->header.fsf_status_qual.word[0]) { 1875 switch (req->qtcb->header.fsf_status_qual.word[0]) {
1958 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 1876 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1959 zfcp_fc_test_link(unit->port); 1877 zfcp_fc_test_link(zfcp_sdev->port);
1960 /* fall through */ 1878 /* fall through */
1961 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 1879 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1962 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1880 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1964,23 +1882,24 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
1964 } 1882 }
1965 break; 1883 break;
1966 case FSF_GOOD: 1884 case FSF_GOOD:
1967 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status); 1885 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
1968 break; 1886 break;
1969 } 1887 }
1970} 1888}
1971 1889
1972/** 1890/**
1973 * zfcp_fsf_close_unit - close zfcp unit 1891 * zfcp_fsf_close_LUN - close LUN
1974 * @erp_action: pointer to struct zfcp_unit 1892 * @erp_action: pointer to erp_action triggering the "close LUN"
1975 * Returns: 0 on success, error otherwise 1893 * Returns: 0 on success, error otherwise
1976 */ 1894 */
1977int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) 1895int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
1978{ 1896{
1979 struct zfcp_qdio *qdio = erp_action->adapter->qdio; 1897 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1898 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
1980 struct zfcp_fsf_req *req; 1899 struct zfcp_fsf_req *req;
1981 int retval = -EIO; 1900 int retval = -EIO;
1982 1901
1983 spin_lock_bh(&qdio->req_q_lock); 1902 spin_lock_irq(&qdio->req_q_lock);
1984 if (zfcp_qdio_sbal_get(qdio)) 1903 if (zfcp_qdio_sbal_get(qdio))
1985 goto out; 1904 goto out;
1986 1905
@@ -1997,9 +1916,9 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
1997 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1916 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1998 1917
1999 req->qtcb->header.port_handle = erp_action->port->handle; 1918 req->qtcb->header.port_handle = erp_action->port->handle;
2000 req->qtcb->header.lun_handle = erp_action->unit->handle; 1919 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2001 req->handler = zfcp_fsf_close_unit_handler; 1920 req->handler = zfcp_fsf_close_lun_handler;
2002 req->data = erp_action->unit; 1921 req->data = erp_action->sdev;
2003 req->erp_action = erp_action; 1922 req->erp_action = erp_action;
2004 erp_action->fsf_req_id = req->req_id; 1923 erp_action->fsf_req_id = req->req_id;
2005 1924
@@ -2010,7 +1929,7 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
2010 erp_action->fsf_req_id = 0; 1929 erp_action->fsf_req_id = 0;
2011 } 1930 }
2012out: 1931out:
2013 spin_unlock_bh(&qdio->req_q_lock); 1932 spin_unlock_irq(&qdio->req_q_lock);
2014 return retval; 1933 return retval;
2015} 1934}
2016 1935
@@ -2025,7 +1944,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
2025{ 1944{
2026 struct fsf_qual_latency_info *lat_in; 1945 struct fsf_qual_latency_info *lat_in;
2027 struct latency_cont *lat = NULL; 1946 struct latency_cont *lat = NULL;
2028 struct zfcp_unit *unit = req->unit; 1947 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scsi->device);
2029 struct zfcp_blk_drv_data blktrc; 1948 struct zfcp_blk_drv_data blktrc;
2030 int ticks = req->adapter->timer_ticks; 1949 int ticks = req->adapter->timer_ticks;
2031 1950
@@ -2048,24 +1967,24 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
2048 case FSF_DATADIR_DIF_READ_STRIP: 1967 case FSF_DATADIR_DIF_READ_STRIP:
2049 case FSF_DATADIR_DIF_READ_CONVERT: 1968 case FSF_DATADIR_DIF_READ_CONVERT:
2050 case FSF_DATADIR_READ: 1969 case FSF_DATADIR_READ:
2051 lat = &unit->latencies.read; 1970 lat = &zfcp_sdev->latencies.read;
2052 break; 1971 break;
2053 case FSF_DATADIR_DIF_WRITE_INSERT: 1972 case FSF_DATADIR_DIF_WRITE_INSERT:
2054 case FSF_DATADIR_DIF_WRITE_CONVERT: 1973 case FSF_DATADIR_DIF_WRITE_CONVERT:
2055 case FSF_DATADIR_WRITE: 1974 case FSF_DATADIR_WRITE:
2056 lat = &unit->latencies.write; 1975 lat = &zfcp_sdev->latencies.write;
2057 break; 1976 break;
2058 case FSF_DATADIR_CMND: 1977 case FSF_DATADIR_CMND:
2059 lat = &unit->latencies.cmd; 1978 lat = &zfcp_sdev->latencies.cmd;
2060 break; 1979 break;
2061 } 1980 }
2062 1981
2063 if (lat) { 1982 if (lat) {
2064 spin_lock(&unit->latencies.lock); 1983 spin_lock(&zfcp_sdev->latencies.lock);
2065 zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat); 1984 zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
2066 zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat); 1985 zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
2067 lat->counter++; 1986 lat->counter++;
2068 spin_unlock(&unit->latencies.lock); 1987 spin_unlock(&zfcp_sdev->latencies.lock);
2069 } 1988 }
2070 } 1989 }
2071 1990
@@ -2073,12 +1992,88 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
2073 sizeof(blktrc)); 1992 sizeof(blktrc));
2074} 1993}
2075 1994
2076static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req) 1995static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
1996{
1997 struct scsi_cmnd *scmnd = req->data;
1998 struct scsi_device *sdev = scmnd->device;
1999 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
2000 struct fsf_qtcb_header *header = &req->qtcb->header;
2001
2002 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
2003 return;
2004
2005 switch (header->fsf_status) {
2006 case FSF_HANDLE_MISMATCH:
2007 case FSF_PORT_HANDLE_NOT_VALID:
2008 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fssfch1",
2009 req);
2010 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2011 break;
2012 case FSF_FCPLUN_NOT_VALID:
2013 case FSF_LUN_HANDLE_NOT_VALID:
2014 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2", req);
2015 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2016 break;
2017 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
2018 zfcp_fsf_class_not_supp(req);
2019 break;
2020 case FSF_ACCESS_DENIED:
2021 zfcp_cfdc_lun_denied(sdev, &header->fsf_status_qual);
2022 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2023 break;
2024 case FSF_DIRECTION_INDICATOR_NOT_VALID:
2025 dev_err(&req->adapter->ccw_device->dev,
2026 "Incorrect direction %d, LUN 0x%016Lx on port "
2027 "0x%016Lx closed\n",
2028 req->qtcb->bottom.io.data_direction,
2029 (unsigned long long)zfcp_scsi_dev_lun(sdev),
2030 (unsigned long long)zfcp_sdev->port->wwpn);
2031 zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
2032 "fssfch3", req);
2033 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2034 break;
2035 case FSF_CMND_LENGTH_NOT_VALID:
2036 dev_err(&req->adapter->ccw_device->dev,
2037 "Incorrect CDB length %d, LUN 0x%016Lx on "
2038 "port 0x%016Lx closed\n",
2039 req->qtcb->bottom.io.fcp_cmnd_length,
2040 (unsigned long long)zfcp_scsi_dev_lun(sdev),
2041 (unsigned long long)zfcp_sdev->port->wwpn);
2042 zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
2043 "fssfch4", req);
2044 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2045 break;
2046 case FSF_PORT_BOXED:
2047 zfcp_erp_set_port_status(zfcp_sdev->port,
2048 ZFCP_STATUS_COMMON_ACCESS_BOXED);
2049 zfcp_erp_port_reopen(zfcp_sdev->port,
2050 ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5",
2051 req);
2052 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2053 break;
2054 case FSF_LUN_BOXED:
2055 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
2056 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
2057 "fssfch6", req);
2058 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2059 break;
2060 case FSF_ADAPTER_STATUS_AVAILABLE:
2061 if (header->fsf_status_qual.word[0] ==
2062 FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
2063 zfcp_fc_test_link(zfcp_sdev->port);
2064 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2065 break;
2066 }
2067}
2068
2069static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
2077{ 2070{
2078 struct scsi_cmnd *scpnt; 2071 struct scsi_cmnd *scpnt;
2079 struct fcp_resp_with_ext *fcp_rsp; 2072 struct fcp_resp_with_ext *fcp_rsp;
2080 unsigned long flags; 2073 unsigned long flags;
2081 2074
2075 zfcp_fsf_fcp_handler_common(req);
2076
2082 read_lock_irqsave(&req->adapter->abort_lock, flags); 2077 read_lock_irqsave(&req->adapter->abort_lock, flags);
2083 2078
2084 scpnt = req->data; 2079 scpnt = req->data;
@@ -2125,97 +2120,6 @@ skip_fsfstatus:
2125 read_unlock_irqrestore(&req->adapter->abort_lock, flags); 2120 read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2126} 2121}
2127 2122
2128static void zfcp_fsf_send_fcp_ctm_handler(struct zfcp_fsf_req *req)
2129{
2130 struct fcp_resp_with_ext *fcp_rsp;
2131 struct fcp_resp_rsp_info *rsp_info;
2132
2133 fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
2134 rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
2135
2136 if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
2137 (req->status & ZFCP_STATUS_FSFREQ_ERROR))
2138 req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
2139}
2140
2141
2142static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req)
2143{
2144 struct zfcp_unit *unit;
2145 struct fsf_qtcb_header *header = &req->qtcb->header;
2146
2147 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT))
2148 unit = req->data;
2149 else
2150 unit = req->unit;
2151
2152 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
2153 goto skip_fsfstatus;
2154
2155 switch (header->fsf_status) {
2156 case FSF_HANDLE_MISMATCH:
2157 case FSF_PORT_HANDLE_NOT_VALID:
2158 zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fssfch1", req);
2159 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2160 break;
2161 case FSF_FCPLUN_NOT_VALID:
2162 case FSF_LUN_HANDLE_NOT_VALID:
2163 zfcp_erp_port_reopen(unit->port, 0, "fssfch2", req);
2164 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2165 break;
2166 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
2167 zfcp_fsf_class_not_supp(req);
2168 break;
2169 case FSF_ACCESS_DENIED:
2170 zfcp_fsf_access_denied_unit(req, unit);
2171 break;
2172 case FSF_DIRECTION_INDICATOR_NOT_VALID:
2173 dev_err(&req->adapter->ccw_device->dev,
2174 "Incorrect direction %d, unit 0x%016Lx on port "
2175 "0x%016Lx closed\n",
2176 req->qtcb->bottom.io.data_direction,
2177 (unsigned long long)unit->fcp_lun,
2178 (unsigned long long)unit->port->wwpn);
2179 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, "fssfch3",
2180 req);
2181 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2182 break;
2183 case FSF_CMND_LENGTH_NOT_VALID:
2184 dev_err(&req->adapter->ccw_device->dev,
2185 "Incorrect CDB length %d, unit 0x%016Lx on "
2186 "port 0x%016Lx closed\n",
2187 req->qtcb->bottom.io.fcp_cmnd_length,
2188 (unsigned long long)unit->fcp_lun,
2189 (unsigned long long)unit->port->wwpn);
2190 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, "fssfch4",
2191 req);
2192 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2193 break;
2194 case FSF_PORT_BOXED:
2195 zfcp_erp_port_boxed(unit->port, "fssfch5", req);
2196 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2197 break;
2198 case FSF_LUN_BOXED:
2199 zfcp_erp_unit_boxed(unit, "fssfch6", req);
2200 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2201 break;
2202 case FSF_ADAPTER_STATUS_AVAILABLE:
2203 if (header->fsf_status_qual.word[0] ==
2204 FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
2205 zfcp_fc_test_link(unit->port);
2206 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2207 break;
2208 }
2209skip_fsfstatus:
2210 if (req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
2211 zfcp_fsf_send_fcp_ctm_handler(req);
2212 else {
2213 zfcp_fsf_send_fcp_command_task_handler(req);
2214 req->unit = NULL;
2215 put_device(&unit->dev);
2216 }
2217}
2218
2219static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir) 2123static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir)
2220{ 2124{
2221 switch (scsi_get_prot_op(scsi_cmnd)) { 2125 switch (scsi_get_prot_op(scsi_cmnd)) {
@@ -2255,22 +2159,22 @@ static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir)
2255} 2159}
2256 2160
2257/** 2161/**
2258 * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command) 2162 * zfcp_fsf_fcp_cmnd - initiate an FCP command (for a SCSI command)
2259 * @unit: unit where command is sent to
2260 * @scsi_cmnd: scsi command to be sent 2163 * @scsi_cmnd: scsi command to be sent
2261 */ 2164 */
2262int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, 2165int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
2263 struct scsi_cmnd *scsi_cmnd)
2264{ 2166{
2265 struct zfcp_fsf_req *req; 2167 struct zfcp_fsf_req *req;
2266 struct fcp_cmnd *fcp_cmnd; 2168 struct fcp_cmnd *fcp_cmnd;
2267 unsigned int sbtype = SBAL_FLAGS0_TYPE_READ; 2169 unsigned int sbtype = SBAL_FLAGS0_TYPE_READ;
2268 int real_bytes, retval = -EIO, dix_bytes = 0; 2170 int real_bytes, retval = -EIO, dix_bytes = 0;
2269 struct zfcp_adapter *adapter = unit->port->adapter; 2171 struct scsi_device *sdev = scsi_cmnd->device;
2172 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
2173 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
2270 struct zfcp_qdio *qdio = adapter->qdio; 2174 struct zfcp_qdio *qdio = adapter->qdio;
2271 struct fsf_qtcb_bottom_io *io; 2175 struct fsf_qtcb_bottom_io *io;
2272 2176
2273 if (unlikely(!(atomic_read(&unit->status) & 2177 if (unlikely(!(atomic_read(&zfcp_sdev->status) &
2274 ZFCP_STATUS_COMMON_UNBLOCKED))) 2178 ZFCP_STATUS_COMMON_UNBLOCKED)))
2275 return -EBUSY; 2179 return -EBUSY;
2276 2180
@@ -2295,11 +2199,10 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2295 2199
2296 io = &req->qtcb->bottom.io; 2200 io = &req->qtcb->bottom.io;
2297 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 2201 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2298 req->unit = unit;
2299 req->data = scsi_cmnd; 2202 req->data = scsi_cmnd;
2300 req->handler = zfcp_fsf_send_fcp_command_handler; 2203 req->handler = zfcp_fsf_fcp_cmnd_handler;
2301 req->qtcb->header.lun_handle = unit->handle; 2204 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2302 req->qtcb->header.port_handle = unit->port->handle; 2205 req->qtcb->header.port_handle = zfcp_sdev->port->handle;
2303 io->service_class = FSF_CLASS_3; 2206 io->service_class = FSF_CLASS_3;
2304 io->fcp_cmnd_length = FCP_CMND_LEN; 2207 io->fcp_cmnd_length = FCP_CMND_LEN;
2305 2208
@@ -2310,8 +2213,6 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2310 2213
2311 zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction); 2214 zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction);
2312 2215
2313 get_device(&unit->dev);
2314
2315 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; 2216 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
2316 zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd); 2217 zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
2317 2218
@@ -2338,7 +2239,6 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2338 goto out; 2239 goto out;
2339 2240
2340failed_scsi_cmnd: 2241failed_scsi_cmnd:
2341 put_device(&unit->dev);
2342 zfcp_fsf_req_free(req); 2242 zfcp_fsf_req_free(req);
2343 scsi_cmnd->host_scribble = NULL; 2243 scsi_cmnd->host_scribble = NULL;
2344out: 2244out:
@@ -2346,23 +2246,40 @@ out:
2346 return retval; 2246 return retval;
2347} 2247}
2348 2248
2249static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
2250{
2251 struct fcp_resp_with_ext *fcp_rsp;
2252 struct fcp_resp_rsp_info *rsp_info;
2253
2254 zfcp_fsf_fcp_handler_common(req);
2255
2256 fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
2257 rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
2258
2259 if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
2260 (req->status & ZFCP_STATUS_FSFREQ_ERROR))
2261 req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
2262}
2263
2349/** 2264/**
2350 * zfcp_fsf_send_fcp_ctm - send SCSI task management command 2265 * zfcp_fsf_fcp_task_mgmt - send SCSI task management command
2351 * @unit: pointer to struct zfcp_unit 2266 * @scmnd: SCSI command to send the task management command for
2352 * @tm_flags: unsigned byte for task management flags 2267 * @tm_flags: unsigned byte for task management flags
2353 * Returns: on success pointer to struct fsf_req, NULL otherwise 2268 * Returns: on success pointer to struct fsf_req, NULL otherwise
2354 */ 2269 */
2355struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) 2270struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
2271 u8 tm_flags)
2356{ 2272{
2357 struct zfcp_fsf_req *req = NULL; 2273 struct zfcp_fsf_req *req = NULL;
2358 struct fcp_cmnd *fcp_cmnd; 2274 struct fcp_cmnd *fcp_cmnd;
2359 struct zfcp_qdio *qdio = unit->port->adapter->qdio; 2275 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scmnd->device);
2276 struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
2360 2277
2361 if (unlikely(!(atomic_read(&unit->status) & 2278 if (unlikely(!(atomic_read(&zfcp_sdev->status) &
2362 ZFCP_STATUS_COMMON_UNBLOCKED))) 2279 ZFCP_STATUS_COMMON_UNBLOCKED)))
2363 return NULL; 2280 return NULL;
2364 2281
2365 spin_lock_bh(&qdio->req_q_lock); 2282 spin_lock_irq(&qdio->req_q_lock);
2366 if (zfcp_qdio_sbal_get(qdio)) 2283 if (zfcp_qdio_sbal_get(qdio))
2367 goto out; 2284 goto out;
2368 2285
@@ -2376,10 +2293,10 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
2376 } 2293 }
2377 2294
2378 req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT; 2295 req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT;
2379 req->data = unit; 2296 req->data = scmnd;
2380 req->handler = zfcp_fsf_send_fcp_command_handler; 2297 req->handler = zfcp_fsf_fcp_task_mgmt_handler;
2381 req->qtcb->header.lun_handle = unit->handle; 2298 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2382 req->qtcb->header.port_handle = unit->port->handle; 2299 req->qtcb->header.port_handle = zfcp_sdev->port->handle;
2383 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND; 2300 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2384 req->qtcb->bottom.io.service_class = FSF_CLASS_3; 2301 req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2385 req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN; 2302 req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
@@ -2387,7 +2304,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
2387 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 2304 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
2388 2305
2389 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; 2306 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
2390 zfcp_fc_fcp_tm(fcp_cmnd, unit->device, tm_flags); 2307 zfcp_fc_fcp_tm(fcp_cmnd, scmnd->device, tm_flags);
2391 2308
2392 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT); 2309 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
2393 if (!zfcp_fsf_req_send(req)) 2310 if (!zfcp_fsf_req_send(req))
@@ -2396,7 +2313,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
2396 zfcp_fsf_req_free(req); 2313 zfcp_fsf_req_free(req);
2397 req = NULL; 2314 req = NULL;
2398out: 2315out:
2399 spin_unlock_bh(&qdio->req_q_lock); 2316 spin_unlock_irq(&qdio->req_q_lock);
2400 return req; 2317 return req;
2401} 2318}
2402 2319
@@ -2432,7 +2349,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2432 return ERR_PTR(-EINVAL); 2349 return ERR_PTR(-EINVAL);
2433 } 2350 }
2434 2351
2435 spin_lock_bh(&qdio->req_q_lock); 2352 spin_lock_irq(&qdio->req_q_lock);
2436 if (zfcp_qdio_sbal_get(qdio)) 2353 if (zfcp_qdio_sbal_get(qdio))
2437 goto out; 2354 goto out;
2438 2355
@@ -2459,7 +2376,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2459 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 2376 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
2460 retval = zfcp_fsf_req_send(req); 2377 retval = zfcp_fsf_req_send(req);
2461out: 2378out:
2462 spin_unlock_bh(&qdio->req_q_lock); 2379 spin_unlock_irq(&qdio->req_q_lock);
2463 2380
2464 if (!retval) { 2381 if (!retval) {
2465 wait_for_completion(&req->completion); 2382 wait_for_completion(&req->completion);
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index b2635759721c..60e6e5714eb9 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -60,13 +60,11 @@ static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
60 unsigned long long now, span; 60 unsigned long long now, span;
61 int used; 61 int used;
62 62
63 spin_lock(&qdio->stat_lock);
64 now = get_clock_monotonic(); 63 now = get_clock_monotonic();
65 span = (now - qdio->req_q_time) >> 12; 64 span = (now - qdio->req_q_time) >> 12;
66 used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free); 65 used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
67 qdio->req_q_util += used * span; 66 qdio->req_q_util += used * span;
68 qdio->req_q_time = now; 67 qdio->req_q_time = now;
69 spin_unlock(&qdio->stat_lock);
70} 68}
71 69
72static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err, 70static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
@@ -84,7 +82,9 @@ static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
84 /* cleanup all SBALs being program-owned now */ 82 /* cleanup all SBALs being program-owned now */
85 zfcp_qdio_zero_sbals(qdio->req_q, idx, count); 83 zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
86 84
85 spin_lock_irq(&qdio->stat_lock);
87 zfcp_qdio_account(qdio); 86 zfcp_qdio_account(qdio);
87 spin_unlock_irq(&qdio->stat_lock);
88 atomic_add(count, &qdio->req_q_free); 88 atomic_add(count, &qdio->req_q_free);
89 wake_up(&qdio->req_q_wq); 89 wake_up(&qdio->req_q_wq);
90} 90}
@@ -201,11 +201,11 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
201 201
202static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio) 202static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
203{ 203{
204 spin_lock_bh(&qdio->req_q_lock); 204 spin_lock_irq(&qdio->req_q_lock);
205 if (atomic_read(&qdio->req_q_free) || 205 if (atomic_read(&qdio->req_q_free) ||
206 !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) 206 !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
207 return 1; 207 return 1;
208 spin_unlock_bh(&qdio->req_q_lock); 208 spin_unlock_irq(&qdio->req_q_lock);
209 return 0; 209 return 0;
210} 210}
211 211
@@ -223,7 +223,7 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
223{ 223{
224 long ret; 224 long ret;
225 225
226 spin_unlock_bh(&qdio->req_q_lock); 226 spin_unlock_irq(&qdio->req_q_lock);
227 ret = wait_event_interruptible_timeout(qdio->req_q_wq, 227 ret = wait_event_interruptible_timeout(qdio->req_q_wq,
228 zfcp_qdio_sbal_check(qdio), 5 * HZ); 228 zfcp_qdio_sbal_check(qdio), 5 * HZ);
229 229
@@ -239,7 +239,7 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
239 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1", NULL); 239 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1", NULL);
240 } 240 }
241 241
242 spin_lock_bh(&qdio->req_q_lock); 242 spin_lock_irq(&qdio->req_q_lock);
243 return -EIO; 243 return -EIO;
244} 244}
245 245
@@ -254,7 +254,9 @@ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
254 int retval; 254 int retval;
255 u8 sbal_number = q_req->sbal_number; 255 u8 sbal_number = q_req->sbal_number;
256 256
257 spin_lock(&qdio->stat_lock);
257 zfcp_qdio_account(qdio); 258 zfcp_qdio_account(qdio);
259 spin_unlock(&qdio->stat_lock);
258 260
259 retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0, 261 retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
260 q_req->sbal_first, sbal_number); 262 q_req->sbal_first, sbal_number);
@@ -328,9 +330,9 @@ void zfcp_qdio_close(struct zfcp_qdio *qdio)
328 return; 330 return;
329 331
330 /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */ 332 /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
331 spin_lock_bh(&qdio->req_q_lock); 333 spin_lock_irq(&qdio->req_q_lock);
332 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); 334 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
333 spin_unlock_bh(&qdio->req_q_lock); 335 spin_unlock_irq(&qdio->req_q_lock);
334 336
335 wake_up(&qdio->req_q_wq); 337 wake_up(&qdio->req_q_wq);
336 338
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 208256e39def..50286d8707f3 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -49,11 +49,12 @@ static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth,
49 return sdev->queue_depth; 49 return sdev->queue_depth;
50} 50}
51 51
52static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) 52static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
53{ 53{
54 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; 54 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
55 unit->device = NULL; 55
56 put_device(&unit->dev); 56 zfcp_erp_lun_shutdown_wait(sdev, "scssd_1");
57 put_device(&zfcp_sdev->port->dev);
57} 58}
58 59
59static int zfcp_scsi_slave_configure(struct scsi_device *sdp) 60static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
@@ -78,23 +79,16 @@ static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
78static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, 79static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
79 void (*done) (struct scsi_cmnd *)) 80 void (*done) (struct scsi_cmnd *))
80{ 81{
81 struct zfcp_unit *unit; 82 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
82 struct zfcp_adapter *adapter; 83 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
83 int status, scsi_result, ret;
84 struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device)); 84 struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device));
85 int status, scsi_result, ret;
85 86
86 /* reset the status for this request */ 87 /* reset the status for this request */
87 scpnt->result = 0; 88 scpnt->result = 0;
88 scpnt->host_scribble = NULL; 89 scpnt->host_scribble = NULL;
89 scpnt->scsi_done = done; 90 scpnt->scsi_done = done;
90 91
91 /*
92 * figure out adapter and target device
93 * (stored there by zfcp_scsi_slave_alloc)
94 */
95 adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0];
96 unit = scpnt->device->hostdata;
97
98 scsi_result = fc_remote_port_chkready(rport); 92 scsi_result = fc_remote_port_chkready(rport);
99 if (unlikely(scsi_result)) { 93 if (unlikely(scsi_result)) {
100 scpnt->result = scsi_result; 94 scpnt->result = scsi_result;
@@ -103,11 +97,11 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
103 return 0; 97 return 0;
104 } 98 }
105 99
106 status = atomic_read(&unit->status); 100 status = atomic_read(&zfcp_sdev->status);
107 if (unlikely(status & ZFCP_STATUS_COMMON_ERP_FAILED) && 101 if (unlikely(status & ZFCP_STATUS_COMMON_ERP_FAILED) &&
108 !(atomic_read(&unit->port->status) & 102 !(atomic_read(&zfcp_sdev->port->status) &
109 ZFCP_STATUS_COMMON_ERP_FAILED)) { 103 ZFCP_STATUS_COMMON_ERP_FAILED)) {
110 /* only unit access denied, but port is good 104 /* only LUN access denied, but port is good
111 * not covered by FC transport, have to fail here */ 105 * not covered by FC transport, have to fail here */
112 zfcp_scsi_command_fail(scpnt, DID_ERROR); 106 zfcp_scsi_command_fail(scpnt, DID_ERROR);
113 return 0; 107 return 0;
@@ -115,8 +109,8 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
115 109
116 if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) { 110 if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) {
117 /* This could be either 111 /* This could be either
118 * open unit pending: this is temporary, will result in 112 * open LUN pending: this is temporary, will result in
119 * open unit or ERP_FAILED, so retry command 113 * open LUN or ERP_FAILED, so retry command
120 * call to rport_delete pending: mimic retry from 114 * call to rport_delete pending: mimic retry from
121 * fc_remote_port_chkready until rport is BLOCKED 115 * fc_remote_port_chkready until rport is BLOCKED
122 */ 116 */
@@ -124,7 +118,7 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
124 return 0; 118 return 0;
125 } 119 }
126 120
127 ret = zfcp_fsf_send_fcp_command_task(unit, scpnt); 121 ret = zfcp_fsf_fcp_cmnd(scpnt);
128 if (unlikely(ret == -EBUSY)) 122 if (unlikely(ret == -EBUSY))
129 return SCSI_MLQUEUE_DEVICE_BUSY; 123 return SCSI_MLQUEUE_DEVICE_BUSY;
130 else if (unlikely(ret < 0)) 124 else if (unlikely(ret < 0))
@@ -133,45 +127,42 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
133 return ret; 127 return ret;
134} 128}
135 129
136static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *adapter, 130static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
137 unsigned int id, u64 lun)
138{ 131{
139 unsigned long flags; 132 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
133 struct zfcp_adapter *adapter =
134 (struct zfcp_adapter *) sdev->host->hostdata[0];
135 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
140 struct zfcp_port *port; 136 struct zfcp_port *port;
141 struct zfcp_unit *unit = NULL; 137 struct zfcp_unit *unit;
142 138
143 read_lock_irqsave(&adapter->port_list_lock, flags); 139 port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
144 list_for_each_entry(port, &adapter->port_list, list) { 140 if (!port)
145 if (!port->rport || (id != port->rport->scsi_target_id)) 141 return -ENXIO;
146 continue;
147 unit = zfcp_get_unit_by_lun(port, lun);
148 if (unit)
149 break;
150 }
151 read_unlock_irqrestore(&adapter->port_list_lock, flags);
152 142
153 return unit; 143 unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev));
154} 144 if (unit)
145 put_device(&unit->dev);
155 146
156static int zfcp_scsi_slave_alloc(struct scsi_device *sdp) 147 if (!unit && !(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) {
157{ 148 put_device(&port->dev);
158 struct zfcp_adapter *adapter; 149 return -ENXIO;
159 struct zfcp_unit *unit; 150 }
160 u64 lun;
161 151
162 adapter = (struct zfcp_adapter *) sdp->host->hostdata[0]; 152 zfcp_sdev->port = port;
163 if (!adapter) 153 zfcp_sdev->latencies.write.channel.min = 0xFFFFFFFF;
164 goto out; 154 zfcp_sdev->latencies.write.fabric.min = 0xFFFFFFFF;
155 zfcp_sdev->latencies.read.channel.min = 0xFFFFFFFF;
156 zfcp_sdev->latencies.read.fabric.min = 0xFFFFFFFF;
157 zfcp_sdev->latencies.cmd.channel.min = 0xFFFFFFFF;
158 zfcp_sdev->latencies.cmd.fabric.min = 0xFFFFFFFF;
159 spin_lock_init(&zfcp_sdev->latencies.lock);
165 160
166 int_to_scsilun(sdp->lun, (struct scsi_lun *)&lun); 161 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
167 unit = zfcp_unit_lookup(adapter, sdp->id, lun); 162 zfcp_erp_lun_reopen(sdev, 0, "scsla_1", NULL);
168 if (unit) { 163 zfcp_erp_wait(port->adapter);
169 sdp->hostdata = unit; 164
170 unit->device = sdp; 165 return 0;
171 return 0;
172 }
173out:
174 return -ENXIO;
175} 166}
176 167
177static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) 168static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
@@ -179,7 +170,6 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
179 struct Scsi_Host *scsi_host = scpnt->device->host; 170 struct Scsi_Host *scsi_host = scpnt->device->host;
180 struct zfcp_adapter *adapter = 171 struct zfcp_adapter *adapter =
181 (struct zfcp_adapter *) scsi_host->hostdata[0]; 172 (struct zfcp_adapter *) scsi_host->hostdata[0];
182 struct zfcp_unit *unit = scpnt->device->hostdata;
183 struct zfcp_fsf_req *old_req, *abrt_req; 173 struct zfcp_fsf_req *old_req, *abrt_req;
184 unsigned long flags; 174 unsigned long flags;
185 unsigned long old_reqid = (unsigned long) scpnt->host_scribble; 175 unsigned long old_reqid = (unsigned long) scpnt->host_scribble;
@@ -203,7 +193,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
203 write_unlock_irqrestore(&adapter->abort_lock, flags); 193 write_unlock_irqrestore(&adapter->abort_lock, flags);
204 194
205 while (retry--) { 195 while (retry--) {
206 abrt_req = zfcp_fsf_abort_fcp_command(old_reqid, unit); 196 abrt_req = zfcp_fsf_abort_fcp_cmnd(scpnt);
207 if (abrt_req) 197 if (abrt_req)
208 break; 198 break;
209 199
@@ -238,14 +228,14 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
238 228
239static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) 229static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
240{ 230{
241 struct zfcp_unit *unit = scpnt->device->hostdata; 231 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
242 struct zfcp_adapter *adapter = unit->port->adapter; 232 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
243 struct zfcp_fsf_req *fsf_req = NULL; 233 struct zfcp_fsf_req *fsf_req = NULL;
244 int retval = SUCCESS, ret; 234 int retval = SUCCESS, ret;
245 int retry = 3; 235 int retry = 3;
246 236
247 while (retry--) { 237 while (retry--) {
248 fsf_req = zfcp_fsf_send_fcp_ctm(unit, tm_flags); 238 fsf_req = zfcp_fsf_fcp_task_mgmt(scpnt, tm_flags);
249 if (fsf_req) 239 if (fsf_req)
250 break; 240 break;
251 241
@@ -256,7 +246,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
256 246
257 if (!(atomic_read(&adapter->status) & 247 if (!(atomic_read(&adapter->status) &
258 ZFCP_STATUS_COMMON_RUNNING)) { 248 ZFCP_STATUS_COMMON_RUNNING)) {
259 zfcp_dbf_scsi_devreset("nres", tm_flags, unit, scpnt); 249 zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags);
260 return SUCCESS; 250 return SUCCESS;
261 } 251 }
262 } 252 }
@@ -266,10 +256,10 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
266 wait_for_completion(&fsf_req->completion); 256 wait_for_completion(&fsf_req->completion);
267 257
268 if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) { 258 if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
269 zfcp_dbf_scsi_devreset("fail", tm_flags, unit, scpnt); 259 zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
270 retval = FAILED; 260 retval = FAILED;
271 } else 261 } else
272 zfcp_dbf_scsi_devreset("okay", tm_flags, unit, scpnt); 262 zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
273 263
274 zfcp_fsf_req_free(fsf_req); 264 zfcp_fsf_req_free(fsf_req);
275 return retval; 265 return retval;
@@ -287,8 +277,8 @@ static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
287 277
288static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) 278static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
289{ 279{
290 struct zfcp_unit *unit = scpnt->device->hostdata; 280 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
291 struct zfcp_adapter *adapter = unit->port->adapter; 281 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
292 int ret; 282 int ret;
293 283
294 zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt); 284 zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt);
@@ -319,8 +309,8 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
319 } 309 }
320 310
321 /* tell the SCSI stack some characteristics of this adapter */ 311 /* tell the SCSI stack some characteristics of this adapter */
322 adapter->scsi_host->max_id = 1; 312 adapter->scsi_host->max_id = 511;
323 adapter->scsi_host->max_lun = 1; 313 adapter->scsi_host->max_lun = 0xFFFFFFFF;
324 adapter->scsi_host->max_channel = 0; 314 adapter->scsi_host->max_channel = 0;
325 adapter->scsi_host->unique_id = dev_id.devno; 315 adapter->scsi_host->unique_id = dev_id.devno;
326 adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */ 316 adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
@@ -534,20 +524,6 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
534 } 524 }
535} 525}
536 526
537static void zfcp_scsi_queue_unit_register(struct zfcp_port *port)
538{
539 struct zfcp_unit *unit;
540
541 read_lock_irq(&port->unit_list_lock);
542 list_for_each_entry(unit, &port->unit_list, list) {
543 get_device(&unit->dev);
544 if (scsi_queue_work(port->adapter->scsi_host,
545 &unit->scsi_work) <= 0)
546 put_device(&unit->dev);
547 }
548 read_unlock_irq(&port->unit_list_lock);
549}
550
551static void zfcp_scsi_rport_register(struct zfcp_port *port) 527static void zfcp_scsi_rport_register(struct zfcp_port *port)
552{ 528{
553 struct fc_rport_identifiers ids; 529 struct fc_rport_identifiers ids;
@@ -574,7 +550,7 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
574 port->rport = rport; 550 port->rport = rport;
575 port->starget_id = rport->scsi_target_id; 551 port->starget_id = rport->scsi_target_id;
576 552
577 zfcp_scsi_queue_unit_register(port); 553 zfcp_unit_queue_scsi_scan(port);
578} 554}
579 555
580static void zfcp_scsi_rport_block(struct zfcp_port *port) 556static void zfcp_scsi_rport_block(struct zfcp_port *port)
@@ -638,29 +614,6 @@ void zfcp_scsi_rport_work(struct work_struct *work)
638} 614}
639 615
640/** 616/**
641 * zfcp_scsi_scan - Register LUN with SCSI midlayer
642 * @unit: The LUN/unit to register
643 */
644void zfcp_scsi_scan(struct zfcp_unit *unit)
645{
646 struct fc_rport *rport = unit->port->rport;
647
648 if (rport && rport->port_state == FC_PORTSTATE_ONLINE)
649 scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
650 scsilun_to_int((struct scsi_lun *)
651 &unit->fcp_lun), 0);
652}
653
654void zfcp_scsi_scan_work(struct work_struct *work)
655{
656 struct zfcp_unit *unit = container_of(work, struct zfcp_unit,
657 scsi_work);
658
659 zfcp_scsi_scan(unit);
660 put_device(&unit->dev);
661}
662
663/**
664 * zfcp_scsi_set_prot - Configure DIF/DIX support in scsi_host 617 * zfcp_scsi_set_prot - Configure DIF/DIX support in scsi_host
665 * @adapter: The adapter where to configure DIF/DIX for the SCSI host 618 * @adapter: The adapter where to configure DIF/DIX for the SCSI host
666 */ 619 */
@@ -735,7 +688,6 @@ struct fc_function_template zfcp_transport_functions = {
735 .show_host_port_type = 1, 688 .show_host_port_type = 1,
736 .show_host_speed = 1, 689 .show_host_speed = 1,
737 .show_host_port_id = 1, 690 .show_host_port_id = 1,
738 .disable_target_scan = 1,
739 .dd_bsg_size = sizeof(struct zfcp_fsf_ct_els), 691 .dd_bsg_size = sizeof(struct zfcp_fsf_ct_els),
740}; 692};
741 693
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index b4561c86e230..2f2c54f4718f 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -68,63 +68,96 @@ ZFCP_DEFINE_ATTR(zfcp_port, port, access_denied, "%d\n",
68 ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0); 68 ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
69 69
70ZFCP_DEFINE_ATTR(zfcp_unit, unit, status, "0x%08x\n", 70ZFCP_DEFINE_ATTR(zfcp_unit, unit, status, "0x%08x\n",
71 atomic_read(&unit->status)); 71 zfcp_unit_sdev_status(unit));
72ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n", 72ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n",
73 (atomic_read(&unit->status) & 73 (zfcp_unit_sdev_status(unit) &
74 ZFCP_STATUS_COMMON_ERP_INUSE) != 0); 74 ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
75ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n", 75ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n",
76 (atomic_read(&unit->status) & 76 (zfcp_unit_sdev_status(unit) &
77 ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0); 77 ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
78ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_shared, "%d\n", 78ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_shared, "%d\n",
79 (atomic_read(&unit->status) & 79 (zfcp_unit_sdev_status(unit) &
80 ZFCP_STATUS_UNIT_SHARED) != 0); 80 ZFCP_STATUS_LUN_SHARED) != 0);
81ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_readonly, "%d\n", 81ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_readonly, "%d\n",
82 (atomic_read(&unit->status) & 82 (zfcp_unit_sdev_status(unit) &
83 ZFCP_STATUS_UNIT_READONLY) != 0); 83 ZFCP_STATUS_LUN_READONLY) != 0);
84 84
85#define ZFCP_SYSFS_FAILED(_feat_def, _feat, _adapter, _mod_id, _reopen_id) \ 85static ssize_t zfcp_sysfs_port_failed_show(struct device *dev,
86static ssize_t zfcp_sysfs_##_feat##_failed_show(struct device *dev, \ 86 struct device_attribute *attr,
87 struct device_attribute *attr, \ 87 char *buf)
88 char *buf) \ 88{
89{ \ 89 struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
90 struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \ 90
91 \ 91 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
92 if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_ERP_FAILED) \ 92 return sprintf(buf, "1\n");
93 return sprintf(buf, "1\n"); \ 93
94 else \ 94 return sprintf(buf, "0\n");
95 return sprintf(buf, "0\n"); \ 95}
96} \ 96
97static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \ 97static ssize_t zfcp_sysfs_port_failed_store(struct device *dev,
98 struct device_attribute *attr,\ 98 struct device_attribute *attr,
99 const char *buf, size_t count)\ 99 const char *buf, size_t count)
100{ \ 100{
101 struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \ 101 struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
102 unsigned long val; \ 102 unsigned long val;
103 int retval = 0; \ 103
104 \ 104 if (strict_strtoul(buf, 0, &val) || val != 0)
105 if (!(_feat && get_device(&_feat->dev))) \ 105 return -EINVAL;
106 return -EBUSY; \ 106
107 \ 107 zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_RUNNING);
108 if (strict_strtoul(buf, 0, &val) || val != 0) { \ 108 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, "sypfai2",
109 retval = -EINVAL; \ 109 NULL);
110 goto out; \ 110 zfcp_erp_wait(port->adapter);
111 } \
112 \
113 zfcp_erp_modify_##_feat##_status(_feat, _mod_id, NULL, \
114 ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);\
115 zfcp_erp_##_feat##_reopen(_feat, ZFCP_STATUS_COMMON_ERP_FAILED, \
116 _reopen_id, NULL); \
117 zfcp_erp_wait(_adapter); \
118out: \
119 put_device(&_feat->dev); \
120 return retval ? retval : (ssize_t) count; \
121} \
122static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \
123 zfcp_sysfs_##_feat##_failed_show, \
124 zfcp_sysfs_##_feat##_failed_store);
125 111
126ZFCP_SYSFS_FAILED(zfcp_port, port, port->adapter, "sypfai1", "sypfai2"); 112 return count;
127ZFCP_SYSFS_FAILED(zfcp_unit, unit, unit->port->adapter, "syufai1", "syufai2"); 113}
114static ZFCP_DEV_ATTR(port, failed, S_IWUSR | S_IRUGO,
115 zfcp_sysfs_port_failed_show,
116 zfcp_sysfs_port_failed_store);
117
118static ssize_t zfcp_sysfs_unit_failed_show(struct device *dev,
119 struct device_attribute *attr,
120 char *buf)
121{
122 struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
123 struct scsi_device *sdev;
124 unsigned int status, failed = 1;
125
126 sdev = zfcp_unit_sdev(unit);
127 if (sdev) {
128 status = atomic_read(&sdev_to_zfcp(sdev)->status);
129 failed = status & ZFCP_STATUS_COMMON_ERP_FAILED ? 1 : 0;
130 scsi_device_put(sdev);
131 }
132
133 return sprintf(buf, "%d\n", failed);
134}
135
136static ssize_t zfcp_sysfs_unit_failed_store(struct device *dev,
137 struct device_attribute *attr,
138 const char *buf, size_t count)
139{
140 struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
141 unsigned long val;
142 struct scsi_device *sdev;
143
144 if (strict_strtoul(buf, 0, &val) || val != 0)
145 return -EINVAL;
146
147 sdev = zfcp_unit_sdev(unit);
148 if (sdev) {
149 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
150 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
151 "syufai2", NULL);
152 zfcp_erp_wait(unit->port->adapter);
153 } else
154 zfcp_unit_scsi_scan(unit);
155
156 return count;
157}
158static ZFCP_DEV_ATTR(unit, failed, S_IWUSR | S_IRUGO,
159 zfcp_sysfs_unit_failed_show,
160 zfcp_sysfs_unit_failed_store);
128 161
129static ssize_t zfcp_sysfs_adapter_failed_show(struct device *dev, 162static ssize_t zfcp_sysfs_adapter_failed_show(struct device *dev,
130 struct device_attribute *attr, 163 struct device_attribute *attr,
@@ -163,8 +196,7 @@ static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev,
163 goto out; 196 goto out;
164 } 197 }
165 198
166 zfcp_erp_modify_adapter_status(adapter, "syafai1", NULL, 199 zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
167 ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
168 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 200 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
169 "syafai2", NULL); 201 "syafai2", NULL);
170 zfcp_erp_wait(adapter); 202 zfcp_erp_wait(adapter);
@@ -257,28 +289,15 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
257 const char *buf, size_t count) 289 const char *buf, size_t count)
258{ 290{
259 struct zfcp_port *port = container_of(dev, struct zfcp_port, dev); 291 struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
260 struct zfcp_unit *unit;
261 u64 fcp_lun; 292 u64 fcp_lun;
262 int retval = -EINVAL;
263
264 if (!(port && get_device(&port->dev)))
265 return -EBUSY;
266 293
267 if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) 294 if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
268 goto out; 295 return -EINVAL;
269 296
270 unit = zfcp_unit_enqueue(port, fcp_lun); 297 if (zfcp_unit_add(port, fcp_lun))
271 if (IS_ERR(unit)) 298 return -EINVAL;
272 goto out;
273 else
274 retval = 0;
275 299
276 zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL); 300 return count;
277 zfcp_erp_wait(unit->port->adapter);
278 zfcp_scsi_scan(unit);
279out:
280 put_device(&port->dev);
281 return retval ? retval : (ssize_t) count;
282} 301}
283static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store); 302static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
284 303
@@ -287,42 +306,15 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
287 const char *buf, size_t count) 306 const char *buf, size_t count)
288{ 307{
289 struct zfcp_port *port = container_of(dev, struct zfcp_port, dev); 308 struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
290 struct zfcp_unit *unit;
291 u64 fcp_lun; 309 u64 fcp_lun;
292 int retval = -EINVAL;
293 struct scsi_device *sdev;
294
295 if (!(port && get_device(&port->dev)))
296 return -EBUSY;
297 310
298 if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) 311 if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
299 goto out; 312 return -EINVAL;
300 313
301 unit = zfcp_get_unit_by_lun(port, fcp_lun); 314 if (zfcp_unit_remove(port, fcp_lun))
302 if (!unit) 315 return -EINVAL;
303 goto out;
304 else
305 retval = 0;
306
307 sdev = scsi_device_lookup(port->adapter->scsi_host, 0,
308 port->starget_id,
309 scsilun_to_int((struct scsi_lun *)&fcp_lun));
310 if (sdev) {
311 scsi_remove_device(sdev);
312 scsi_device_put(sdev);
313 }
314
315 write_lock_irq(&port->unit_list_lock);
316 list_del(&unit->list);
317 write_unlock_irq(&port->unit_list_lock);
318
319 put_device(&unit->dev);
320 316
321 zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL); 317 return count;
322 zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs);
323out:
324 put_device(&port->dev);
325 return retval ? retval : (ssize_t) count;
326} 318}
327static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store); 319static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
328 320
@@ -363,9 +355,9 @@ zfcp_sysfs_unit_##_name##_latency_show(struct device *dev, \
363 struct device_attribute *attr, \ 355 struct device_attribute *attr, \
364 char *buf) { \ 356 char *buf) { \
365 struct scsi_device *sdev = to_scsi_device(dev); \ 357 struct scsi_device *sdev = to_scsi_device(dev); \
366 struct zfcp_unit *unit = sdev->hostdata; \ 358 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \
367 struct zfcp_latencies *lat = &unit->latencies; \ 359 struct zfcp_latencies *lat = &zfcp_sdev->latencies; \
368 struct zfcp_adapter *adapter = unit->port->adapter; \ 360 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; \
369 unsigned long long fsum, fmin, fmax, csum, cmin, cmax, cc; \ 361 unsigned long long fsum, fmin, fmax, csum, cmin, cmax, cc; \
370 \ 362 \
371 spin_lock_bh(&lat->lock); \ 363 spin_lock_bh(&lat->lock); \
@@ -394,8 +386,8 @@ zfcp_sysfs_unit_##_name##_latency_store(struct device *dev, \
394 const char *buf, size_t count) \ 386 const char *buf, size_t count) \
395{ \ 387{ \
396 struct scsi_device *sdev = to_scsi_device(dev); \ 388 struct scsi_device *sdev = to_scsi_device(dev); \
397 struct zfcp_unit *unit = sdev->hostdata; \ 389 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \
398 struct zfcp_latencies *lat = &unit->latencies; \ 390 struct zfcp_latencies *lat = &zfcp_sdev->latencies; \
399 unsigned long flags; \ 391 unsigned long flags; \
400 \ 392 \
401 spin_lock_irqsave(&lat->lock, flags); \ 393 spin_lock_irqsave(&lat->lock, flags); \
@@ -423,19 +415,28 @@ static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev, \
423 struct device_attribute *attr,\ 415 struct device_attribute *attr,\
424 char *buf) \ 416 char *buf) \
425{ \ 417{ \
426 struct scsi_device *sdev = to_scsi_device(dev); \ 418 struct scsi_device *sdev = to_scsi_device(dev); \
427 struct zfcp_unit *unit = sdev->hostdata; \ 419 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \
420 struct zfcp_port *port = zfcp_sdev->port; \
428 \ 421 \
429 return sprintf(buf, _format, _value); \ 422 return sprintf(buf, _format, _value); \
430} \ 423} \
431static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL); 424static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL);
432 425
433ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n", 426ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n",
434 dev_name(&unit->port->adapter->ccw_device->dev)); 427 dev_name(&port->adapter->ccw_device->dev));
435ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n", 428ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n",
436 (unsigned long long) unit->port->wwpn); 429 (unsigned long long) port->wwpn);
437ZFCP_DEFINE_SCSI_ATTR(fcp_lun, "0x%016llx\n", 430
438 (unsigned long long) unit->fcp_lun); 431static ssize_t zfcp_sysfs_scsi_fcp_lun_show(struct device *dev,
432 struct device_attribute *attr,
433 char *buf)
434{
435 struct scsi_device *sdev = to_scsi_device(dev);
436
437 return sprintf(buf, "0x%016llx\n", zfcp_scsi_dev_lun(sdev));
438}
439static DEVICE_ATTR(fcp_lun, S_IRUGO, zfcp_sysfs_scsi_fcp_lun_show, NULL);
439 440
440struct device_attribute *zfcp_sysfs_sdev_attrs[] = { 441struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
441 &dev_attr_fcp_lun, 442 &dev_attr_fcp_lun,
diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
new file mode 100644
index 000000000000..1119c535a667
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_unit.c
@@ -0,0 +1,244 @@
1/*
2 * zfcp device driver
3 *
4 * Tracking of manually configured LUNs and helper functions to
5 * register the LUNs with the SCSI midlayer.
6 *
7 * Copyright IBM Corporation 2010
8 */
9
10#include "zfcp_def.h"
11#include "zfcp_ext.h"
12
13/**
14 * zfcp_unit_scsi_scan - Register LUN with SCSI midlayer
15 * @unit: The zfcp LUN/unit to register
16 *
17 * When the SCSI midlayer is not allowed to automatically scan and
18 * attach SCSI devices, zfcp has to register the single devices with
19 * the SCSI midlayer.
20 */
21void zfcp_unit_scsi_scan(struct zfcp_unit *unit)
22{
23 struct fc_rport *rport = unit->port->rport;
24 unsigned int lun;
25
26 lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun);
27
28 if (rport && rport->port_state == FC_PORTSTATE_ONLINE)
29 scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, lun, 1);
30}
31
32static void zfcp_unit_scsi_scan_work(struct work_struct *work)
33{
34 struct zfcp_unit *unit = container_of(work, struct zfcp_unit,
35 scsi_work);
36
37 zfcp_unit_scsi_scan(unit);
38 put_device(&unit->dev);
39}
40
41/**
42 * zfcp_unit_queue_scsi_scan - Register configured units on port
43 * @port: The zfcp_port where to register units
44 *
45 * After opening a port, all units configured on this port have to be
46 * registered with the SCSI midlayer. This function should be called
47 * after calling fc_remote_port_add, so that the fc_rport is already
48 * ONLINE and the call to scsi_scan_target runs the same way as the
49 * call in the FC transport class.
50 */
51void zfcp_unit_queue_scsi_scan(struct zfcp_port *port)
52{
53 struct zfcp_unit *unit;
54
55 read_lock_irq(&port->unit_list_lock);
56 list_for_each_entry(unit, &port->unit_list, list) {
57 get_device(&unit->dev);
58 if (scsi_queue_work(port->adapter->scsi_host,
59 &unit->scsi_work) <= 0)
60 put_device(&unit->dev);
61 }
62 read_unlock_irq(&port->unit_list_lock);
63}
64
65static struct zfcp_unit *_zfcp_unit_find(struct zfcp_port *port, u64 fcp_lun)
66{
67 struct zfcp_unit *unit;
68
69 list_for_each_entry(unit, &port->unit_list, list)
70 if (unit->fcp_lun == fcp_lun) {
71 get_device(&unit->dev);
72 return unit;
73 }
74
75 return NULL;
76}
77
78/**
79 * zfcp_unit_find - Find and return zfcp_unit with specified FCP LUN
80 * @port: zfcp_port where to look for the unit
81 * @fcp_lun: 64 Bit FCP LUN used to identify the zfcp_unit
82 *
83 * If zfcp_unit is found, a reference is acquired that has to be
84 * released later.
85 *
86 * Returns: Pointer to the zfcp_unit, or NULL if there is no zfcp_unit
87 * with the specified FCP LUN.
88 */
89struct zfcp_unit *zfcp_unit_find(struct zfcp_port *port, u64 fcp_lun)
90{
91 struct zfcp_unit *unit;
92
93 read_lock_irq(&port->unit_list_lock);
94 unit = _zfcp_unit_find(port, fcp_lun);
95 read_unlock_irq(&port->unit_list_lock);
96 return unit;
97}
98
99/**
100 * zfcp_unit_release - Drop reference to zfcp_port and free memory of zfcp_unit.
101 * @dev: pointer to device in zfcp_unit
102 */
103static void zfcp_unit_release(struct device *dev)
104{
105 struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
106
107 put_device(&unit->port->dev);
108 kfree(unit);
109}
110
111/**
112 * zfcp_unit_enqueue - enqueue unit to unit list of a port.
113 * @port: pointer to port where unit is added
114 * @fcp_lun: FCP LUN of unit to be enqueued
115 * Returns: 0 success
116 *
117 * Sets up some unit internal structures and creates sysfs entry.
118 */
119int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
120{
121 struct zfcp_unit *unit;
122
123 unit = zfcp_unit_find(port, fcp_lun);
124 if (unit) {
125 put_device(&unit->dev);
126 return -EEXIST;
127 }
128
129 unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
130 if (!unit)
131 return -ENOMEM;
132
133 unit->port = port;
134 unit->fcp_lun = fcp_lun;
135 unit->dev.parent = &port->dev;
136 unit->dev.release = zfcp_unit_release;
137 INIT_WORK(&unit->scsi_work, zfcp_unit_scsi_scan_work);
138
139 if (dev_set_name(&unit->dev, "0x%016llx",
140 (unsigned long long) fcp_lun)) {
141 kfree(unit);
142 return -ENOMEM;
143 }
144
145 if (device_register(&unit->dev)) {
146 put_device(&unit->dev);
147 return -ENOMEM;
148 }
149
150 if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs)) {
151 device_unregister(&unit->dev);
152 return -EINVAL;
153 }
154
155 get_device(&port->dev);
156
157 write_lock_irq(&port->unit_list_lock);
158 list_add_tail(&unit->list, &port->unit_list);
159 write_unlock_irq(&port->unit_list_lock);
160
161 zfcp_unit_scsi_scan(unit);
162
163 return 0;
164}
165
166/**
167 * zfcp_unit_sdev - Return SCSI device for zfcp_unit
168 * @unit: The zfcp_unit where to get the SCSI device for
169 *
170 * Returns: scsi_device pointer on success, NULL if there is no SCSI
171 * device for this zfcp_unit
172 *
173 * On success, the caller also holds a reference to the SCSI device
174 * that must be released with scsi_device_put.
175 */
176struct scsi_device *zfcp_unit_sdev(struct zfcp_unit *unit)
177{
178 struct Scsi_Host *shost;
179 struct zfcp_port *port;
180 unsigned int lun;
181
182 lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun);
183 port = unit->port;
184 shost = port->adapter->scsi_host;
185 return scsi_device_lookup(shost, 0, port->starget_id, lun);
186}
187
188/**
189 * zfcp_unit_sdev_status - Return zfcp LUN status for SCSI device
190 * @unit: The unit to lookup the SCSI device for
191 *
192 * Returns the zfcp LUN status field of the SCSI device if the SCSI device
193 * for the zfcp_unit exists, 0 otherwise.
194 */
195unsigned int zfcp_unit_sdev_status(struct zfcp_unit *unit)
196{
197 unsigned int status = 0;
198 struct scsi_device *sdev;
199 struct zfcp_scsi_dev *zfcp_sdev;
200
201 sdev = zfcp_unit_sdev(unit);
202 if (sdev) {
203 zfcp_sdev = sdev_to_zfcp(sdev);
204 status = atomic_read(&zfcp_sdev->status);
205 scsi_device_put(sdev);
206 }
207
208 return status;
209}
210
211/**
212 * zfcp_unit_remove - Remove entry from list of configured units
213 * @port: The port where to remove the unit from the configuration
214 * @fcp_lun: The 64 bit LUN of the unit to remove
215 *
216 * Returns: -EINVAL if a unit with the specified LUN does not exist,
217 * 0 on success.
218 */
219int zfcp_unit_remove(struct zfcp_port *port, u64 fcp_lun)
220{
221 struct zfcp_unit *unit;
222 struct scsi_device *sdev;
223
224 write_lock_irq(&port->unit_list_lock);
225 unit = _zfcp_unit_find(port, fcp_lun);
226 if (unit)
227 list_del(&unit->list);
228 write_unlock_irq(&port->unit_list_lock);
229
230 if (!unit)
231 return -EINVAL;
232
233 sdev = zfcp_unit_sdev(unit);
234 if (sdev) {
235 scsi_remove_device(sdev);
236 scsi_device_put(sdev);
237 }
238
239 put_device(&unit->dev);
240
241 zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs);
242
243 return 0;
244}
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index bbf91aec64f5..2e9632e2c98b 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -316,7 +316,8 @@ config SCSI_ISCSI_ATTRS
316 316
317config SCSI_SAS_ATTRS 317config SCSI_SAS_ATTRS
318 tristate "SAS Transport Attributes" 318 tristate "SAS Transport Attributes"
319 depends on SCSI && BLK_DEV_BSG 319 depends on SCSI
320 select BLK_DEV_BSG
320 help 321 help
321 If you wish to export transport-specific information about 322 If you wish to export transport-specific information about
322 each attached SAS device to sysfs, say Y. 323 each attached SAS device to sysfs, say Y.
@@ -378,7 +379,7 @@ config ISCSI_BOOT_SYSFS
378 via sysfs to userspace. If you wish to export this information, 379 via sysfs to userspace. If you wish to export this information,
379 say Y. Otherwise, say N. 380 say Y. Otherwise, say N.
380 381
381source "drivers/scsi/cxgb3i/Kconfig" 382source "drivers/scsi/cxgbi/Kconfig"
382source "drivers/scsi/bnx2i/Kconfig" 383source "drivers/scsi/bnx2i/Kconfig"
383source "drivers/scsi/be2iscsi/Kconfig" 384source "drivers/scsi/be2iscsi/Kconfig"
384 385
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 2703c6ec5e36..2e9a87e8e7d8 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -133,7 +133,8 @@ obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
133obj-$(CONFIG_SCSI_STEX) += stex.o 133obj-$(CONFIG_SCSI_STEX) += stex.o
134obj-$(CONFIG_SCSI_MVSAS) += mvsas/ 134obj-$(CONFIG_SCSI_MVSAS) += mvsas/
135obj-$(CONFIG_PS3_ROM) += ps3rom.o 135obj-$(CONFIG_PS3_ROM) += ps3rom.o
136obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/ 136obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
137obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
137obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/ 138obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
138obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/ 139obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/
139obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o 140obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 1a5bf5724750..645ddd9d9b9e 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -190,7 +190,7 @@ static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
190 /* 190 /*
191 * Initialize the mutex used to wait for the next AIF. 191 * Initialize the mutex used to wait for the next AIF.
192 */ 192 */
193 init_MUTEX_LOCKED(&fibctx->wait_sem); 193 sema_init(&fibctx->wait_sem, 0);
194 fibctx->wait = 0; 194 fibctx->wait = 0;
195 /* 195 /*
196 * Initialize the fibs and set the count of fibs on 196 * Initialize the fibs and set the count of fibs on
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 70079146e203..afc9aeba5edb 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -124,7 +124,7 @@ int aac_fib_setup(struct aac_dev * dev)
124 fibptr->hw_fib_va = hw_fib; 124 fibptr->hw_fib_va = hw_fib;
125 fibptr->data = (void *) fibptr->hw_fib_va->data; 125 fibptr->data = (void *) fibptr->hw_fib_va->data;
126 fibptr->next = fibptr+1; /* Forward chain the fibs */ 126 fibptr->next = fibptr+1; /* Forward chain the fibs */
127 init_MUTEX_LOCKED(&fibptr->event_wait); 127 sema_init(&fibptr->event_wait, 0);
128 spin_lock_init(&fibptr->event_lock); 128 spin_lock_init(&fibptr->event_lock);
129 hw_fib->header.XferState = cpu_to_le32(0xffffffff); 129 hw_fib->header.XferState = cpu_to_le32(0xffffffff);
130 hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size); 130 hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size);
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index c8dc392edd57..05a78e515a24 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -878,8 +878,8 @@ static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
878 if (!error) { 878 if (!error) {
879 if (acb->devstate[id][lun] == ARECA_RAID_GONE) 879 if (acb->devstate[id][lun] == ARECA_RAID_GONE)
880 acb->devstate[id][lun] = ARECA_RAID_GOOD; 880 acb->devstate[id][lun] = ARECA_RAID_GOOD;
881 ccb->pcmd->result = DID_OK << 16; 881 ccb->pcmd->result = DID_OK << 16;
882 arcmsr_ccb_complete(ccb); 882 arcmsr_ccb_complete(ccb);
883 }else{ 883 }else{
884 switch (ccb->arcmsr_cdb.DeviceStatus) { 884 switch (ccb->arcmsr_cdb.DeviceStatus) {
885 case ARCMSR_DEV_SELECT_TIMEOUT: { 885 case ARCMSR_DEV_SELECT_TIMEOUT: {
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 7c7537335c88..ad246369d373 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -335,7 +335,7 @@ static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
335 if (ready) 335 if (ready)
336 break; 336 break;
337 337
338 if (cnt > 6000000) { 338 if (cnt > 12000000) {
339 dev_err(&ctrl->pdev->dev, "mbox_db poll timed out\n"); 339 dev_err(&ctrl->pdev->dev, "mbox_db poll timed out\n");
340 return -EBUSY; 340 return -EBUSY;
341 } 341 }
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 7f11f3e48e12..eaaa8813067d 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -522,7 +522,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
522 if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start + 522 if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start +
523 phba->params.cxns_per_ctrl * 2)) { 523 phba->params.cxns_per_ctrl * 2)) {
524 SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n"); 524 SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n");
525 beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
526 goto free_ep; 525 goto free_ep;
527 } 526 }
528 527
@@ -559,7 +558,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
559 SE_DEBUG(DBG_LVL_1, "mgmt_open_connection Failed" 558 SE_DEBUG(DBG_LVL_1, "mgmt_open_connection Failed"
560 " status = %d extd_status = %d\n", 559 " status = %d extd_status = %d\n",
561 status, extd_status); 560 status, extd_status);
562 beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
563 free_mcc_tag(&phba->ctrl, tag); 561 free_mcc_tag(&phba->ctrl, tag);
564 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 562 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
565 nonemb_cmd.va, nonemb_cmd.dma); 563 nonemb_cmd.va, nonemb_cmd.dma);
@@ -574,7 +572,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
574 beiscsi_ep->cid_vld = 1; 572 beiscsi_ep->cid_vld = 1;
575 SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n"); 573 SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n");
576 } 574 }
577 beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
578 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 575 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
579 nonemb_cmd.va, nonemb_cmd.dma); 576 nonemb_cmd.va, nonemb_cmd.dma);
580 return 0; 577 return 0;
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 8220bde6c04c..75a85aa9e882 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -2040,7 +2040,7 @@ hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
2040 unsigned int num_sg, struct beiscsi_io_task *io_task) 2040 unsigned int num_sg, struct beiscsi_io_task *io_task)
2041{ 2041{
2042 struct iscsi_sge *psgl; 2042 struct iscsi_sge *psgl;
2043 unsigned short sg_len, index; 2043 unsigned int sg_len, index;
2044 unsigned int sge_len = 0; 2044 unsigned int sge_len = 0;
2045 unsigned long long addr; 2045 unsigned long long addr;
2046 struct scatterlist *l_sg; 2046 struct scatterlist *l_sg;
diff --git a/drivers/scsi/bfa/Makefile b/drivers/scsi/bfa/Makefile
index ac3fdf02d5f6..d2eefd3e3bd5 100644
--- a/drivers/scsi/bfa/Makefile
+++ b/drivers/scsi/bfa/Makefile
@@ -1,15 +1,8 @@
1obj-$(CONFIG_SCSI_BFA_FC) := bfa.o 1obj-$(CONFIG_SCSI_BFA_FC) := bfa.o
2 2
3bfa-y := bfad.o bfad_intr.o bfad_os.o bfad_im.o bfad_attr.o bfad_fwimg.o 3bfa-y := bfad.o bfad_im.o bfad_attr.o bfad_debugfs.o
4bfa-y += bfad_debugfs.o 4bfa-y += bfa_ioc.o bfa_ioc_cb.o bfa_ioc_ct.o bfa_hw_cb.o bfa_hw_ct.o
5bfa-y += bfa_core.o bfa_ioc.o bfa_ioc_ct.o bfa_ioc_cb.o bfa_iocfc.o bfa_fcxp.o 5bfa-y += bfa_fcs.o bfa_fcs_lport.o bfa_fcs_rport.o bfa_fcs_fcpim.o bfa_fcbuild.o
6bfa-y += bfa_lps.o bfa_hw_cb.o bfa_hw_ct.o bfa_intr.o bfa_timer.o bfa_rport.o 6bfa-y += bfa_port.o bfa_fcpim.o bfa_core.o bfa_drv.o bfa_svc.o
7bfa-y += bfa_fcport.o bfa_port.o bfa_uf.o bfa_sgpg.o bfa_module.o bfa_ioim.o
8bfa-y += bfa_itnim.o bfa_fcpim.o bfa_tskim.o bfa_log.o bfa_log_module.o
9bfa-y += bfa_csdebug.o bfa_sm.o plog.o
10 7
11bfa-y += fcbuild.o fabric.o fcpim.o vfapi.o fcptm.o bfa_fcs.o bfa_fcs_port.o 8ccflags-y := -DBFA_PERF_BUILD
12bfa-y += bfa_fcs_uf.o bfa_fcs_lport.o fab.o fdmi.o ms.o ns.o scn.o loop.o
13bfa-y += lport_api.o n2n.o rport.o rport_api.o rport_ftrs.o vport.o
14
15ccflags-y := -I$(obj) -I$(obj)/include -I$(obj)/include/cna -DBFA_PERF_BUILD
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
new file mode 100644
index 000000000000..ceaac65a91ff
--- /dev/null
+++ b/drivers/scsi/bfa/bfa.h
@@ -0,0 +1,438 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_H__
18#define __BFA_H__
19
20#include "bfa_os_inc.h"
21#include "bfa_cs.h"
22#include "bfa_plog.h"
23#include "bfa_defs_svc.h"
24#include "bfi.h"
25#include "bfa_ioc.h"
26
27struct bfa_s;
28
29typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m);
30typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
31
32/**
33 * Interrupt message handlers
34 */
35void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
36void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func);
37
38/**
39 * Request and response queue related defines
40 */
41#define BFA_REQQ_NELEMS_MIN (4)
42#define BFA_RSPQ_NELEMS_MIN (4)
43
44#define bfa_reqq_pi(__bfa, __reqq) ((__bfa)->iocfc.req_cq_pi[__reqq])
45#define bfa_reqq_ci(__bfa, __reqq) \
46 (*(u32 *)((__bfa)->iocfc.req_cq_shadow_ci[__reqq].kva))
47
48#define bfa_reqq_full(__bfa, __reqq) \
49 (((bfa_reqq_pi(__bfa, __reqq) + 1) & \
50 ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1)) == \
51 bfa_reqq_ci(__bfa, __reqq))
52
53#define bfa_reqq_next(__bfa, __reqq) \
54 (bfa_reqq_full(__bfa, __reqq) ? NULL : \
55 ((void *)((struct bfi_msg_s *)((__bfa)->iocfc.req_cq_ba[__reqq].kva) \
56 + bfa_reqq_pi((__bfa), (__reqq)))))
57
58#define bfa_reqq_produce(__bfa, __reqq) do { \
59 (__bfa)->iocfc.req_cq_pi[__reqq]++; \
60 (__bfa)->iocfc.req_cq_pi[__reqq] &= \
61 ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \
62 bfa_reg_write((__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq], \
63 (__bfa)->iocfc.req_cq_pi[__reqq]); \
64 mmiowb(); \
65 } while (0)
66
67#define bfa_rspq_pi(__bfa, __rspq) \
68 (*(u32 *)((__bfa)->iocfc.rsp_cq_shadow_pi[__rspq].kva))
69
70#define bfa_rspq_ci(__bfa, __rspq) ((__bfa)->iocfc.rsp_cq_ci[__rspq])
71#define bfa_rspq_elem(__bfa, __rspq, __ci) \
72 (&((struct bfi_msg_s *)((__bfa)->iocfc.rsp_cq_ba[__rspq].kva))[__ci])
73
74#define CQ_INCR(__index, __size) do { \
75 (__index)++; \
76 (__index) &= ((__size) - 1); \
77} while (0)
78
79/**
80 * Queue element to wait for room in request queue. FIFO order is
81 * maintained when fullfilling requests.
82 */
83struct bfa_reqq_wait_s {
84 struct list_head qe;
85 void (*qresume) (void *cbarg);
86 void *cbarg;
87};
88
89/**
90 * Circular queue usage assignments
91 */
92enum {
93 BFA_REQQ_IOC = 0, /* all low-priority IOC msgs */
94 BFA_REQQ_FCXP = 0, /* all FCXP messages */
95 BFA_REQQ_LPS = 0, /* all lport service msgs */
96 BFA_REQQ_PORT = 0, /* all port messages */
97 BFA_REQQ_FLASH = 0, /* for flash module */
98 BFA_REQQ_DIAG = 0, /* for diag module */
99 BFA_REQQ_RPORT = 0, /* all port messages */
100 BFA_REQQ_SBOOT = 0, /* all san boot messages */
101 BFA_REQQ_QOS_LO = 1, /* all low priority IO */
102 BFA_REQQ_QOS_MD = 2, /* all medium priority IO */
103 BFA_REQQ_QOS_HI = 3, /* all high priority IO */
104};
105
106static inline void
107bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
108 void *cbarg)
109{
110 wqe->qresume = qresume;
111 wqe->cbarg = cbarg;
112}
113
114#define bfa_reqq(__bfa, __reqq) (&(__bfa)->reqq_waitq[__reqq])
115
116/**
117 * static inline void
118 * bfa_reqq_wait(struct bfa_s *bfa, int reqq, struct bfa_reqq_wait_s *wqe)
119 */
120#define bfa_reqq_wait(__bfa, __reqq, __wqe) do { \
121 \
122 struct list_head *waitq = bfa_reqq(__bfa, __reqq); \
123 \
124 bfa_assert(((__reqq) < BFI_IOC_MAX_CQS)); \
125 bfa_assert((__wqe)->qresume && (__wqe)->cbarg); \
126 \
127 list_add_tail(&(__wqe)->qe, waitq); \
128 } while (0)
129
130#define bfa_reqq_wcancel(__wqe) list_del(&(__wqe)->qe)
131
132
133/**
134 * Generic BFA callback element.
135 */
136struct bfa_cb_qe_s {
137 struct list_head qe;
138 bfa_cb_cbfn_t cbfn;
139 bfa_boolean_t once;
140 u32 rsvd;
141 void *cbarg;
142};
143
144#define bfa_cb_queue(__bfa, __hcb_qe, __cbfn, __cbarg) do { \
145 (__hcb_qe)->cbfn = (__cbfn); \
146 (__hcb_qe)->cbarg = (__cbarg); \
147 list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \
148 } while (0)
149
150#define bfa_cb_dequeue(__hcb_qe) list_del(&(__hcb_qe)->qe)
151
152#define bfa_cb_queue_once(__bfa, __hcb_qe, __cbfn, __cbarg) do { \
153 (__hcb_qe)->cbfn = (__cbfn); \
154 (__hcb_qe)->cbarg = (__cbarg); \
155 if (!(__hcb_qe)->once) { \
156 list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \
157 (__hcb_qe)->once = BFA_TRUE; \
158 } \
159 } while (0)
160
161#define bfa_cb_queue_done(__hcb_qe) do { \
162 (__hcb_qe)->once = BFA_FALSE; \
163 } while (0)
164
165
166/**
167 * PCI devices supported by the current BFA
168 */
169struct bfa_pciid_s {
170 u16 device_id;
171 u16 vendor_id;
172};
173
174extern char bfa_version[];
175
176/**
177 * BFA memory resources
178 */
179enum bfa_mem_type {
180 BFA_MEM_TYPE_KVA = 1, /* Kernel Virtual Memory *(non-dma-able) */
181 BFA_MEM_TYPE_DMA = 2, /* DMA-able memory */
182 BFA_MEM_TYPE_MAX = BFA_MEM_TYPE_DMA,
183};
184
185struct bfa_mem_elem_s {
186 enum bfa_mem_type mem_type; /* see enum bfa_mem_type */
187 u32 mem_len; /* Total Length in Bytes */
188 u8 *kva; /* kernel virtual address */
189 u64 dma; /* dma address if DMA memory */
190 u8 *kva_curp; /* kva allocation cursor */
191 u64 dma_curp; /* dma allocation cursor */
192};
193
194struct bfa_meminfo_s {
195 struct bfa_mem_elem_s meminfo[BFA_MEM_TYPE_MAX];
196};
197#define bfa_meminfo_kva(_m) \
198 ((_m)->meminfo[BFA_MEM_TYPE_KVA - 1].kva_curp)
199#define bfa_meminfo_dma_virt(_m) \
200 ((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].kva_curp)
201#define bfa_meminfo_dma_phys(_m) \
202 ((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].dma_curp)
203
204struct bfa_iocfc_regs_s {
205 bfa_os_addr_t intr_status;
206 bfa_os_addr_t intr_mask;
207 bfa_os_addr_t cpe_q_pi[BFI_IOC_MAX_CQS];
208 bfa_os_addr_t cpe_q_ci[BFI_IOC_MAX_CQS];
209 bfa_os_addr_t cpe_q_depth[BFI_IOC_MAX_CQS];
210 bfa_os_addr_t cpe_q_ctrl[BFI_IOC_MAX_CQS];
211 bfa_os_addr_t rme_q_ci[BFI_IOC_MAX_CQS];
212 bfa_os_addr_t rme_q_pi[BFI_IOC_MAX_CQS];
213 bfa_os_addr_t rme_q_depth[BFI_IOC_MAX_CQS];
214 bfa_os_addr_t rme_q_ctrl[BFI_IOC_MAX_CQS];
215};
216
217/**
218 * MSIX vector handlers
219 */
220#define BFA_MSIX_MAX_VECTORS 22
221typedef void (*bfa_msix_handler_t)(struct bfa_s *bfa, int vec);
222struct bfa_msix_s {
223 int nvecs;
224 bfa_msix_handler_t handler[BFA_MSIX_MAX_VECTORS];
225};
226
227/**
228 * Chip specific interfaces
229 */
230struct bfa_hwif_s {
231 void (*hw_reginit)(struct bfa_s *bfa);
232 void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq);
233 void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq);
234 void (*hw_msix_init)(struct bfa_s *bfa, int nvecs);
235 void (*hw_msix_install)(struct bfa_s *bfa);
236 void (*hw_msix_uninstall)(struct bfa_s *bfa);
237 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
238 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
239 u32 *nvecs, u32 *maxvec);
240 void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
241 u32 *end);
242};
243typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
244
245struct bfa_iocfc_s {
246 struct bfa_s *bfa;
247 struct bfa_iocfc_cfg_s cfg;
248 int action;
249 u32 req_cq_pi[BFI_IOC_MAX_CQS];
250 u32 rsp_cq_ci[BFI_IOC_MAX_CQS];
251 struct bfa_cb_qe_s init_hcb_qe;
252 struct bfa_cb_qe_s stop_hcb_qe;
253 struct bfa_cb_qe_s dis_hcb_qe;
254 struct bfa_cb_qe_s stats_hcb_qe;
255 bfa_boolean_t cfgdone;
256
257 struct bfa_dma_s cfg_info;
258 struct bfi_iocfc_cfg_s *cfginfo;
259 struct bfa_dma_s cfgrsp_dma;
260 struct bfi_iocfc_cfgrsp_s *cfgrsp;
261 struct bfi_iocfc_cfg_reply_s *cfg_reply;
262 struct bfa_dma_s req_cq_ba[BFI_IOC_MAX_CQS];
263 struct bfa_dma_s req_cq_shadow_ci[BFI_IOC_MAX_CQS];
264 struct bfa_dma_s rsp_cq_ba[BFI_IOC_MAX_CQS];
265 struct bfa_dma_s rsp_cq_shadow_pi[BFI_IOC_MAX_CQS];
266 struct bfa_iocfc_regs_s bfa_regs; /* BFA device registers */
267 struct bfa_hwif_s hwif;
268 bfa_cb_iocfc_t updateq_cbfn; /* bios callback function */
269 void *updateq_cbarg; /* bios callback arg */
270 u32 intr_mask;
271};
272
273#define bfa_lpuid(__bfa) \
274 bfa_ioc_portid(&(__bfa)->ioc)
275#define bfa_msix_init(__bfa, __nvecs) \
276 ((__bfa)->iocfc.hwif.hw_msix_init(__bfa, __nvecs))
277#define bfa_msix_install(__bfa) \
278 ((__bfa)->iocfc.hwif.hw_msix_install(__bfa))
279#define bfa_msix_uninstall(__bfa) \
280 ((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa))
281#define bfa_isr_mode_set(__bfa, __msix) \
282 ((__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix))
283#define bfa_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec) \
284 ((__bfa)->iocfc.hwif.hw_msix_getvecs(__bfa, __vecmap, \
285 __nvecs, __maxvec))
286#define bfa_msix_get_rme_range(__bfa, __start, __end) \
287 ((__bfa)->iocfc.hwif.hw_msix_get_rme_range(__bfa, __start, __end))
288#define bfa_msix(__bfa, __vec) \
289 ((__bfa)->msix.handler[__vec](__bfa, __vec))
290
291/*
292 * FC specific IOC functions.
293 */
294void bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
295 u32 *dm_len);
296void bfa_iocfc_attach(struct bfa_s *bfa, void *bfad,
297 struct bfa_iocfc_cfg_s *cfg,
298 struct bfa_meminfo_s *meminfo,
299 struct bfa_pcidev_s *pcidev);
300void bfa_iocfc_detach(struct bfa_s *bfa);
301void bfa_iocfc_init(struct bfa_s *bfa);
302void bfa_iocfc_start(struct bfa_s *bfa);
303void bfa_iocfc_stop(struct bfa_s *bfa);
304void bfa_iocfc_isr(void *bfa, struct bfi_mbmsg_s *msg);
305void bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa);
306bfa_boolean_t bfa_iocfc_is_operational(struct bfa_s *bfa);
307void bfa_iocfc_reset_queues(struct bfa_s *bfa);
308
309void bfa_msix_all(struct bfa_s *bfa, int vec);
310void bfa_msix_reqq(struct bfa_s *bfa, int vec);
311void bfa_msix_rspq(struct bfa_s *bfa, int vec);
312void bfa_msix_lpu_err(struct bfa_s *bfa, int vec);
313
314void bfa_hwcb_reginit(struct bfa_s *bfa);
315void bfa_hwcb_reqq_ack(struct bfa_s *bfa, int rspq);
316void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq);
317void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs);
318void bfa_hwcb_msix_install(struct bfa_s *bfa);
319void bfa_hwcb_msix_uninstall(struct bfa_s *bfa);
320void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
321void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
322 u32 *maxvec);
323void bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start,
324 u32 *end);
325void bfa_hwct_reginit(struct bfa_s *bfa);
326void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq);
327void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq);
328void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs);
329void bfa_hwct_msix_install(struct bfa_s *bfa);
330void bfa_hwct_msix_uninstall(struct bfa_s *bfa);
331void bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
332void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
333 u32 *maxvec);
334void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start,
335 u32 *end);
336void bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi);
337void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns);
338wwn_t bfa_iocfc_get_pwwn(struct bfa_s *bfa);
339wwn_t bfa_iocfc_get_nwwn(struct bfa_s *bfa);
340void bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa,
341 struct bfa_boot_pbc_s *pbcfg);
342int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa,
343 struct bfi_pbc_vport_s *pbc_vport);
344
345
346/**
347 *----------------------------------------------------------------------
348 * BFA public interfaces
349 *----------------------------------------------------------------------
350 */
351#define bfa_stats(_mod, _stats) ((_mod)->stats._stats++)
352#define bfa_ioc_get_stats(__bfa, __ioc_stats) \
353 bfa_ioc_fetch_stats(&(__bfa)->ioc, __ioc_stats)
354#define bfa_ioc_clear_stats(__bfa) \
355 bfa_ioc_clr_stats(&(__bfa)->ioc)
356#define bfa_get_nports(__bfa) \
357 bfa_ioc_get_nports(&(__bfa)->ioc)
358#define bfa_get_adapter_manufacturer(__bfa, __manufacturer) \
359 bfa_ioc_get_adapter_manufacturer(&(__bfa)->ioc, __manufacturer)
360#define bfa_get_adapter_model(__bfa, __model) \
361 bfa_ioc_get_adapter_model(&(__bfa)->ioc, __model)
362#define bfa_get_adapter_serial_num(__bfa, __serial_num) \
363 bfa_ioc_get_adapter_serial_num(&(__bfa)->ioc, __serial_num)
364#define bfa_get_adapter_fw_ver(__bfa, __fw_ver) \
365 bfa_ioc_get_adapter_fw_ver(&(__bfa)->ioc, __fw_ver)
366#define bfa_get_adapter_optrom_ver(__bfa, __optrom_ver) \
367 bfa_ioc_get_adapter_optrom_ver(&(__bfa)->ioc, __optrom_ver)
368#define bfa_get_pci_chip_rev(__bfa, __chip_rev) \
369 bfa_ioc_get_pci_chip_rev(&(__bfa)->ioc, __chip_rev)
370#define bfa_get_ioc_state(__bfa) \
371 bfa_ioc_get_state(&(__bfa)->ioc)
372#define bfa_get_type(__bfa) \
373 bfa_ioc_get_type(&(__bfa)->ioc)
374#define bfa_get_mac(__bfa) \
375 bfa_ioc_get_mac(&(__bfa)->ioc)
376#define bfa_get_mfg_mac(__bfa) \
377 bfa_ioc_get_mfg_mac(&(__bfa)->ioc)
378#define bfa_get_fw_clock_res(__bfa) \
379 ((__bfa)->iocfc.cfgrsp->fwcfg.fw_tick_res)
380
381void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids);
382void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg);
383void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg);
384void bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg,
385 struct bfa_meminfo_s *meminfo);
386void bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
387 struct bfa_meminfo_s *meminfo,
388 struct bfa_pcidev_s *pcidev);
389void bfa_init_trc(struct bfa_s *bfa, struct bfa_trc_mod_s *trcmod);
390void bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog);
391void bfa_detach(struct bfa_s *bfa);
392void bfa_init(struct bfa_s *bfa);
393void bfa_start(struct bfa_s *bfa);
394void bfa_stop(struct bfa_s *bfa);
395void bfa_attach_fcs(struct bfa_s *bfa);
396void bfa_cb_init(void *bfad, bfa_status_t status);
397void bfa_cb_updateq(void *bfad, bfa_status_t status);
398
399bfa_boolean_t bfa_intx(struct bfa_s *bfa);
400void bfa_intx_disable(struct bfa_s *bfa);
401void bfa_intx_enable(struct bfa_s *bfa);
402void bfa_isr_enable(struct bfa_s *bfa);
403void bfa_isr_disable(struct bfa_s *bfa);
404
405void bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q);
406void bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q);
407void bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q);
408
409typedef void (*bfa_cb_ioc_t) (void *cbarg, enum bfa_status status);
410void bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr);
411void bfa_get_attr(struct bfa_s *bfa, struct bfa_ioc_attr_s *ioc_attr);
412
413void bfa_adapter_get_attr(struct bfa_s *bfa,
414 struct bfa_adapter_attr_s *ad_attr);
415u64 bfa_adapter_get_id(struct bfa_s *bfa);
416
417bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa,
418 struct bfa_iocfc_intr_attr_s *attr);
419
420void bfa_iocfc_enable(struct bfa_s *bfa);
421void bfa_iocfc_disable(struct bfa_s *bfa);
422void bfa_chip_reset(struct bfa_s *bfa);
423void bfa_timer_tick(struct bfa_s *bfa);
424#define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \
425 bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout)
426
427/*
428 * BFA debug API functions
429 */
430bfa_status_t bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen);
431bfa_status_t bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen);
432bfa_status_t bfa_debug_fwcore(struct bfa_s *bfa, void *buf,
433 u32 *offset, int *buflen);
434void bfa_debug_fwsave_clear(struct bfa_s *bfa);
435bfa_status_t bfa_fw_stats_get(struct bfa_s *bfa, void *data);
436bfa_status_t bfa_fw_stats_clear(struct bfa_s *bfa);
437
438#endif /* __BFA_H__ */
diff --git a/drivers/scsi/bfa/bfa_callback_priv.h b/drivers/scsi/bfa/bfa_callback_priv.h
deleted file mode 100644
index 1e3265c9f7d4..000000000000
--- a/drivers/scsi/bfa/bfa_callback_priv.h
+++ /dev/null
@@ -1,57 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_CALLBACK_PRIV_H__
19#define __BFA_CALLBACK_PRIV_H__
20
21#include <cs/bfa_q.h>
22
23typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
24
25/**
26 * Generic BFA callback element.
27 */
28struct bfa_cb_qe_s {
29 struct list_head qe;
30 bfa_cb_cbfn_t cbfn;
31 bfa_boolean_t once;
32 u32 rsvd;
33 void *cbarg;
34};
35
36#define bfa_cb_queue(__bfa, __hcb_qe, __cbfn, __cbarg) do { \
37 (__hcb_qe)->cbfn = (__cbfn); \
38 (__hcb_qe)->cbarg = (__cbarg); \
39 list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \
40} while (0)
41
42#define bfa_cb_dequeue(__hcb_qe) list_del(&(__hcb_qe)->qe)
43
44#define bfa_cb_queue_once(__bfa, __hcb_qe, __cbfn, __cbarg) do { \
45 (__hcb_qe)->cbfn = (__cbfn); \
46 (__hcb_qe)->cbarg = (__cbarg); \
47 if (!(__hcb_qe)->once) { \
48 list_add_tail((__hcb_qe), &(__bfa)->comp_q); \
49 (__hcb_qe)->once = BFA_TRUE; \
50 } \
51} while (0)
52
53#define bfa_cb_queue_done(__hcb_qe) do { \
54 (__hcb_qe)->once = BFA_FALSE; \
55} while (0)
56
57#endif /* __BFA_CALLBACK_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_cb_ioim_macros.h b/drivers/scsi/bfa/bfa_cb_ioim.h
index 3906ed926966..a989a94c38da 100644
--- a/drivers/scsi/bfa/bfa_cb_ioim_macros.h
+++ b/drivers/scsi/bfa/bfa_cb_ioim.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -15,37 +15,25 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18/** 18#ifndef __BFA_HCB_IOIM_H__
19 * bfa_cb_ioim_macros.h BFA IOIM driver interface macros. 19#define __BFA_HCB_IOIM_H__
20 */
21
22#ifndef __BFA_HCB_IOIM_MACROS_H__
23#define __BFA_HCB_IOIM_MACROS_H__
24
25#include <bfa_os_inc.h>
26/*
27 * #include <linux/dma-mapping.h>
28 *
29 * #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include
30 * <scsi/scsi_device.h> #include <scsi/scsi_host.h>
31 */
32#include "bfad_im_compat.h"
33 20
21#include "bfa_os_inc.h"
34/* 22/*
35 * task attribute values in FCP-2 FCP_CMND IU 23 * task attribute values in FCP-2 FCP_CMND IU
36 */ 24 */
37#define SIMPLE_Q 0 25#define SIMPLE_Q 0
38#define HEAD_OF_Q 1 26#define HEAD_OF_Q 1
39#define ORDERED_Q 2 27#define ORDERED_Q 2
40#define ACA_Q 4 28#define ACA_Q 4
41#define UNTAGGED 5 29#define UNTAGGED 5
42 30
43static inline lun_t 31static inline lun_t
44bfad_int_to_lun(u32 luno) 32bfad_int_to_lun(u32 luno)
45{ 33{
46 union { 34 union {
47 u16 scsi_lun[4]; 35 u16 scsi_lun[4];
48 lun_t bfa_lun; 36 lun_t bfa_lun;
49 } lun; 37 } lun;
50 38
51 lun.bfa_lun = 0; 39 lun.bfa_lun = 0;
@@ -141,7 +129,7 @@ static inline u8
141bfa_cb_ioim_get_taskattr(struct bfad_ioim_s *dio) 129bfa_cb_ioim_get_taskattr(struct bfad_ioim_s *dio)
142{ 130{
143 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; 131 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
144 u8 task_attr = UNTAGGED; 132 u8 task_attr = UNTAGGED;
145 133
146 if (cmnd->device->tagged_supported) { 134 if (cmnd->device->tagged_supported) {
147 switch (cmnd->tag) { 135 switch (cmnd->tag) {
@@ -178,4 +166,4 @@ bfa_cb_ioim_get_cdblen(struct bfad_ioim_s *dio)
178 */ 166 */
179#define bfa_cb_ioim_get_reqq(__dio) BFA_FALSE 167#define bfa_cb_ioim_get_reqq(__dio) BFA_FALSE
180 168
181#endif /* __BFA_HCB_IOIM_MACROS_H__ */ 169#endif /* __BFA_HCB_IOIM_H__ */
diff --git a/drivers/scsi/bfa/bfa_cee.c b/drivers/scsi/bfa/bfa_cee.c
deleted file mode 100644
index 2b917792c6bc..000000000000
--- a/drivers/scsi/bfa/bfa_cee.c
+++ /dev/null
@@ -1,492 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <defs/bfa_defs_cee.h>
19#include <cs/bfa_trc.h>
20#include <cs/bfa_log.h>
21#include <cs/bfa_debug.h>
22#include <cee/bfa_cee.h>
23#include <bfi/bfi_cee.h>
24#include <bfi/bfi.h>
25#include <bfa_ioc.h>
26#include <cna/bfa_cna_trcmod.h>
27
28BFA_TRC_FILE(CNA, CEE);
29
30#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
31#define bfa_lpuid(__arg) bfa_ioc_portid(&(__arg)->ioc)
32
33static void bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg_s *lldp_cfg);
34static void bfa_cee_format_dcbcx_stats(struct bfa_cee_dcbx_stats_s
35 *dcbcx_stats);
36static void bfa_cee_format_lldp_stats(struct bfa_cee_lldp_stats_s
37 *lldp_stats);
38static void bfa_cee_format_cfg_stats(struct bfa_cee_cfg_stats_s *cfg_stats);
39static void bfa_cee_format_cee_cfg(void *buffer);
40static void bfa_cee_format_cee_stats(void *buffer);
41
42static void
43bfa_cee_format_cee_stats(void *buffer)
44{
45 struct bfa_cee_stats_s *cee_stats = buffer;
46 bfa_cee_format_dcbcx_stats(&cee_stats->dcbx_stats);
47 bfa_cee_format_lldp_stats(&cee_stats->lldp_stats);
48 bfa_cee_format_cfg_stats(&cee_stats->cfg_stats);
49}
50
51static void
52bfa_cee_format_cee_cfg(void *buffer)
53{
54 struct bfa_cee_attr_s *cee_cfg = buffer;
55 bfa_cee_format_lldp_cfg(&cee_cfg->lldp_remote);
56}
57
58static void
59bfa_cee_format_dcbcx_stats(struct bfa_cee_dcbx_stats_s *dcbcx_stats)
60{
61 dcbcx_stats->subtlvs_unrecognized =
62 bfa_os_ntohl(dcbcx_stats->subtlvs_unrecognized);
63 dcbcx_stats->negotiation_failed =
64 bfa_os_ntohl(dcbcx_stats->negotiation_failed);
65 dcbcx_stats->remote_cfg_changed =
66 bfa_os_ntohl(dcbcx_stats->remote_cfg_changed);
67 dcbcx_stats->tlvs_received = bfa_os_ntohl(dcbcx_stats->tlvs_received);
68 dcbcx_stats->tlvs_invalid = bfa_os_ntohl(dcbcx_stats->tlvs_invalid);
69 dcbcx_stats->seqno = bfa_os_ntohl(dcbcx_stats->seqno);
70 dcbcx_stats->ackno = bfa_os_ntohl(dcbcx_stats->ackno);
71 dcbcx_stats->recvd_seqno = bfa_os_ntohl(dcbcx_stats->recvd_seqno);
72 dcbcx_stats->recvd_ackno = bfa_os_ntohl(dcbcx_stats->recvd_ackno);
73}
74
75static void
76bfa_cee_format_lldp_stats(struct bfa_cee_lldp_stats_s *lldp_stats)
77{
78 lldp_stats->frames_transmitted =
79 bfa_os_ntohl(lldp_stats->frames_transmitted);
80 lldp_stats->frames_aged_out = bfa_os_ntohl(lldp_stats->frames_aged_out);
81 lldp_stats->frames_discarded =
82 bfa_os_ntohl(lldp_stats->frames_discarded);
83 lldp_stats->frames_in_error = bfa_os_ntohl(lldp_stats->frames_in_error);
84 lldp_stats->frames_rcvd = bfa_os_ntohl(lldp_stats->frames_rcvd);
85 lldp_stats->tlvs_discarded = bfa_os_ntohl(lldp_stats->tlvs_discarded);
86 lldp_stats->tlvs_unrecognized =
87 bfa_os_ntohl(lldp_stats->tlvs_unrecognized);
88}
89
90static void
91bfa_cee_format_cfg_stats(struct bfa_cee_cfg_stats_s *cfg_stats)
92{
93 cfg_stats->cee_status_down = bfa_os_ntohl(cfg_stats->cee_status_down);
94 cfg_stats->cee_status_up = bfa_os_ntohl(cfg_stats->cee_status_up);
95 cfg_stats->cee_hw_cfg_changed =
96 bfa_os_ntohl(cfg_stats->cee_hw_cfg_changed);
97 cfg_stats->recvd_invalid_cfg =
98 bfa_os_ntohl(cfg_stats->recvd_invalid_cfg);
99}
100
101static void
102bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg_s *lldp_cfg)
103{
104 lldp_cfg->time_to_interval = bfa_os_ntohs(lldp_cfg->time_to_interval);
105 lldp_cfg->enabled_system_cap =
106 bfa_os_ntohs(lldp_cfg->enabled_system_cap);
107}
108
109/**
110 * bfa_cee_attr_meminfo()
111 *
112 *
113 * @param[in] void
114 *
115 * @return Size of DMA region
116 */
117static u32
118bfa_cee_attr_meminfo(void)
119{
120 return BFA_ROUNDUP(sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ);
121}
122
123/**
124 * bfa_cee_stats_meminfo()
125 *
126 *
127 * @param[in] void
128 *
129 * @return Size of DMA region
130 */
131static u32
132bfa_cee_stats_meminfo(void)
133{
134 return BFA_ROUNDUP(sizeof(struct bfa_cee_stats_s), BFA_DMA_ALIGN_SZ);
135}
136
137/**
138 * bfa_cee_get_attr_isr()
139 *
140 *
141 * @param[in] cee - Pointer to the CEE module
142 * status - Return status from the f/w
143 *
144 * @return void
145 */
146static void
147bfa_cee_get_attr_isr(struct bfa_cee_s *cee, bfa_status_t status)
148{
149 cee->get_attr_status = status;
150 bfa_trc(cee, 0);
151 if (status == BFA_STATUS_OK) {
152 bfa_trc(cee, 0);
153 /*
154 * The requested data has been copied to the DMA area, *process
155 * it.
156 */
157 memcpy(cee->attr, cee->attr_dma.kva,
158 sizeof(struct bfa_cee_attr_s));
159 bfa_cee_format_cee_cfg(cee->attr);
160 }
161 cee->get_attr_pending = BFA_FALSE;
162 if (cee->cbfn.get_attr_cbfn) {
163 bfa_trc(cee, 0);
164 cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg, status);
165 }
166 bfa_trc(cee, 0);
167}
168
169/**
170 * bfa_cee_get_attr_isr()
171 *
172 *
173 * @param[in] cee - Pointer to the CEE module
174 * status - Return status from the f/w
175 *
176 * @return void
177 */
178static void
179bfa_cee_get_stats_isr(struct bfa_cee_s *cee, bfa_status_t status)
180{
181 cee->get_stats_status = status;
182 bfa_trc(cee, 0);
183 if (status == BFA_STATUS_OK) {
184 bfa_trc(cee, 0);
185 /*
186 * The requested data has been copied to the DMA area, process
187 * it.
188 */
189 memcpy(cee->stats, cee->stats_dma.kva,
190 sizeof(struct bfa_cee_stats_s));
191 bfa_cee_format_cee_stats(cee->stats);
192 }
193 cee->get_stats_pending = BFA_FALSE;
194 bfa_trc(cee, 0);
195 if (cee->cbfn.get_stats_cbfn) {
196 bfa_trc(cee, 0);
197 cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg, status);
198 }
199 bfa_trc(cee, 0);
200}
201
202/**
203 * bfa_cee_get_attr_isr()
204 *
205 *
206 * @param[in] cee - Pointer to the CEE module
207 * status - Return status from the f/w
208 *
209 * @return void
210 */
211static void
212bfa_cee_reset_stats_isr(struct bfa_cee_s *cee, bfa_status_t status)
213{
214 cee->reset_stats_status = status;
215 cee->reset_stats_pending = BFA_FALSE;
216 if (cee->cbfn.reset_stats_cbfn)
217 cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status);
218}
219
220/**
221 * bfa_cee_meminfo()
222 *
223 *
224 * @param[in] void
225 *
226 * @return Size of DMA region
227 */
228u32
229bfa_cee_meminfo(void)
230{
231 return bfa_cee_attr_meminfo() + bfa_cee_stats_meminfo();
232}
233
234/**
235 * bfa_cee_mem_claim()
236 *
237 *
238 * @param[in] cee CEE module pointer
239 * dma_kva Kernel Virtual Address of CEE DMA Memory
240 * dma_pa Physical Address of CEE DMA Memory
241 *
242 * @return void
243 */
244void
245bfa_cee_mem_claim(struct bfa_cee_s *cee, u8 *dma_kva, u64 dma_pa)
246{
247 cee->attr_dma.kva = dma_kva;
248 cee->attr_dma.pa = dma_pa;
249 cee->stats_dma.kva = dma_kva + bfa_cee_attr_meminfo();
250 cee->stats_dma.pa = dma_pa + bfa_cee_attr_meminfo();
251 cee->attr = (struct bfa_cee_attr_s *)dma_kva;
252 cee->stats =
253 (struct bfa_cee_stats_s *)(dma_kva + bfa_cee_attr_meminfo());
254}
255
256/**
257 * bfa_cee_get_attr()
258 *
259 * Send the request to the f/w to fetch CEE attributes.
260 *
261 * @param[in] Pointer to the CEE module data structure.
262 *
263 * @return Status
264 */
265
266bfa_status_t
267bfa_cee_get_attr(struct bfa_cee_s *cee, struct bfa_cee_attr_s *attr,
268 bfa_cee_get_attr_cbfn_t cbfn, void *cbarg)
269{
270 struct bfi_cee_get_req_s *cmd;
271
272 bfa_assert((cee != NULL) && (cee->ioc != NULL));
273 bfa_trc(cee, 0);
274 if (!bfa_ioc_is_operational(cee->ioc)) {
275 bfa_trc(cee, 0);
276 return BFA_STATUS_IOC_FAILURE;
277 }
278 if (cee->get_attr_pending == BFA_TRUE) {
279 bfa_trc(cee, 0);
280 return BFA_STATUS_DEVBUSY;
281 }
282 cee->get_attr_pending = BFA_TRUE;
283 cmd = (struct bfi_cee_get_req_s *)cee->get_cfg_mb.msg;
284 cee->attr = attr;
285 cee->cbfn.get_attr_cbfn = cbfn;
286 cee->cbfn.get_attr_cbarg = cbarg;
287 bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ,
288 bfa_ioc_portid(cee->ioc));
289 bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa);
290 bfa_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb);
291 bfa_trc(cee, 0);
292
293 return BFA_STATUS_OK;
294}
295
296/**
297 * bfa_cee_get_stats()
298 *
299 * Send the request to the f/w to fetch CEE statistics.
300 *
301 * @param[in] Pointer to the CEE module data structure.
302 *
303 * @return Status
304 */
305
306bfa_status_t
307bfa_cee_get_stats(struct bfa_cee_s *cee, struct bfa_cee_stats_s *stats,
308 bfa_cee_get_stats_cbfn_t cbfn, void *cbarg)
309{
310 struct bfi_cee_get_req_s *cmd;
311
312 bfa_assert((cee != NULL) && (cee->ioc != NULL));
313
314 if (!bfa_ioc_is_operational(cee->ioc)) {
315 bfa_trc(cee, 0);
316 return BFA_STATUS_IOC_FAILURE;
317 }
318 if (cee->get_stats_pending == BFA_TRUE) {
319 bfa_trc(cee, 0);
320 return BFA_STATUS_DEVBUSY;
321 }
322 cee->get_stats_pending = BFA_TRUE;
323 cmd = (struct bfi_cee_get_req_s *)cee->get_stats_mb.msg;
324 cee->stats = stats;
325 cee->cbfn.get_stats_cbfn = cbfn;
326 cee->cbfn.get_stats_cbarg = cbarg;
327 bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_STATS_REQ,
328 bfa_ioc_portid(cee->ioc));
329 bfa_dma_be_addr_set(cmd->dma_addr, cee->stats_dma.pa);
330 bfa_ioc_mbox_queue(cee->ioc, &cee->get_stats_mb);
331 bfa_trc(cee, 0);
332
333 return BFA_STATUS_OK;
334}
335
336/**
337 * bfa_cee_reset_stats()
338 *
339 *
340 * @param[in] Pointer to the CEE module data structure.
341 *
342 * @return Status
343 */
344
345bfa_status_t
346bfa_cee_reset_stats(struct bfa_cee_s *cee, bfa_cee_reset_stats_cbfn_t cbfn,
347 void *cbarg)
348{
349 struct bfi_cee_reset_stats_s *cmd;
350
351 bfa_assert((cee != NULL) && (cee->ioc != NULL));
352 if (!bfa_ioc_is_operational(cee->ioc)) {
353 bfa_trc(cee, 0);
354 return BFA_STATUS_IOC_FAILURE;
355 }
356 if (cee->reset_stats_pending == BFA_TRUE) {
357 bfa_trc(cee, 0);
358 return BFA_STATUS_DEVBUSY;
359 }
360 cee->reset_stats_pending = BFA_TRUE;
361 cmd = (struct bfi_cee_reset_stats_s *)cee->reset_stats_mb.msg;
362 cee->cbfn.reset_stats_cbfn = cbfn;
363 cee->cbfn.reset_stats_cbarg = cbarg;
364 bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_RESET_STATS,
365 bfa_ioc_portid(cee->ioc));
366 bfa_ioc_mbox_queue(cee->ioc, &cee->reset_stats_mb);
367 bfa_trc(cee, 0);
368 return BFA_STATUS_OK;
369}
370
371/**
372 * bfa_cee_isrs()
373 *
374 *
375 * @param[in] Pointer to the CEE module data structure.
376 *
377 * @return void
378 */
379
380void
381bfa_cee_isr(void *cbarg, struct bfi_mbmsg_s *m)
382{
383 union bfi_cee_i2h_msg_u *msg;
384 struct bfi_cee_get_rsp_s *get_rsp;
385 struct bfa_cee_s *cee = (struct bfa_cee_s *)cbarg;
386 msg = (union bfi_cee_i2h_msg_u *)m;
387 get_rsp = (struct bfi_cee_get_rsp_s *)m;
388 bfa_trc(cee, msg->mh.msg_id);
389 switch (msg->mh.msg_id) {
390 case BFI_CEE_I2H_GET_CFG_RSP:
391 bfa_trc(cee, get_rsp->cmd_status);
392 bfa_cee_get_attr_isr(cee, get_rsp->cmd_status);
393 break;
394 case BFI_CEE_I2H_GET_STATS_RSP:
395 bfa_cee_get_stats_isr(cee, get_rsp->cmd_status);
396 break;
397 case BFI_CEE_I2H_RESET_STATS_RSP:
398 bfa_cee_reset_stats_isr(cee, get_rsp->cmd_status);
399 break;
400 default:
401 bfa_assert(0);
402 }
403}
404
405/**
406 * bfa_cee_hbfail()
407 *
408 *
409 * @param[in] Pointer to the CEE module data structure.
410 *
411 * @return void
412 */
413
414void
415bfa_cee_hbfail(void *arg)
416{
417 struct bfa_cee_s *cee;
418 cee = (struct bfa_cee_s *)arg;
419
420 if (cee->get_attr_pending == BFA_TRUE) {
421 cee->get_attr_status = BFA_STATUS_FAILED;
422 cee->get_attr_pending = BFA_FALSE;
423 if (cee->cbfn.get_attr_cbfn) {
424 cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg,
425 BFA_STATUS_FAILED);
426 }
427 }
428 if (cee->get_stats_pending == BFA_TRUE) {
429 cee->get_stats_status = BFA_STATUS_FAILED;
430 cee->get_stats_pending = BFA_FALSE;
431 if (cee->cbfn.get_stats_cbfn) {
432 cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg,
433 BFA_STATUS_FAILED);
434 }
435 }
436 if (cee->reset_stats_pending == BFA_TRUE) {
437 cee->reset_stats_status = BFA_STATUS_FAILED;
438 cee->reset_stats_pending = BFA_FALSE;
439 if (cee->cbfn.reset_stats_cbfn) {
440 cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg,
441 BFA_STATUS_FAILED);
442 }
443 }
444}
445
446/**
447 * bfa_cee_attach()
448 *
449 *
450 * @param[in] cee - Pointer to the CEE module data structure
451 * ioc - Pointer to the ioc module data structure
452 * dev - Pointer to the device driver module data structure
453 * The device driver specific mbox ISR functions have
454 * this pointer as one of the parameters.
455 * trcmod -
456 * logmod -
457 *
458 * @return void
459 */
460void
461bfa_cee_attach(struct bfa_cee_s *cee, struct bfa_ioc_s *ioc, void *dev,
462 struct bfa_trc_mod_s *trcmod, struct bfa_log_mod_s *logmod)
463{
464 bfa_assert(cee != NULL);
465 cee->dev = dev;
466 cee->trcmod = trcmod;
467 cee->logmod = logmod;
468 cee->ioc = ioc;
469
470 bfa_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
471 bfa_ioc_hbfail_init(&cee->hbfail, bfa_cee_hbfail, cee);
472 bfa_ioc_hbfail_register(cee->ioc, &cee->hbfail);
473 bfa_trc(cee, 0);
474}
475
476/**
477 * bfa_cee_detach()
478 *
479 *
480 * @param[in] cee - Pointer to the CEE module data structure
481 *
482 * @return void
483 */
484void
485bfa_cee_detach(struct bfa_cee_s *cee)
486{
487 /*
488 * For now, just check if there is some ioctl pending and mark that as
489 * failed?
490 */
491 /* bfa_cee_hbfail(cee); */
492}
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 76fa5c5b40dd..c2fa07f2485d 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -15,27 +15,992 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18#include <bfa.h> 18#include "bfa_modules.h"
19#include <defs/bfa_defs_pci.h> 19#include "bfi_ctreg.h"
20#include <cs/bfa_debug.h> 20#include "bfad_drv.h"
21#include <bfa_iocfc.h>
22 21
23#define DEF_CFG_NUM_FABRICS 1 22BFA_TRC_FILE(HAL, CORE);
24#define DEF_CFG_NUM_LPORTS 256
25#define DEF_CFG_NUM_CQS 4
26#define DEF_CFG_NUM_IOIM_REQS (BFA_IOIM_MAX)
27#define DEF_CFG_NUM_TSKIM_REQS 128
28#define DEF_CFG_NUM_FCXP_REQS 64
29#define DEF_CFG_NUM_UF_BUFS 64
30#define DEF_CFG_NUM_RPORTS 1024
31#define DEF_CFG_NUM_ITNIMS (DEF_CFG_NUM_RPORTS)
32#define DEF_CFG_NUM_TINS 256
33 23
34#define DEF_CFG_NUM_SGPGS 2048 24/**
35#define DEF_CFG_NUM_REQQ_ELEMS 256 25 * BFA IOC FC related definitions
36#define DEF_CFG_NUM_RSPQ_ELEMS 64 26 */
37#define DEF_CFG_NUM_SBOOT_TGTS 16 27
38#define DEF_CFG_NUM_SBOOT_LUNS 16 28/**
29 * IOC local definitions
30 */
31#define BFA_IOCFC_TOV 5000 /* msecs */
32
33enum {
34 BFA_IOCFC_ACT_NONE = 0,
35 BFA_IOCFC_ACT_INIT = 1,
36 BFA_IOCFC_ACT_STOP = 2,
37 BFA_IOCFC_ACT_DISABLE = 3,
38};
39
40#define DEF_CFG_NUM_FABRICS 1
41#define DEF_CFG_NUM_LPORTS 256
42#define DEF_CFG_NUM_CQS 4
43#define DEF_CFG_NUM_IOIM_REQS (BFA_IOIM_MAX)
44#define DEF_CFG_NUM_TSKIM_REQS 128
45#define DEF_CFG_NUM_FCXP_REQS 64
46#define DEF_CFG_NUM_UF_BUFS 64
47#define DEF_CFG_NUM_RPORTS 1024
48#define DEF_CFG_NUM_ITNIMS (DEF_CFG_NUM_RPORTS)
49#define DEF_CFG_NUM_TINS 256
50
51#define DEF_CFG_NUM_SGPGS 2048
52#define DEF_CFG_NUM_REQQ_ELEMS 256
53#define DEF_CFG_NUM_RSPQ_ELEMS 64
54#define DEF_CFG_NUM_SBOOT_TGTS 16
55#define DEF_CFG_NUM_SBOOT_LUNS 16
56
57/**
58 * forward declaration for IOC FC functions
59 */
60static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
61static void bfa_iocfc_disable_cbfn(void *bfa_arg);
62static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
63static void bfa_iocfc_reset_cbfn(void *bfa_arg);
64static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
65
66/**
67 * BFA Interrupt handling functions
68 */
69static void
70bfa_msix_errint(struct bfa_s *bfa, u32 intr)
71{
72 bfa_ioc_error_isr(&bfa->ioc);
73}
74
75static void
76bfa_msix_lpu(struct bfa_s *bfa)
77{
78 bfa_ioc_mbox_isr(&bfa->ioc);
79}
80
81static void
82bfa_reqq_resume(struct bfa_s *bfa, int qid)
83{
84 struct list_head *waitq, *qe, *qen;
85 struct bfa_reqq_wait_s *wqe;
86
87 waitq = bfa_reqq(bfa, qid);
88 list_for_each_safe(qe, qen, waitq) {
89 /**
90 * Callback only as long as there is room in request queue
91 */
92 if (bfa_reqq_full(bfa, qid))
93 break;
94
95 list_del(qe);
96 wqe = (struct bfa_reqq_wait_s *) qe;
97 wqe->qresume(wqe->cbarg);
98 }
99}
100
101void
102bfa_msix_all(struct bfa_s *bfa, int vec)
103{
104 bfa_intx(bfa);
105}
106
107/**
108 * hal_intr_api
109 */
110bfa_boolean_t
111bfa_intx(struct bfa_s *bfa)
112{
113 u32 intr, qintr;
114 int queue;
115
116 intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
117 if (!intr)
118 return BFA_FALSE;
119
120 /**
121 * RME completion queue interrupt
122 */
123 qintr = intr & __HFN_INT_RME_MASK;
124 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);
125
126 for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
127 if (intr & (__HFN_INT_RME_Q0 << queue))
128 bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
129 }
130 intr &= ~qintr;
131 if (!intr)
132 return BFA_TRUE;
133
134 /**
135 * CPE completion queue interrupt
136 */
137 qintr = intr & __HFN_INT_CPE_MASK;
138 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);
139
140 for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
141 if (intr & (__HFN_INT_CPE_Q0 << queue))
142 bfa_msix_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
143 }
144 intr &= ~qintr;
145 if (!intr)
146 return BFA_TRUE;
147
148 bfa_msix_lpu_err(bfa, intr);
149
150 return BFA_TRUE;
151}
152
153void
154bfa_intx_enable(struct bfa_s *bfa)
155{
156 bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, bfa->iocfc.intr_mask);
157}
158
159void
160bfa_intx_disable(struct bfa_s *bfa)
161{
162 bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L);
163}
164
165void
166bfa_isr_enable(struct bfa_s *bfa)
167{
168 u32 intr_unmask;
169 int pci_func = bfa_ioc_pcifn(&bfa->ioc);
170
171 bfa_trc(bfa, pci_func);
172
173 bfa_msix_install(bfa);
174 intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
175 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS |
176 __HFN_INT_LL_HALT);
177
178 if (pci_func == 0)
179 intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
180 __HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 |
181 __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 |
182 __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 |
183 __HFN_INT_MBOX_LPU0);
184 else
185 intr_unmask |= (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 |
186 __HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 |
187 __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
188 __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
189 __HFN_INT_MBOX_LPU1);
190
191 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr_unmask);
192 bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, ~intr_unmask);
193 bfa->iocfc.intr_mask = ~intr_unmask;
194 bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
195}
196
197void
198bfa_isr_disable(struct bfa_s *bfa)
199{
200 bfa_isr_mode_set(bfa, BFA_FALSE);
201 bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L);
202 bfa_msix_uninstall(bfa);
203}
204
205void
206bfa_msix_reqq(struct bfa_s *bfa, int qid)
207{
208 struct list_head *waitq;
209
210 qid &= (BFI_IOC_MAX_CQS - 1);
211
212 bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);
213
214 /**
215 * Resume any pending requests in the corresponding reqq.
216 */
217 waitq = bfa_reqq(bfa, qid);
218 if (!list_empty(waitq))
219 bfa_reqq_resume(bfa, qid);
220}
221
222void
223bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
224{
225 bfa_trc(bfa, m->mhdr.msg_class);
226 bfa_trc(bfa, m->mhdr.msg_id);
227 bfa_trc(bfa, m->mhdr.mtag.i2htok);
228 bfa_assert(0);
229 bfa_trc_stop(bfa->trcmod);
230}
231
232void
233bfa_msix_rspq(struct bfa_s *bfa, int qid)
234{
235 struct bfi_msg_s *m;
236 u32 pi, ci;
237 struct list_head *waitq;
238
239 bfa_trc_fp(bfa, qid);
240
241 qid &= (BFI_IOC_MAX_CQS - 1);
242
243 bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);
244
245 ci = bfa_rspq_ci(bfa, qid);
246 pi = bfa_rspq_pi(bfa, qid);
247
248 bfa_trc_fp(bfa, ci);
249 bfa_trc_fp(bfa, pi);
250
251 if (bfa->rme_process) {
252 while (ci != pi) {
253 m = bfa_rspq_elem(bfa, qid, ci);
254 bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX);
255
256 bfa_isrs[m->mhdr.msg_class] (bfa, m);
257
258 CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
259 }
260 }
261
262 /**
263 * update CI
264 */
265 bfa_rspq_ci(bfa, qid) = pi;
266 bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[qid], pi);
267 mmiowb();
268
269 /**
270 * Resume any pending requests in the corresponding reqq.
271 */
272 waitq = bfa_reqq(bfa, qid);
273 if (!list_empty(waitq))
274 bfa_reqq_resume(bfa, qid);
275}
276
277void
278bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
279{
280 u32 intr, curr_value;
281
282 intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
283
284 if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
285 bfa_msix_lpu(bfa);
286
287 intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
288 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT);
289
290 if (intr) {
291 if (intr & __HFN_INT_LL_HALT) {
292 /**
293 * If LL_HALT bit is set then FW Init Halt LL Port
294 * Register needs to be cleared as well so Interrupt
295 * Status Register will be cleared.
296 */
297 curr_value = bfa_reg_read(bfa->ioc.ioc_regs.ll_halt);
298 curr_value &= ~__FW_INIT_HALT_P;
299 bfa_reg_write(bfa->ioc.ioc_regs.ll_halt, curr_value);
300 }
301
302 if (intr & __HFN_INT_ERR_PSS) {
303 /**
304 * ERR_PSS bit needs to be cleared as well in case
305 * interrups are shared so driver's interrupt handler is
306 * still called eventhough it is already masked out.
307 */
308 curr_value = bfa_reg_read(
309 bfa->ioc.ioc_regs.pss_err_status_reg);
310 curr_value &= __PSS_ERR_STATUS_SET;
311 bfa_reg_write(bfa->ioc.ioc_regs.pss_err_status_reg,
312 curr_value);
313 }
314
315 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr);
316 bfa_msix_errint(bfa, intr);
317 }
318}
319
320void
321bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func)
322{
323 bfa_isrs[mc] = isr_func;
324}
325
326/**
327 * BFA IOC FC related functions
328 */
329
330/**
331 * hal_ioc_pvt BFA IOC private functions
332 */
333
334static void
335bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
336{
337 int i, per_reqq_sz, per_rspq_sz;
338
339 per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
340 BFA_DMA_ALIGN_SZ);
341 per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
342 BFA_DMA_ALIGN_SZ);
343
344 /*
345 * Calculate CQ size
346 */
347 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
348 *dm_len = *dm_len + per_reqq_sz;
349 *dm_len = *dm_len + per_rspq_sz;
350 }
351
352 /*
353 * Calculate Shadow CI/PI size
354 */
355 for (i = 0; i < cfg->fwcfg.num_cqs; i++)
356 *dm_len += (2 * BFA_CACHELINE_SZ);
357}
358
359static void
360bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
361{
362 *dm_len +=
363 BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
364 *dm_len +=
365 BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
366 BFA_CACHELINE_SZ);
367}
368
369/**
370 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
371 */
372static void
373bfa_iocfc_send_cfg(void *bfa_arg)
374{
375 struct bfa_s *bfa = bfa_arg;
376 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
377 struct bfi_iocfc_cfg_req_s cfg_req;
378 struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
379 struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg;
380 int i;
381
382 bfa_assert(cfg->fwcfg.num_cqs <= BFI_IOC_MAX_CQS);
383 bfa_trc(bfa, cfg->fwcfg.num_cqs);
384
385 bfa_iocfc_reset_queues(bfa);
386
387 /**
388 * initialize IOC configuration info
389 */
390 cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
391 cfg_info->num_cqs = cfg->fwcfg.num_cqs;
392
393 bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
394 /**
395 * dma map REQ and RSP circular queues and shadow pointers
396 */
397 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
398 bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
399 iocfc->req_cq_ba[i].pa);
400 bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
401 iocfc->req_cq_shadow_ci[i].pa);
402 cfg_info->req_cq_elems[i] =
403 bfa_os_htons(cfg->drvcfg.num_reqq_elems);
404
405 bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
406 iocfc->rsp_cq_ba[i].pa);
407 bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
408 iocfc->rsp_cq_shadow_pi[i].pa);
409 cfg_info->rsp_cq_elems[i] =
410 bfa_os_htons(cfg->drvcfg.num_rspq_elems);
411 }
412
413 /**
414 * Enable interrupt coalescing if it is driver init path
415 * and not ioc disable/enable path.
416 */
417 if (!iocfc->cfgdone)
418 cfg_info->intr_attr.coalesce = BFA_TRUE;
419
420 iocfc->cfgdone = BFA_FALSE;
421
422 /**
423 * dma map IOC configuration itself
424 */
425 bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
426 bfa_lpuid(bfa));
427 bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);
428
429 bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
430 sizeof(struct bfi_iocfc_cfg_req_s));
431}
432
433static void
434bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
435 struct bfa_pcidev_s *pcidev)
436{
437 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
438
439 bfa->bfad = bfad;
440 iocfc->bfa = bfa;
441 iocfc->action = BFA_IOCFC_ACT_NONE;
442
443 bfa_os_assign(iocfc->cfg, *cfg);
444
445 /**
446 * Initialize chip specific handlers.
447 */
448 if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) {
449 iocfc->hwif.hw_reginit = bfa_hwct_reginit;
450 iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
451 iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
452 iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
453 iocfc->hwif.hw_msix_install = bfa_hwct_msix_install;
454 iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
455 iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
456 iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
457 iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
458 } else {
459 iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
460 iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack;
461 iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
462 iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
463 iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install;
464 iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
465 iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
466 iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
467 iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
468 }
469
470 iocfc->hwif.hw_reginit(bfa);
471 bfa->msix.nvecs = 0;
472}
473
474static void
475bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
476 struct bfa_meminfo_s *meminfo)
477{
478 u8 *dm_kva;
479 u64 dm_pa;
480 int i, per_reqq_sz, per_rspq_sz;
481 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
482 int dbgsz;
483
484 dm_kva = bfa_meminfo_dma_virt(meminfo);
485 dm_pa = bfa_meminfo_dma_phys(meminfo);
486
487 /*
488 * First allocate dma memory for IOC.
489 */
490 bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
491 dm_kva += bfa_ioc_meminfo();
492 dm_pa += bfa_ioc_meminfo();
493
494 /*
495 * Claim DMA-able memory for the request/response queues and for shadow
496 * ci/pi registers
497 */
498 per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
499 BFA_DMA_ALIGN_SZ);
500 per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
501 BFA_DMA_ALIGN_SZ);
502
503 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
504 iocfc->req_cq_ba[i].kva = dm_kva;
505 iocfc->req_cq_ba[i].pa = dm_pa;
506 bfa_os_memset(dm_kva, 0, per_reqq_sz);
507 dm_kva += per_reqq_sz;
508 dm_pa += per_reqq_sz;
509
510 iocfc->rsp_cq_ba[i].kva = dm_kva;
511 iocfc->rsp_cq_ba[i].pa = dm_pa;
512 bfa_os_memset(dm_kva, 0, per_rspq_sz);
513 dm_kva += per_rspq_sz;
514 dm_pa += per_rspq_sz;
515 }
516
517 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
518 iocfc->req_cq_shadow_ci[i].kva = dm_kva;
519 iocfc->req_cq_shadow_ci[i].pa = dm_pa;
520 dm_kva += BFA_CACHELINE_SZ;
521 dm_pa += BFA_CACHELINE_SZ;
522
523 iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
524 iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
525 dm_kva += BFA_CACHELINE_SZ;
526 dm_pa += BFA_CACHELINE_SZ;
527 }
528
529 /*
530 * Claim DMA-able memory for the config info page
531 */
532 bfa->iocfc.cfg_info.kva = dm_kva;
533 bfa->iocfc.cfg_info.pa = dm_pa;
534 bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
535 dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
536 dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
537
538 /*
539 * Claim DMA-able memory for the config response
540 */
541 bfa->iocfc.cfgrsp_dma.kva = dm_kva;
542 bfa->iocfc.cfgrsp_dma.pa = dm_pa;
543 bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
544
545 dm_kva +=
546 BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
547 BFA_CACHELINE_SZ);
548 dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
549 BFA_CACHELINE_SZ);
550
551
552 bfa_meminfo_dma_virt(meminfo) = dm_kva;
553 bfa_meminfo_dma_phys(meminfo) = dm_pa;
554
555 dbgsz = bfa_ioc_debug_trcsz(bfa_auto_recover);
556 if (dbgsz > 0) {
557 bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo));
558 bfa_meminfo_kva(meminfo) += dbgsz;
559 }
560}
561
562/**
563 * Start BFA submodules.
564 */
565static void
566bfa_iocfc_start_submod(struct bfa_s *bfa)
567{
568 int i;
569
570 bfa->rme_process = BFA_TRUE;
571
572 for (i = 0; hal_mods[i]; i++)
573 hal_mods[i]->start(bfa);
574}
575
576/**
577 * Disable BFA submodules.
578 */
579static void
580bfa_iocfc_disable_submod(struct bfa_s *bfa)
581{
582 int i;
583
584 for (i = 0; hal_mods[i]; i++)
585 hal_mods[i]->iocdisable(bfa);
586}
587
588static void
589bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
590{
591 struct bfa_s *bfa = bfa_arg;
592
593 if (complete) {
594 if (bfa->iocfc.cfgdone)
595 bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
596 else
597 bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
598 } else {
599 if (bfa->iocfc.cfgdone)
600 bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
601 }
602}
603
604static void
605bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
606{
607 struct bfa_s *bfa = bfa_arg;
608 struct bfad_s *bfad = bfa->bfad;
609
610 if (compl)
611 complete(&bfad->comp);
612 else
613 bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
614}
615
616static void
617bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
618{
619 struct bfa_s *bfa = bfa_arg;
620 struct bfad_s *bfad = bfa->bfad;
621
622 if (compl)
623 complete(&bfad->disable_comp);
624}
625
626/**
627 * Update BFA configuration from firmware configuration.
628 */
629static void
630bfa_iocfc_cfgrsp(struct bfa_s *bfa)
631{
632 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
633 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
634 struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;
635
636 fwcfg->num_cqs = fwcfg->num_cqs;
637 fwcfg->num_ioim_reqs = bfa_os_ntohs(fwcfg->num_ioim_reqs);
638 fwcfg->num_tskim_reqs = bfa_os_ntohs(fwcfg->num_tskim_reqs);
639 fwcfg->num_fcxp_reqs = bfa_os_ntohs(fwcfg->num_fcxp_reqs);
640 fwcfg->num_uf_bufs = bfa_os_ntohs(fwcfg->num_uf_bufs);
641 fwcfg->num_rports = bfa_os_ntohs(fwcfg->num_rports);
642
643 iocfc->cfgdone = BFA_TRUE;
644
645 /**
646 * Configuration is complete - initialize/start submodules
647 */
648 bfa_fcport_init(bfa);
649
650 if (iocfc->action == BFA_IOCFC_ACT_INIT)
651 bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
652 else
653 bfa_iocfc_start_submod(bfa);
654}
655void
656bfa_iocfc_reset_queues(struct bfa_s *bfa)
657{
658 int q;
659
660 for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
661 bfa_reqq_ci(bfa, q) = 0;
662 bfa_reqq_pi(bfa, q) = 0;
663 bfa_rspq_ci(bfa, q) = 0;
664 bfa_rspq_pi(bfa, q) = 0;
665 }
666}
667
668/**
669 * IOC enable request is complete
670 */
671static void
672bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
673{
674 struct bfa_s *bfa = bfa_arg;
675
676 if (status != BFA_STATUS_OK) {
677 bfa_isr_disable(bfa);
678 if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
679 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
680 bfa_iocfc_init_cb, bfa);
681 return;
682 }
683
684 bfa_iocfc_send_cfg(bfa);
685}
686
687/**
688 * IOC disable request is complete
689 */
690static void
691bfa_iocfc_disable_cbfn(void *bfa_arg)
692{
693 struct bfa_s *bfa = bfa_arg;
694
695 bfa_isr_disable(bfa);
696 bfa_iocfc_disable_submod(bfa);
697
698 if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
699 bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
700 bfa);
701 else {
702 bfa_assert(bfa->iocfc.action == BFA_IOCFC_ACT_DISABLE);
703 bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
704 bfa);
705 }
706}
707
708/**
709 * Notify sub-modules of hardware failure.
710 */
711static void
712bfa_iocfc_hbfail_cbfn(void *bfa_arg)
713{
714 struct bfa_s *bfa = bfa_arg;
715
716 bfa->rme_process = BFA_FALSE;
717
718 bfa_isr_disable(bfa);
719 bfa_iocfc_disable_submod(bfa);
720
721 if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
722 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
723 bfa);
724}
725
/**
 * Actions on chip-reset completion: re-zero the queue indices and
 * re-enable interrupt handling.
 */
static void
bfa_iocfc_reset_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa_iocfc_reset_queues(bfa);
	bfa_isr_enable(bfa);
}
737
738/**
739 * hal_ioc_public
740 */
741
742/**
743 * Query IOC memory requirement information.
744 */
745void
746bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
747 u32 *dm_len)
748{
749 /* dma memory for IOC */
750 *dm_len += bfa_ioc_meminfo();
751
752 bfa_iocfc_fw_cfg_sz(cfg, dm_len);
753 bfa_iocfc_cqs_sz(cfg, dm_len);
754 *km_len += bfa_ioc_debug_trcsz(bfa_auto_recover);
755}
756
757/**
758 * Query IOC memory requirement information.
759 */
760void
761bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
762 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
763{
764 int i;
765 struct bfa_ioc_s *ioc = &bfa->ioc;
766
767 bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
768 bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
769 bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
770 bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;
771
772 ioc->trcmod = bfa->trcmod;
773 bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);
774
775 /**
776 * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
777 */
778 if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
779 bfa_ioc_set_fcmode(&bfa->ioc);
780
781 bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC);
782 bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);
783
784 bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
785 bfa_iocfc_mem_claim(bfa, cfg, meminfo);
786 bfa_timer_init(&bfa->timer_mod);
787
788 INIT_LIST_HEAD(&bfa->comp_q);
789 for (i = 0; i < BFI_IOC_MAX_CQS; i++)
790 INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
791}
792
793/**
794 * Query IOC memory requirement information.
795 */
796void
797bfa_iocfc_detach(struct bfa_s *bfa)
798{
799 bfa_ioc_detach(&bfa->ioc);
800}
801
802/**
803 * Query IOC memory requirement information.
804 */
805void
806bfa_iocfc_init(struct bfa_s *bfa)
807{
808 bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
809 bfa_ioc_enable(&bfa->ioc);
810}
811
812/**
813 * IOC start called from bfa_start(). Called to start IOC operations
814 * at driver instantiation for this instance.
815 */
816void
817bfa_iocfc_start(struct bfa_s *bfa)
818{
819 if (bfa->iocfc.cfgdone)
820 bfa_iocfc_start_submod(bfa);
821}
822
823/**
824 * IOC stop called from bfa_stop(). Called only when driver is unloaded
825 * for this instance.
826 */
827void
828bfa_iocfc_stop(struct bfa_s *bfa)
829{
830 bfa->iocfc.action = BFA_IOCFC_ACT_STOP;
831
832 bfa->rme_process = BFA_FALSE;
833 bfa_ioc_disable(&bfa->ioc);
834}
835
836void
837bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
838{
839 struct bfa_s *bfa = bfaarg;
840 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
841 union bfi_iocfc_i2h_msg_u *msg;
842
843 msg = (union bfi_iocfc_i2h_msg_u *) m;
844 bfa_trc(bfa, msg->mh.msg_id);
845
846 switch (msg->mh.msg_id) {
847 case BFI_IOCFC_I2H_CFG_REPLY:
848 iocfc->cfg_reply = &msg->cfg_reply;
849 bfa_iocfc_cfgrsp(bfa);
850 break;
851 case BFI_IOCFC_I2H_UPDATEQ_RSP:
852 iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
853 break;
854 default:
855 bfa_assert(0);
856 }
857}
858
859void
860bfa_adapter_get_attr(struct bfa_s *bfa, struct bfa_adapter_attr_s *ad_attr)
861{
862 bfa_ioc_get_adapter_attr(&bfa->ioc, ad_attr);
863}
864
865u64
866bfa_adapter_get_id(struct bfa_s *bfa)
867{
868 return bfa_ioc_get_adid(&bfa->ioc);
869}
870
871void
872bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
873{
874 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
875
876 attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;
877
878 attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
879 bfa_os_ntohs(iocfc->cfginfo->intr_attr.delay) :
880 bfa_os_ntohs(iocfc->cfgrsp->intr_attr.delay);
881
882 attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
883 bfa_os_ntohs(iocfc->cfginfo->intr_attr.latency) :
884 bfa_os_ntohs(iocfc->cfgrsp->intr_attr.latency);
885
886 attr->config = iocfc->cfg;
887}
888
889bfa_status_t
890bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
891{
892 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
893 struct bfi_iocfc_set_intr_req_s *m;
894
895 iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
896 iocfc->cfginfo->intr_attr.delay = bfa_os_htons(attr->delay);
897 iocfc->cfginfo->intr_attr.latency = bfa_os_htons(attr->latency);
898
899 if (!bfa_iocfc_is_operational(bfa))
900 return BFA_STATUS_OK;
901
902 m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
903 if (!m)
904 return BFA_STATUS_DEVBUSY;
905
906 bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
907 bfa_lpuid(bfa));
908 m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
909 m->delay = iocfc->cfginfo->intr_attr.delay;
910 m->latency = iocfc->cfginfo->intr_attr.latency;
911
912 bfa_trc(bfa, attr->delay);
913 bfa_trc(bfa, attr->latency);
914
915 bfa_reqq_produce(bfa, BFA_REQQ_IOC);
916 return BFA_STATUS_OK;
917}
918
919void
920bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
921{
922 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
923
924 iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
925 bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
926}
927/**
928 * Enable IOC after it is disabled.
929 */
930void
931bfa_iocfc_enable(struct bfa_s *bfa)
932{
933 bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
934 "IOC Enable");
935 bfa_ioc_enable(&bfa->ioc);
936}
937
938void
939bfa_iocfc_disable(struct bfa_s *bfa)
940{
941 bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
942 "IOC Disable");
943 bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;
944
945 bfa->rme_process = BFA_FALSE;
946 bfa_ioc_disable(&bfa->ioc);
947}
948
949
950bfa_boolean_t
951bfa_iocfc_is_operational(struct bfa_s *bfa)
952{
953 return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
954}
955
956/**
957 * Return boot target port wwns -- read from boot information in flash.
958 */
959void
960bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
961{
962 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
963 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
964 int i;
965
966 if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
967 bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
968 *nwwns = cfgrsp->pbc_cfg.nbluns;
969 for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
970 wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;
971
972 return;
973 }
974
975 *nwwns = cfgrsp->bootwwns.nwwns;
976 memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
977}
978
979void
980bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa, struct bfa_boot_pbc_s *pbcfg)
981{
982 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
983 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
984
985 pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
986 pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
987 pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
988 memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
989}
990
991int
992bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
993{
994 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
995 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
996
997 memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
998 return cfgrsp->pbc_cfg.nvports;
999}
1000
1001/**
1002 * hal_api
1003 */
39 1004
40/** 1005/**
41 * Use this function query the memory requirement of the BFA library. 1006 * Use this function query the memory requirement of the BFA library.
@@ -45,16 +1010,16 @@
45 * This call will fail, if the cap is out of range compared to pre-defined 1010 * This call will fail, if the cap is out of range compared to pre-defined
46 * values within the BFA library 1011 * values within the BFA library
47 * 1012 *
48 * @param[in] cfg - pointer to bfa_ioc_cfg_t. Driver layer should indicate 1013 * @param[in] cfg - pointer to bfa_ioc_cfg_t. Driver layer should indicate
49 * its configuration in this structure. 1014 * its configuration in this structure.
50 * The default values for struct bfa_iocfc_cfg_s can be 1015 * The default values for struct bfa_iocfc_cfg_s can be
51 * fetched using bfa_cfg_get_default() API. 1016 * fetched using bfa_cfg_get_default() API.
52 * 1017 *
53 * If cap's boundary check fails, the library will use 1018 * If cap's boundary check fails, the library will use
54 * the default bfa_cap_t values (and log a warning msg). 1019 * the default bfa_cap_t values (and log a warning msg).
55 * 1020 *
56 * @param[out] meminfo - pointer to bfa_meminfo_t. This content 1021 * @param[out] meminfo - pointer to bfa_meminfo_t. This content
57 * indicates the memory type (see bfa_mem_type_t) and 1022 * indicates the memory type (see bfa_mem_type_t) and
58 * amount of memory required. 1023 * amount of memory required.
59 * 1024 *
60 * Driver should allocate the memory, populate the 1025 * Driver should allocate the memory, populate the
@@ -68,8 +1033,8 @@
68void 1033void
69bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo) 1034bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
70{ 1035{
71 int i; 1036 int i;
72 u32 km_len = 0, dm_len = 0; 1037 u32 km_len = 0, dm_len = 0;
73 1038
74 bfa_assert((cfg != NULL) && (meminfo != NULL)); 1039 bfa_assert((cfg != NULL) && (meminfo != NULL));
75 1040
@@ -90,26 +1055,6 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
90 meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len; 1055 meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
91} 1056}
92 1057
93static void
94bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
95{
96 struct bfa_port_s *port = &bfa->modules.port;
97 uint32_t dm_len;
98 uint8_t *dm_kva;
99 uint64_t dm_pa;
100
101 dm_len = bfa_port_meminfo();
102 dm_kva = bfa_meminfo_dma_virt(mi);
103 dm_pa = bfa_meminfo_dma_phys(mi);
104
105 memset(port, 0, sizeof(struct bfa_port_s));
106 bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod, bfa->logm);
107 bfa_port_mem_claim(port, dm_kva, dm_pa);
108
109 bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
110 bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
111}
112
113/** 1058/**
114 * Use this function to do attach the driver instance with the BFA 1059 * Use this function to do attach the driver instance with the BFA
115 * library. This function will not trigger any HW initialization 1060 * library. This function will not trigger any HW initialization
@@ -119,14 +1064,14 @@ bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
119 * pre-defined values within the BFA library 1064 * pre-defined values within the BFA library
120 * 1065 *
121 * @param[out] bfa Pointer to bfa_t. 1066 * @param[out] bfa Pointer to bfa_t.
122 * @param[in] bfad Opaque handle back to the driver's IOC structure 1067 * @param[in] bfad Opaque handle back to the driver's IOC structure
123 * @param[in] cfg Pointer to bfa_ioc_cfg_t. Should be same structure 1068 * @param[in] cfg Pointer to bfa_ioc_cfg_t. Should be same structure
124 * that was used in bfa_cfg_get_meminfo(). 1069 * that was used in bfa_cfg_get_meminfo().
125 * @param[in] meminfo Pointer to bfa_meminfo_t. The driver should 1070 * @param[in] meminfo Pointer to bfa_meminfo_t. The driver should
126 * use the bfa_cfg_get_meminfo() call to 1071 * use the bfa_cfg_get_meminfo() call to
127 * find the memory blocks required, allocate the 1072 * find the memory blocks required, allocate the
128 * required memory and provide the starting addresses. 1073 * required memory and provide the starting addresses.
129 * @param[in] pcidev pointer to struct bfa_pcidev_s 1074 * @param[in] pcidev pointer to struct bfa_pcidev_s
130 * 1075 *
131 * @return 1076 * @return
132 * void 1077 * void
@@ -140,8 +1085,8 @@ void
140bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, 1085bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
141 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) 1086 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
142{ 1087{
143 int i; 1088 int i;
144 struct bfa_mem_elem_s *melem; 1089 struct bfa_mem_elem_s *melem;
145 1090
146 bfa->fcs = BFA_FALSE; 1091 bfa->fcs = BFA_FALSE;
147 1092
@@ -195,20 +1140,6 @@ bfa_init_trc(struct bfa_s *bfa, struct bfa_trc_mod_s *trcmod)
195 bfa->trcmod = trcmod; 1140 bfa->trcmod = trcmod;
196} 1141}
197 1142
198
199void
200bfa_init_log(struct bfa_s *bfa, struct bfa_log_mod_s *logmod)
201{
202 bfa->logm = logmod;
203}
204
205
206void
207bfa_init_aen(struct bfa_s *bfa, struct bfa_aen_s *aen)
208{
209 bfa->aen = aen;
210}
211
212void 1143void
213bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog) 1144bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog)
214{ 1145{
@@ -254,14 +1185,14 @@ bfa_start(struct bfa_s *bfa)
254 1185
255/** 1186/**
256 * Use this function quiese the IOC. This function will return immediately, 1187 * Use this function quiese the IOC. This function will return immediately,
257 * when the IOC is actually stopped, the bfa_cb_stop() will be called. 1188 * when the IOC is actually stopped, the bfad->comp will be set.
258 * 1189 *
259 * @param[in] bfa - pointer to bfa_t. 1190 * @param[in]bfa - pointer to bfa_t.
260 * 1191 *
261 * @return None 1192 * @return None
262 * 1193 *
263 * Special Considerations: 1194 * Special Considerations:
264 * bfa_cb_stop() could be called before or after bfa_stop() returns. 1195 * bfad->comp can be set before or after bfa_stop() returns.
265 * 1196 *
266 * @note 1197 * @note
267 * In case of any failure, we could handle it automatically by doing a 1198 * In case of any failure, we could handle it automatically by doing a
@@ -283,9 +1214,9 @@ bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
283void 1214void
284bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q) 1215bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
285{ 1216{
286 struct list_head *qe; 1217 struct list_head *qe;
287 struct list_head *qen; 1218 struct list_head *qen;
288 struct bfa_cb_qe_s *hcb_qe; 1219 struct bfa_cb_qe_s *hcb_qe;
289 1220
290 list_for_each_safe(qe, qen, comp_q) { 1221 list_for_each_safe(qe, qen, comp_q) {
291 hcb_qe = (struct bfa_cb_qe_s *) qe; 1222 hcb_qe = (struct bfa_cb_qe_s *) qe;
@@ -296,8 +1227,8 @@ bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
296void 1227void
297bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q) 1228bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
298{ 1229{
299 struct list_head *qe; 1230 struct list_head *qe;
300 struct bfa_cb_qe_s *hcb_qe; 1231 struct bfa_cb_qe_s *hcb_qe;
301 1232
302 while (!list_empty(comp_q)) { 1233 while (!list_empty(comp_q)) {
303 bfa_q_deq(comp_q, &qe); 1234 bfa_q_deq(comp_q, &qe);
@@ -321,7 +1252,6 @@ bfa_timer_tick(struct bfa_s *bfa)
321 bfa_timer_beat(&bfa->timer_mod); 1252 bfa_timer_beat(&bfa->timer_mod);
322} 1253}
323 1254
324#ifndef BFA_BIOS_BUILD
325/** 1255/**
326 * Return the list of PCI vendor/device id lists supported by this 1256 * Return the list of PCI vendor/device id lists supported by this
327 * BFA instance. 1257 * BFA instance.
@@ -336,7 +1266,7 @@ bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
336 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC}, 1266 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
337 }; 1267 };
338 1268
339 *npciids = ARRAY_SIZE(__pciids); 1269 *npciids = sizeof(__pciids) / sizeof(__pciids[0]);
340 *pciids = __pciids; 1270 *pciids = __pciids;
341} 1271}
342 1272
@@ -351,7 +1281,7 @@ bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
351 * void 1281 * void
352 * 1282 *
353 * Special Considerations: 1283 * Special Considerations:
354 * note 1284 * note
355 */ 1285 */
356void 1286void
357bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg) 1287bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
@@ -389,7 +1319,7 @@ bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
389 cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN; 1319 cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
390 cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN; 1320 cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
391 cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN; 1321 cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
392 cfg->drvcfg.min_cfg = BFA_TRUE; 1322 cfg->drvcfg.min_cfg = BFA_TRUE;
393} 1323}
394 1324
395void 1325void
@@ -417,7 +1347,7 @@ bfa_debug_fwsave_clear(struct bfa_s *bfa)
417} 1347}
418 1348
419/** 1349/**
420 * Fetch firmware trace data. 1350 * Fetch firmware trace data.
421 * 1351 *
422 * @param[in] bfa BFA instance 1352 * @param[in] bfa BFA instance
423 * @param[out] trcdata Firmware trace buffer 1353 * @param[out] trcdata Firmware trace buffer
@@ -433,6 +1363,22 @@ bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen)
433} 1363}
434 1364
435/** 1365/**
1366 * Dump firmware memory.
1367 *
1368 * @param[in] bfa BFA instance
1369 * @param[out] buf buffer for dump
1370 * @param[in,out] offset smem offset to start read
1371 * @param[in,out] buflen length of buffer
1372 *
1373 * @retval BFA_STATUS_OK Firmware memory is dumped.
1374 * @retval BFA_STATUS_INPROGRESS Firmware memory dump is in progress.
1375 */
1376bfa_status_t
1377bfa_debug_fwcore(struct bfa_s *bfa, void *buf, u32 *offset, int *buflen)
1378{
1379 return bfa_ioc_debug_fwcore(&bfa->ioc, buf, offset, buflen);
1380}
1381/**
436 * Reset hw semaphore & usage cnt regs and initialize. 1382 * Reset hw semaphore & usage cnt regs and initialize.
437 */ 1383 */
438void 1384void
@@ -441,4 +1387,23 @@ bfa_chip_reset(struct bfa_s *bfa)
441 bfa_ioc_ownership_reset(&bfa->ioc); 1387 bfa_ioc_ownership_reset(&bfa->ioc);
442 bfa_ioc_pll_init(&bfa->ioc); 1388 bfa_ioc_pll_init(&bfa->ioc);
443} 1389}
444#endif 1390
1391/**
1392 * Fetch firmware statistics data.
1393 *
1394 * @param[in] bfa BFA instance
1395 * @param[out] data Firmware stats buffer
1396 *
1397 * @retval BFA_STATUS_OK Firmware trace is fetched.
1398 */
1399bfa_status_t
1400bfa_fw_stats_get(struct bfa_s *bfa, void *data)
1401{
1402 return bfa_ioc_fw_stats_get(&bfa->ioc, data);
1403}
1404
1405bfa_status_t
1406bfa_fw_stats_clear(struct bfa_s *bfa)
1407{
1408 return bfa_ioc_fw_stats_clear(&bfa->ioc);
1409}
diff --git a/drivers/scsi/bfa/bfa_cs.h b/drivers/scsi/bfa/bfa_cs.h
new file mode 100644
index 000000000000..7260c74620f8
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_cs.h
@@ -0,0 +1,364 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_cs.h BFA common services
20 */
21
22#ifndef __BFA_CS_H__
23#define __BFA_CS_H__
24
25#include "bfa_os_inc.h"
26
27/**
28 * BFA TRC
29 */
30
31#ifndef BFA_TRC_MAX
32#define BFA_TRC_MAX (4 * 1024)
33#endif
34
35#ifndef BFA_TRC_TS
36#define BFA_TRC_TS(_trcm) ((_trcm)->ticks++)
37#endif
38
39struct bfa_trc_s {
40#ifdef __BIGENDIAN
41 u16 fileno;
42 u16 line;
43#else
44 u16 line;
45 u16 fileno;
46#endif
47 u32 timestamp;
48 union {
49 struct {
50 u32 rsvd;
51 u32 u32;
52 } u32;
53 u64 u64;
54 } data;
55};
56
57struct bfa_trc_mod_s {
58 u32 head;
59 u32 tail;
60 u32 ntrc;
61 u32 stopped;
62 u32 ticks;
63 u32 rsvd[3];
64 struct bfa_trc_s trc[BFA_TRC_MAX];
65};
66
67enum {
68 BFA_TRC_HAL = 1, /* BFA modules */
69 BFA_TRC_FCS = 2, /* BFA FCS modules */
70 BFA_TRC_LDRV = 3, /* Linux driver modules */
71 BFA_TRC_CNA = 4, /* Common modules */
72};
73#define BFA_TRC_MOD_SH 10
74#define BFA_TRC_MOD(__mod) ((BFA_TRC_ ## __mod) << BFA_TRC_MOD_SH)
75
76/**
77 * Define a new tracing file (module). Module should match one defined above.
78 */
79#define BFA_TRC_FILE(__mod, __submod) \
80 static int __trc_fileno = ((BFA_TRC_ ## __mod ## _ ## __submod) | \
81 BFA_TRC_MOD(__mod))
82
83
84#define bfa_trc32(_trcp, _data) \
85 __bfa_trc((_trcp)->trcmod, __trc_fileno, __LINE__, (u32)_data)
86#define bfa_trc(_trcp, _data) \
87 __bfa_trc((_trcp)->trcmod, __trc_fileno, __LINE__, (u64)_data)
88
89static inline void
90bfa_trc_init(struct bfa_trc_mod_s *trcm)
91{
92 trcm->head = trcm->tail = trcm->stopped = 0;
93 trcm->ntrc = BFA_TRC_MAX;
94}
95
96static inline void
97bfa_trc_stop(struct bfa_trc_mod_s *trcm)
98{
99 trcm->stopped = 1;
100}
101
102#ifdef FWTRC
103extern void dc_flush(void *data);
104#else
105#define dc_flush(data)
106#endif
107
108
109static inline void
110__bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data)
111{
112 int tail = trcm->tail;
113 struct bfa_trc_s *trc = &trcm->trc[tail];
114
115 if (trcm->stopped)
116 return;
117
118 trc->fileno = (u16) fileno;
119 trc->line = (u16) line;
120 trc->data.u64 = data;
121 trc->timestamp = BFA_TRC_TS(trcm);
122 dc_flush(trc);
123
124 trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
125 if (trcm->tail == trcm->head)
126 trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
127 dc_flush(trcm);
128}
129
130
131static inline void
132__bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data)
133{
134 int tail = trcm->tail;
135 struct bfa_trc_s *trc = &trcm->trc[tail];
136
137 if (trcm->stopped)
138 return;
139
140 trc->fileno = (u16) fileno;
141 trc->line = (u16) line;
142 trc->data.u32.u32 = data;
143 trc->timestamp = BFA_TRC_TS(trcm);
144 dc_flush(trc);
145
146 trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
147 if (trcm->tail == trcm->head)
148 trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
149 dc_flush(trcm);
150}
151
152#ifndef BFA_PERF_BUILD
153#define bfa_trc_fp(_trcp, _data) bfa_trc(_trcp, _data)
154#else
155#define bfa_trc_fp(_trcp, _data)
156#endif
157
158/**
159 * @ BFA LOG interfaces
160 */
161#define bfa_assert(__cond) do { \
162 if (!(__cond)) { \
 163 printk(KERN_ERR "assert(%s) failed at %s:%d\n", \
164 #__cond, __FILE__, __LINE__); \
165 } \
166} while (0)
167
168#define bfa_sm_fault(__mod, __event) do { \
169 bfa_trc(__mod, (((u32)0xDEAD << 16) | __event)); \
170 printk(KERN_ERR "Assertion failure: %s:%d: %d", \
171 __FILE__, __LINE__, (__event)); \
172} while (0)
173
174#ifndef BFA_PERF_BUILD
175#define bfa_assert_fp(__cond) bfa_assert(__cond)
176#else
177#define bfa_assert_fp(__cond)
178#endif
179
180/* BFA queue definitions */
181#define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next))
182#define bfa_q_next(_qe) (((struct list_head *) (_qe))->next)
183#define bfa_q_prev(_qe) (((struct list_head *) (_qe))->prev)
184
185/*
186 * bfa_q_qe_init - to initialize a queue element
187 */
188#define bfa_q_qe_init(_qe) { \
189 bfa_q_next(_qe) = (struct list_head *) NULL; \
190 bfa_q_prev(_qe) = (struct list_head *) NULL; \
191}
192
193/*
194 * bfa_q_deq - dequeue an element from head of the queue
195 */
196#define bfa_q_deq(_q, _qe) { \
197 if (!list_empty(_q)) { \
198 (*((struct list_head **) (_qe))) = bfa_q_next(_q); \
199 bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) = \
200 (struct list_head *) (_q); \
201 bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe));\
202 BFA_Q_DBG_INIT(*((struct list_head **) _qe)); \
203 } else { \
204 *((struct list_head **) (_qe)) = (struct list_head *) NULL;\
205 } \
206}
207
208/*
209 * bfa_q_deq_tail - dequeue an element from tail of the queue
210 */
211#define bfa_q_deq_tail(_q, _qe) { \
212 if (!list_empty(_q)) { \
213 *((struct list_head **) (_qe)) = bfa_q_prev(_q); \
214 bfa_q_next(bfa_q_prev(*((struct list_head **) _qe))) = \
215 (struct list_head *) (_q); \
216 bfa_q_prev(_q) = bfa_q_prev(*(struct list_head **) _qe);\
217 BFA_Q_DBG_INIT(*((struct list_head **) _qe)); \
218 } else { \
219 *((struct list_head **) (_qe)) = (struct list_head *) NULL;\
220 } \
221}
222
223static inline int
224bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe)
225{
226 struct list_head *tqe;
227
228 tqe = bfa_q_next(q);
229 while (tqe != q) {
230 if (tqe == qe)
231 return 1;
232 tqe = bfa_q_next(tqe);
233 if (tqe == NULL)
234 break;
235 }
236 return 0;
237}
238
239/*
240 * #ifdef BFA_DEBUG (Using bfa_assert to check for debug_build is not
241 * consistent across modules)
242 */
243#ifndef BFA_PERF_BUILD
244#define BFA_Q_DBG_INIT(_qe) bfa_q_qe_init(_qe)
245#else
246#define BFA_Q_DBG_INIT(_qe)
247#endif
248
249#define bfa_q_is_on_q(_q, _qe) \
250 bfa_q_is_on_q_func(_q, (struct list_head *)(_qe))
251
252/**
253 * @ BFA state machine interfaces
254 */
255
256typedef void (*bfa_sm_t)(void *sm, int event);
257
258/**
259 * oc - object class eg. bfa_ioc
260 * st - state, eg. reset
261 * otype - object type, eg. struct bfa_ioc_s
262 * etype - object type, eg. enum ioc_event
263 */
264#define bfa_sm_state_decl(oc, st, otype, etype) \
265 static void oc ## _sm_ ## st(otype * fsm, etype event)
266
267#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state))
268#define bfa_sm_send_event(_sm, _event) ((_sm)->sm((_sm), (_event)))
269#define bfa_sm_get_state(_sm) ((_sm)->sm)
270#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state))
271
272/**
273 * For converting from state machine function to state encoding.
274 */
275struct bfa_sm_table_s {
276 bfa_sm_t sm; /* state machine function */
277 int state; /* state machine encoding */
278 char *name; /* state name for display */
279};
280#define BFA_SM(_sm) ((bfa_sm_t)(_sm))
281
282/**
283 * State machine with entry actions.
284 */
285typedef void (*bfa_fsm_t)(void *fsm, int event);
286
287/**
288 * oc - object class eg. bfa_ioc
289 * st - state, eg. reset
290 * otype - object type, eg. struct bfa_ioc_s
291 * etype - object type, eg. enum ioc_event
292 */
293#define bfa_fsm_state_decl(oc, st, otype, etype) \
294 static void oc ## _sm_ ## st(otype * fsm, etype event); \
295 static void oc ## _sm_ ## st ## _entry(otype * fsm)
296
297#define bfa_fsm_set_state(_fsm, _state) do { \
298 (_fsm)->fsm = (bfa_fsm_t)(_state); \
299 _state ## _entry(_fsm); \
300} while (0)
301
302#define bfa_fsm_send_event(_fsm, _event) ((_fsm)->fsm((_fsm), (_event)))
303#define bfa_fsm_get_state(_fsm) ((_fsm)->fsm)
304#define bfa_fsm_cmp_state(_fsm, _state) \
305 ((_fsm)->fsm == (bfa_fsm_t)(_state))
306
307static inline int
308bfa_sm_to_state(struct bfa_sm_table_s *smt, bfa_sm_t sm)
309{
310 int i = 0;
311
312 while (smt[i].sm && smt[i].sm != sm)
313 i++;
314 return smt[i].state;
315}
316
317/**
318 * @ Generic wait counter.
319 */
320
321typedef void (*bfa_wc_resume_t) (void *cbarg);
322
323struct bfa_wc_s {
324 bfa_wc_resume_t wc_resume;
325 void *wc_cbarg;
326 int wc_count;
327};
328
329static inline void
330bfa_wc_up(struct bfa_wc_s *wc)
331{
332 wc->wc_count++;
333}
334
335static inline void
336bfa_wc_down(struct bfa_wc_s *wc)
337{
338 wc->wc_count--;
339 if (wc->wc_count == 0)
340 wc->wc_resume(wc->wc_cbarg);
341}
342
343/**
344 * Initialize a waiting counter.
345 */
346static inline void
347bfa_wc_init(struct bfa_wc_s *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
348{
349 wc->wc_resume = wc_resume;
350 wc->wc_cbarg = wc_cbarg;
351 wc->wc_count = 0;
352 bfa_wc_up(wc);
353}
354
355/**
356 * Wait for counter to reach zero
357 */
358static inline void
359bfa_wc_wait(struct bfa_wc_s *wc)
360{
361 bfa_wc_down(wc);
362}
363
364#endif /* __BFA_CS_H__ */
diff --git a/drivers/scsi/bfa/bfa_csdebug.c b/drivers/scsi/bfa/bfa_csdebug.c
deleted file mode 100644
index caeb1143a4e6..000000000000
--- a/drivers/scsi/bfa/bfa_csdebug.c
+++ /dev/null
@@ -1,58 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <cs/bfa_debug.h>
19#include <bfa_os_inc.h>
20#include <cs/bfa_q.h>
21#include <log/bfa_log_hal.h>
22
23/**
24 * cs_debug_api
25 */
26
27
/* Log an assertion failure at the given source location, then halt.
 * (Belongs to bfa_csdebug.c, which this patch deletes.) */
void
bfa_panic(int line, char *file, char *panicstr)
{
	bfa_log(NULL, BFA_LOG_HAL_ASSERT, file, line, panicstr);
	bfa_os_panic();
}
34
/* Log a state-machine assertion (unexpected event) and halt.
 * (Belongs to bfa_csdebug.c, which this patch deletes.) */
void
bfa_sm_panic(struct bfa_log_mod_s *logm, int line, char *file, int event)
{
	bfa_log(logm, BFA_LOG_HAL_SM_ASSERT, file, line, event);
	bfa_os_panic();
}
41
/* Return 1 if element qe is linked on circular list q, else 0.
 * The NULL check guards against a corrupted/unterminated chain so the
 * scan cannot loop forever on bad input.
 * (Belongs to bfa_csdebug.c, which this patch deletes.) */
int
bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe)
{
	struct list_head *tqe;

	tqe = bfa_q_next(q);
	while (tqe != q) {
		if (tqe == qe)
			return 1;
		tqe = bfa_q_next(tqe);
		if (tqe == NULL)
			break;
	}
	return 0;
}
57
58
diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h
new file mode 100644
index 000000000000..d49877ff5140
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_defs.h
@@ -0,0 +1,466 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_H__
19#define __BFA_DEFS_H__
20
21#include "bfa_fc.h"
22#include "bfa_os_inc.h"
23
#define BFA_MFG_SERIALNUM_SIZE		11
/* Round a string field length up to a 4-byte multiple, leaving room
 * for a NUL terminator. */
#define STRSZ(_n)			(((_n) + 4) & ~3)

/**
 * Manufacturing card type
 */
enum {
	BFA_MFG_TYPE_CB_MAX  = 825,	/* Crossbow card type max */
	BFA_MFG_TYPE_FC8P2   = 825,	/* 8G 2port FC card */
	BFA_MFG_TYPE_FC8P1   = 815,	/* 8G 1port FC card */
	BFA_MFG_TYPE_FC4P2   = 425,	/* 4G 2port FC card */
	BFA_MFG_TYPE_FC4P1   = 415,	/* 4G 1port FC card */
	BFA_MFG_TYPE_CNA10P2 = 1020,	/* 10G 2port CNA card */
	BFA_MFG_TYPE_CNA10P1 = 1010,	/* 10G 1port CNA card */
	BFA_MFG_TYPE_JAYHAWK = 804,	/* Jayhawk mezz card */
	BFA_MFG_TYPE_WANCHESE = 1007,	/* Wanchese mezz card */
	BFA_MFG_TYPE_ASTRA    = 807,	/* Astra mezz card */
	BFA_MFG_TYPE_LIGHTNING_P0 = 902, /* Lightning mezz card - old */
	BFA_MFG_TYPE_LIGHTNING = 1741,	/* Lightning mezz card */
	BFA_MFG_TYPE_INVALID = 0,	/* Invalid card type */
};

#pragma pack(1)

/**
 * Check if Mezz card
 */
#define bfa_mfg_is_mezz(type) (( \
	(type) == BFA_MFG_TYPE_JAYHAWK || \
	(type) == BFA_MFG_TYPE_WANCHESE || \
	(type) == BFA_MFG_TYPE_ASTRA || \
	(type) == BFA_MFG_TYPE_LIGHTNING_P0 || \
	(type) == BFA_MFG_TYPE_LIGHTNING))

/**
 * Check if the card having old wwn/mac handling
 */
#define bfa_mfg_is_old_wwn_mac_model(type) (( \
	(type) == BFA_MFG_TYPE_FC8P2 || \
	(type) == BFA_MFG_TYPE_FC8P1 || \
	(type) == BFA_MFG_TYPE_FC4P2 || \
	(type) == BFA_MFG_TYPE_FC4P1 || \
	(type) == BFA_MFG_TYPE_CNA10P2 || \
	(type) == BFA_MFG_TYPE_CNA10P1 || \
	(type) == BFA_MFG_TYPE_JAYHAWK || \
	(type) == BFA_MFG_TYPE_WANCHESE))

/* Advance the low 24 bits of byte array 'm' (big-endian order) by 'i'.
 * NOTE(review): 'm' is expanded several times - do not pass an
 * expression with side effects. */
#define bfa_mfg_increment_wwn_mac(m, i) \
do { \
	u32 t = ((u32)(m)[0] << 16) | ((u32)(m)[1] << 8) | \
		(u32)(m)[2]; \
	t += (i); \
	(m)[0] = (t >> 16) & 0xFF; \
	(m)[1] = (t >> 8) & 0xFF; \
	(m)[2] = t & 0xFF; \
} while (0)

/**
 * VPD data length
 */
#define BFA_MFG_VPD_LEN	512

/**
 * VPD vendor tag
 */
enum {
	BFA_MFG_VPD_UNKNOWN	= 0,	/* vendor unknown */
	BFA_MFG_VPD_IBM		= 1,	/* vendor IBM */
	BFA_MFG_VPD_HP		= 2,	/* vendor HP */
	BFA_MFG_VPD_DELL	= 3,	/* vendor DELL */
	BFA_MFG_VPD_PCI_IBM	= 0x08,	/* PCI VPD IBM */
	BFA_MFG_VPD_PCI_HP	= 0x10,	/* PCI VPD HP */
	BFA_MFG_VPD_PCI_DELL	= 0x20,	/* PCI VPD DELL */
	BFA_MFG_VPD_PCI_BRCD	= 0xf8,	/* PCI VPD Brocade */
};

/**
 * All numerical fields are in big-endian format.
 */
struct bfa_mfg_vpd_s {
	u8	version;	/* vpd data version */
	u8	vpd_sig[3];	/* characters 'V', 'P', 'D' */
	u8	chksum;		/* u8 checksum */
	u8	vendor;		/* vendor */
	u8	len;		/* vpd data length excluding header */
	u8	rsv;
	u8	data[BFA_MFG_VPD_LEN];	/* vpd data */
};

#pragma pack()
114
/**
 * Status return values
 *
 * NOTE(review): numbering is sparse - presumably these values mirror an
 * external management ABI; confirm before renumbering or compacting.
 */
enum bfa_status {
	BFA_STATUS_OK		= 0,	/* Success */
	BFA_STATUS_FAILED	= 1,	/* Operation failed */
	BFA_STATUS_EINVAL	= 2,	/* Invalid params Check input
					 * parameters */
	BFA_STATUS_ENOMEM	= 3,	/* Out of resources */
	BFA_STATUS_ETIMER	= 5,	/* Timer expired - Retry, if persists,
					 * contact support */
	BFA_STATUS_EPROTOCOL	= 6,	/* Protocol error */
	BFA_STATUS_DEVBUSY	= 13,	/* Device busy - Retry operation */
	BFA_STATUS_UNKNOWN_LWWN = 18,	/* LPORT PWWN not found */
	BFA_STATUS_UNKNOWN_RWWN = 19,	/* RPORT PWWN not found */
	BFA_STATUS_VPORT_EXISTS = 21,	/* VPORT already exists */
	BFA_STATUS_VPORT_MAX	= 22,	/* Reached max VPORT supported limit */
	BFA_STATUS_UNSUPP_SPEED	= 23,	/* Invalid Speed Check speed setting */
	BFA_STATUS_INVLD_DFSZ	= 24,	/* Invalid Max data field size */
	BFA_STATUS_FABRIC_RJT	= 29,	/* Reject from attached fabric */
	BFA_STATUS_VPORT_WWN_BP	= 46,	/* WWN is same as base port's WWN */
	BFA_STATUS_NO_FCPIM_NEXUS = 52,	/* No FCP Nexus exists with the rport */
	BFA_STATUS_IOC_FAILURE	= 56,	/* IOC failure - Retry, if persists
					 * contact support */
	BFA_STATUS_INVALID_WWN	= 57,	/* Invalid WWN */
	BFA_STATUS_DIAG_BUSY	= 71,	/* diag busy */
	BFA_STATUS_ENOFSAVE	= 78,	/* No saved firmware trace */
	BFA_STATUS_IOC_DISABLED	= 82,	/* IOC is already disabled */
	BFA_STATUS_INVALID_MAC	= 134,	/* Invalid MAC address */
	BFA_STATUS_PBC		= 154,	/* Operation not allowed for pre-boot
					 * configuration */
	BFA_STATUS_TRUNK_ENABLED = 164,	/* Trunk is already enabled on
					 * this adapter */
	BFA_STATUS_TRUNK_DISABLED = 165, /* Trunking is disabled on
					  * the adapter */
	BFA_STATUS_IOPROFILE_OFF = 175,	/* IO profile OFF */
	BFA_STATUS_MAX_VAL		/* Unknown error code */
};
#define bfa_status_t enum bfa_status

/* ELS/protocol-level failure classification. */
enum bfa_eproto_status {
	BFA_EPROTO_BAD_ACCEPT = 0,
	BFA_EPROTO_UNKNOWN_RSP = 1
};
#define bfa_eproto_status_t enum bfa_eproto_status

/* Driver-local boolean (kept as an enum for ABI stability). */
enum bfa_boolean {
	BFA_FALSE = 0,
	BFA_TRUE  = 1
};
#define bfa_boolean_t enum bfa_boolean
166
#define BFA_STRING_32	32
#define BFA_VERSION_LEN	64

/**
 * ---------------------- adapter definitions ------------
 */

/**
 * BFA adapter level attributes.
 */
enum {
	BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE),
					/* adapter serial num length */
	BFA_ADAPTER_MODEL_NAME_LEN  = 16,	/* model name length */
	BFA_ADAPTER_MODEL_DESCR_LEN = 128,	/* model description length */
	BFA_ADAPTER_MFG_NAME_LEN    = 8,	/* manufacturer name length */
	BFA_ADAPTER_SYM_NAME_LEN    = 64,	/* adapter symbolic name length */
	BFA_ADAPTER_OS_TYPE_LEN	    = 64,	/* adapter os type length */
};

/* Adapter-level attributes reported to management applications. */
struct bfa_adapter_attr_s {
	char		manufacturer[BFA_ADAPTER_MFG_NAME_LEN];
	char		serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
	u32		card_type;
	char		model[BFA_ADAPTER_MODEL_NAME_LEN];
	char		model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
	wwn_t		pwwn;
	char		node_symname[FC_SYMNAME_MAX];
	char		hw_ver[BFA_VERSION_LEN];
	char		fw_ver[BFA_VERSION_LEN];
	char		optrom_ver[BFA_VERSION_LEN];
	char		os_type[BFA_ADAPTER_OS_TYPE_LEN];
	struct bfa_mfg_vpd_s	vpd;
	struct mac_s	mac;

	u8		nports;
	u8		max_speed;
	u8		prototype;
	char		asic_rev;

	u8		pcie_gen;
	u8		pcie_lanes_orig;
	u8		pcie_lanes;
	u8		cna_capable;

	u8		is_mezz;
	u8		trunk_capable;
};
217
/**
 * ---------------------- IOC definitions ------------
 */

enum {
	BFA_IOC_DRIVER_LEN	= 16,
	BFA_IOC_CHIP_REV_LEN	= 8,
};

/**
 * Driver and firmware versions.
 */
struct bfa_ioc_driver_attr_s {
	char		driver[BFA_IOC_DRIVER_LEN];	/* driver name */
	char		driver_ver[BFA_VERSION_LEN];	/* driver version */
	char		fw_ver[BFA_VERSION_LEN];	/* firmware version */
	char		bios_ver[BFA_VERSION_LEN];	/* bios version */
	char		efi_ver[BFA_VERSION_LEN];	/* EFI version */
	char		ob_ver[BFA_VERSION_LEN];	/* openboot version */
};

/**
 * IOC PCI device attributes
 */
struct bfa_ioc_pci_attr_s {
	u16	vendor_id;	/* PCI vendor ID */
	u16	device_id;	/* PCI device ID */
	u16	ssid;		/* subsystem ID */
	u16	ssvid;		/* subsystem vendor ID */
	u32	pcifn;		/* PCI device function */
	u32	rsvd;		/* padding */
	char	chip_rev[BFA_IOC_CHIP_REV_LEN];	 /* chip revision */
};

/**
 * IOC states
 */
enum bfa_ioc_state {
	BFA_IOC_UNINIT		= 1,	/* IOC is in uninit state */
	BFA_IOC_RESET		= 2,	/* IOC is in reset state */
	BFA_IOC_SEMWAIT		= 3,	/* Waiting for IOC h/w semaphore */
	BFA_IOC_HWINIT		= 4,	/* IOC h/w is being initialized */
	BFA_IOC_GETATTR		= 5,	/* IOC is being configured */
	BFA_IOC_OPERATIONAL	= 6,	/* IOC is operational */
	BFA_IOC_INITFAIL	= 7,	/* IOC hardware failure */
	BFA_IOC_FAIL		= 8,	/* IOC heart-beat failure */
	BFA_IOC_DISABLING	= 9,	/* IOC is being disabled */
	BFA_IOC_DISABLED	= 10,	/* IOC is disabled */
	BFA_IOC_FWMISMATCH	= 11,	/* IOC f/w different from drivers */
	BFA_IOC_ENABLING	= 12,	/* IOC is being enabled */
};

/**
 * IOC firmware stats
 */
struct bfa_fw_ioc_stats_s {
	u32	enable_reqs;
	u32	disable_reqs;
	u32	get_attr_reqs;
	u32	dbg_sync;
	u32	dbg_dump;
	u32	unknown_reqs;
};

/**
 * IOC driver stats
 */
struct bfa_ioc_drv_stats_s {
	u32	ioc_isrs;
	u32	ioc_enables;
	u32	ioc_disables;
	u32	ioc_hbfails;
	u32	ioc_boots;
	u32	stats_tmos;
	u32	hb_count;
	u32	disable_reqs;
	u32	enable_reqs;
	u32	disable_replies;
	u32	enable_replies;
};

/**
 * IOC statistics
 */
struct bfa_ioc_stats_s {
	struct bfa_ioc_drv_stats_s	drv_stats;	/* driver IOC stats */
	struct bfa_fw_ioc_stats_s	fw_stats;	/* firmware IOC stats */
};

/* Personality of the IO controller function. */
enum bfa_ioc_type_e {
	BFA_IOC_TYPE_FC		= 1,
	BFA_IOC_TYPE_FCoE	= 2,
	BFA_IOC_TYPE_LL		= 3,
};

/**
 * IOC attributes returned in queries
 */
struct bfa_ioc_attr_s {
	enum bfa_ioc_type_e		ioc_type;
	enum bfa_ioc_state		state;		/* IOC state */
	struct bfa_adapter_attr_s	adapter_attr;	/* HBA attributes */
	struct bfa_ioc_driver_attr_s	driver_attr;	/* driver attr */
	struct bfa_ioc_pci_attr_s	pci_attr;
	u8				port_id;	/* port number */
	u8				rsvd[7];	/* 64bit align */
};
325
/**
 * ---------------------- mfg definitions ------------
 */

/**
 * Checksum size
 */
#define BFA_MFG_CHKSUM_SIZE			16

#define BFA_MFG_PARTNUM_SIZE			14
#define BFA_MFG_SUPPLIER_ID_SIZE		10
#define BFA_MFG_SUPPLIER_PARTNUM_SIZE		20
#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE		20
#define BFA_MFG_SUPPLIER_REVISION_SIZE		4

#pragma pack(1)

/**
 * All numerical fields are in big-endian format.
 */
struct bfa_mfg_block_s {
	u8	version;	/* manufacturing block version */
	u8	mfg_sig[3];	/* characters 'M', 'F', 'G' */
	u16	mfgsize;	/* mfg block size */
	u16	u16_chksum;	/* old u16 checksum */
	char	brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
	char	brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
	u8	mfg_day;	/* manufacturing day */
	u8	mfg_month;	/* manufacturing month */
	u16	mfg_year;	/* manufacturing year */
	wwn_t	mfg_wwn;	/* wwn base for this adapter */
	u8	num_wwn;	/* number of wwns assigned */
	u8	mfg_speeds;	/* speeds allowed for this adapter */
	u8	rsv[2];
	char	supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
	char	supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
	char	supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
	char	supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
	mac_t	mfg_mac;	/* mac address */
	u8	num_mac;	/* number of mac addresses */
	u8	rsv2;
	u32	mfg_type;	/* card type */
	u8	rsv3[108];
	u8	md5_chksum[BFA_MFG_CHKSUM_SIZE]; /* md5 checksum */
};

#pragma pack()

/**
 * ---------------------- pci definitions ------------
 */

/**
 * PCI device and vendor ID information
 */
enum {
	BFA_PCI_VENDOR_ID_BROCADE	= 0x1657,
	BFA_PCI_DEVICE_ID_FC_8G2P	= 0x13,
	BFA_PCI_DEVICE_ID_FC_8G1P	= 0x17,
	BFA_PCI_DEVICE_ID_CT		= 0x14,
	BFA_PCI_DEVICE_ID_CT_FC		= 0x21,
};

/* True for Catapult-class (CT) ASICs, either CNA or FC personality. */
#define bfa_asic_id_ct(devid)			\
	((devid) == BFA_PCI_DEVICE_ID_CT ||	\
	 (devid) == BFA_PCI_DEVICE_ID_CT_FC)

/**
 * PCI sub-system device and vendor ID information
 */
enum {
	BFA_PCI_FCOE_SSDEVICE_ID	= 0x14,
};

/**
 * Maximum number of device address ranges mapped through different BAR(s)
 */
#define BFA_PCI_ACCESS_RANGES 1
406
/*
 * Port speed settings. Each specific speed is a bit field. Use multiple
 * bits to specify speeds to be selected for auto-negotiation.
 */
enum bfa_port_speed {
	BFA_PORT_SPEED_UNKNOWN = 0,
	BFA_PORT_SPEED_1GBPS	= 1,
	BFA_PORT_SPEED_2GBPS	= 2,
	BFA_PORT_SPEED_4GBPS	= 4,
	BFA_PORT_SPEED_8GBPS	= 8,
	BFA_PORT_SPEED_10GBPS	= 10,
	BFA_PORT_SPEED_16GBPS	= 16,
	BFA_PORT_SPEED_AUTO =
		(BFA_PORT_SPEED_1GBPS | BFA_PORT_SPEED_2GBPS |
		 BFA_PORT_SPEED_4GBPS | BFA_PORT_SPEED_8GBPS),
};
#define bfa_port_speed_t enum bfa_port_speed

enum {
	BFA_BOOT_BOOTLUN_MAX = 4,	/* maximum boot lun per IOC */
	BFA_PREBOOT_BOOTLUN_MAX = 8,	/* maximum preboot lun per IOC */
};

#define BOOT_CFG_REV1	1
#define BOOT_CFG_VLAN	1

/**
 * Boot options setting. Boot options setting determines from where
 * to get the boot lun information
 */
enum bfa_boot_bootopt {
	BFA_BOOT_AUTO_DISCOVER	= 0, /* Boot from blun provided by fabric */
	BFA_BOOT_STORED_BLUN	= 1, /* Boot from bluns stored in flash */
	BFA_BOOT_FIRST_LUN	= 2, /* Boot from first discovered blun */
	BFA_BOOT_PBC		= 3, /* Boot from pbc configured blun */
};

#pragma pack(1)
/**
 * Boot lun information.
 */
struct bfa_boot_bootlun_s {
	wwn_t	pwwn;	/* port wwn of target */
	lun_t	lun;	/* 64-bit lun */
};
#pragma pack()

/**
 * BOOT boot configuration
 */
struct bfa_boot_pbc_s {
	u8		enable;		/* enable/disable SAN boot */
	u8		speed;		/* boot speed settings */
	u8		topology;	/* boot topology setting */
	u8		rsvd1;
	u32		nbluns;		/* number of boot luns */
	struct bfa_boot_bootlun_s pblun[BFA_PREBOOT_BOOTLUN_MAX];
};
465
466#endif /* __BFA_DEFS_H__ */
diff --git a/drivers/scsi/bfa/bfa_defs_fcs.h b/drivers/scsi/bfa/bfa_defs_fcs.h
new file mode 100644
index 000000000000..96905d301828
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_defs_fcs.h
@@ -0,0 +1,457 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_FCS_H__
19#define __BFA_DEFS_FCS_H__
20
21#include "bfa_fc.h"
22#include "bfa_defs_svc.h"
23
/**
 * VF states
 */
enum bfa_vf_state {
	BFA_VF_UNINIT		= 0,	/* fabric is not yet initialized */
	BFA_VF_LINK_DOWN	= 1,	/* link is down */
	BFA_VF_FLOGI		= 2,	/* flogi is in progress */
	BFA_VF_AUTH		= 3,	/* authentication in progress */
	BFA_VF_NOFABRIC		= 4,	/* fabric is not present */
	BFA_VF_ONLINE		= 5,	/* login to fabric is complete */
	BFA_VF_EVFP		= 6,	/* EVFP is in progress */
	BFA_VF_ISOLATED		= 7,	/* port isolated due to vf_id mismatch */
};

/**
 * VF statistics
 */
struct bfa_vf_stats_s {
	u32	flogi_sent;	/* Num FLOGIs sent */
	u32	flogi_rsp_err;	/* FLOGI response errors */
	u32	flogi_acc_err;	/* FLOGI accept errors */
	u32	flogi_accepts;	/* FLOGI accepts received */
	u32	flogi_rejects;	/* FLOGI rejects received */
	u32	flogi_unknown_rsp; /* Unknown responses for FLOGI */
	u32	flogi_alloc_wait; /* Allocation waits prior to sending FLOGI */
	u32	flogi_rcvd;	/* FLOGIs received */
	u32	flogi_rejected;	/* Incoming FLOGIs rejected */
	u32	fabric_onlines;	/* Internal fabric online notification sent
				 * to other modules */
	u32	fabric_offlines; /* Internal fabric offline notification sent
				  * to other modules */
	u32	resvd;		/* padding for 64 bit alignment */
};

/**
 * VF attributes returned in queries
 */
struct bfa_vf_attr_s {
	enum bfa_vf_state	state;		/* VF state */
	u32			rsvd;
	wwn_t			fabric_name;	/* fabric name */
};
66
#define BFA_FCS_MAX_LPORTS 256
#define BFA_FCS_FABRIC_IPADDR_SZ  16

/**
 * symbolic names for base port/virtual port
 */
#define BFA_SYMNAME_MAXLEN	128	/* 128 bytes */
struct bfa_lport_symname_s {
	char	    symname[BFA_SYMNAME_MAXLEN];
};

/**
 * Roles of FCS port:
 *     - FCP IM and FCP TM roles cannot be enabled together for a FCS port
 *     - Create multiple ports if both IM and TM functions required.
 *     - At least one role must be specified.
 */
enum bfa_lport_role {
	BFA_LPORT_ROLE_FCP_IM	= 0x01,	/* FCP initiator role */
	BFA_LPORT_ROLE_FCP_MAX	= BFA_LPORT_ROLE_FCP_IM,
};

/**
 * FCS port configuration.
 */
struct bfa_lport_cfg_s {
	wwn_t	       pwwn;       /* port wwn */
	wwn_t	       nwwn;       /* node wwn */
	struct bfa_lport_symname_s  sym_name;   /* vm port symbolic name */
	bfa_boolean_t  preboot_vp;  /* vport created from PBC */
	enum bfa_lport_role roles;  /* FCS port roles */
	u8	       tag[16];	    /* opaque tag from application */
};
100
/**
 * FCS port states
 */
enum bfa_lport_state {
	BFA_LPORT_UNINIT  = 0,	/* PORT is not yet initialized */
	BFA_LPORT_FDISC   = 1,	/* FDISC is in progress */
	BFA_LPORT_ONLINE  = 2,	/* login to fabric is complete */
	BFA_LPORT_OFFLINE = 3,	/* No login to fabric */
};

/**
 * FCS port type.
 */
enum bfa_lport_type {
	BFA_LPORT_TYPE_PHYSICAL = 0,
	BFA_LPORT_TYPE_VIRTUAL,
};

/**
 * FCS port offline reason.
 */
enum bfa_lport_offline_reason {
	BFA_LPORT_OFFLINE_UNKNOWN = 0,
	BFA_LPORT_OFFLINE_LINKDOWN,
	BFA_LPORT_OFFLINE_FAB_UNSUPPORTED,	/* NPIV not supported by the
						 * fabric */
	BFA_LPORT_OFFLINE_FAB_NORESOURCES,
	BFA_LPORT_OFFLINE_FAB_LOGOUT,
};

/**
 * FCS lport info.
 */
struct bfa_lport_info_s {
	u8	 port_type;	/* bfa_lport_type_t : physical or
				 * virtual */
	u8	 port_state;	/* one of bfa_lport_state values */
	u8	 offline_reason; /* one of bfa_lport_offline_reason_t
				  * values */
	wwn_t	 port_wwn;
	wwn_t	 node_wwn;

	/*
	 * following 4 fields are valid for Physical Ports only
	 */
	u32	 max_vports_supp;	/* Max supported vports */
	u32	 num_vports_inuse;	/* Num of in use vports */
	u32	 max_rports_supp;	/* Max supported rports */
	u32	 num_rports_inuse;	/* Num of discovered rports */

};
152
/**
 * FCS port statistics
 *
 * Counters are grouped by the well-known service they talk to:
 * ns_* = Name Server, ms_* = Management Server, plus RSCN and
 * unsolicited-frame / inbound-ELS counters.
 */
struct bfa_lport_stats_s {
	u32	ns_plogi_sent;
	u32	ns_plogi_rsp_err;
	u32	ns_plogi_acc_err;
	u32	ns_plogi_accepts;
	u32	ns_rejects;	/* NS command rejects */
	u32	ns_plogi_unknown_rsp;
	u32	ns_plogi_alloc_wait;

	u32	ns_retries;	/* NS command retries */
	u32	ns_timeouts;	/* NS command timeouts */

	u32	ns_rspnid_sent;
	u32	ns_rspnid_accepts;
	u32	ns_rspnid_rsp_err;
	u32	ns_rspnid_rejects;
	u32	ns_rspnid_alloc_wait;

	u32	ns_rftid_sent;
	u32	ns_rftid_accepts;
	u32	ns_rftid_rsp_err;
	u32	ns_rftid_rejects;
	u32	ns_rftid_alloc_wait;

	u32	ns_rffid_sent;
	u32	ns_rffid_accepts;
	u32	ns_rffid_rsp_err;
	u32	ns_rffid_rejects;
	u32	ns_rffid_alloc_wait;

	u32	ns_gidft_sent;
	u32	ns_gidft_accepts;
	u32	ns_gidft_rsp_err;
	u32	ns_gidft_rejects;
	u32	ns_gidft_unknown_rsp;
	u32	ns_gidft_alloc_wait;

	/*
	 * Mgmt Server stats
	 */
	u32	ms_retries;	/* MS command retries */
	u32	ms_timeouts;	/* MS command timeouts */
	u32	ms_plogi_sent;
	u32	ms_plogi_rsp_err;
	u32	ms_plogi_acc_err;
	u32	ms_plogi_accepts;
	u32	ms_rejects;	/* MS command rejects */
	u32	ms_plogi_unknown_rsp;
	u32	ms_plogi_alloc_wait;

	u32	num_rscn;	/* Num of RSCN received */
	u32	num_portid_rscn; /* Num portid format RSCN
				  * received */

	u32	uf_recvs;	/* Unsolicited recv frames */
	u32	uf_recv_drops;	/* Dropped received frames */

	u32	plogi_rcvd;	/* Received plogi */
	u32	prli_rcvd;	/* Received prli */
	u32	adisc_rcvd;	/* Received adisc */
	u32	prlo_rcvd;	/* Received prlo */
	u32	logo_rcvd;	/* Received logo */
	u32	rpsc_rcvd;	/* Received rpsc */
	u32	un_handled_els_rcvd;	/* Received unhandled ELS */
	u32	rport_plogi_timeouts;	/* Rport plogi retry timeout count */
	u32	rport_del_max_plogi_retry; /* Deleted rport
					    * (max retry of plogi) */
};

/**
 * BFA port attribute returned in queries
 */
struct bfa_lport_attr_s {
	enum bfa_lport_state state;	/* port state */
	u32	 pid;	/* port ID */
	struct bfa_lport_cfg_s   port_cfg;	/* port configuration */
	enum bfa_port_type port_type;	/* current topology */
	u32	 loopback;	/* cable is externally looped back */
	wwn_t	 fabric_name;	/* attached switch's nwwn */
	u8	 fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ]; /* attached
							    * fabric's ip addr */
	mac_t	 fpma_mac;	/* Lport's FPMA Mac address */
	u16	 authfail;	/* auth failed state */
};
240
241
/**
 * VPORT states
 *
 * NOTE(review): some states share numeric values (OFFLINE aliases
 * CREATED, CLEANUP aliases DELETING) - presumably intentional aliases
 * for external reporting; confirm before renumbering.
 */
enum bfa_vport_state {
	BFA_FCS_VPORT_UNINIT		= 0,
	BFA_FCS_VPORT_CREATED		= 1,
	BFA_FCS_VPORT_OFFLINE		= 1,	/* alias of CREATED */
	BFA_FCS_VPORT_FDISC_SEND	= 2,
	BFA_FCS_VPORT_FDISC		= 3,
	BFA_FCS_VPORT_FDISC_RETRY	= 4,
	BFA_FCS_VPORT_ONLINE		= 5,
	BFA_FCS_VPORT_DELETING		= 6,
	BFA_FCS_VPORT_CLEANUP		= 6,	/* alias of DELETING */
	BFA_FCS_VPORT_LOGO_SEND		= 7,
	BFA_FCS_VPORT_LOGO		= 8,
	BFA_FCS_VPORT_ERROR		= 9,
	BFA_FCS_VPORT_MAX_STATE,
};

/**
 * vport statistics
 */
struct bfa_vport_stats_s {
	struct bfa_lport_stats_s port_stats;	/* base class (port) stats */
	/*
	 * TODO - remove
	 */

	u32	fdisc_sent;	/* num fdisc sent */
	u32	fdisc_accepts;	/* fdisc accepts */
	u32	fdisc_retries;	/* fdisc retries */
	u32	fdisc_timeouts;	/* fdisc timeouts */
	u32	fdisc_rsp_err;	/* fdisc response error */
	u32	fdisc_acc_bad;	/* bad fdisc accepts */
	u32	fdisc_rejects;	/* fdisc rejects */
	u32	fdisc_unknown_rsp;	/* fdisc rsp unknown error */
	u32	fdisc_alloc_wait; /* fdisc req (fcxp)alloc wait */

	u32	logo_alloc_wait; /* logo req (fcxp) alloc wait */
	u32	logo_sent;	/* logo sent */
	u32	logo_accepts;	/* logo accepts */
	u32	logo_rejects;	/* logo rejects */
	u32	logo_rsp_err;	/* logo rsp errors */
	u32	logo_unknown_rsp; /* logo rsp unknown errors */

	u32	fab_no_npiv;	/* fabric does not support npiv */

	u32	fab_offline;	/* offline events from fab SM */
	u32	fab_online;	/* online events from fab SM */
	u32	fab_cleanup;	/* cleanup request from fab SM */
	u32	rsvd;
};

/**
 * BFA vport attribute returned in queries
 */
struct bfa_vport_attr_s {
	struct bfa_lport_attr_s port_attr;	/* base class (port) attributes */
	enum bfa_vport_state	vport_state;	/* vport state */
	u32			rsvd;
};
307
/**
 * FCS remote port states
 */
enum bfa_rport_state {
	BFA_RPORT_UNINIT	= 0,	/* PORT is not yet initialized */
	BFA_RPORT_OFFLINE	= 1,	/* rport is offline */
	BFA_RPORT_PLOGI		= 2,	/* PLOGI to rport is in progress */
	BFA_RPORT_ONLINE	= 3,	/* login to rport is complete */
	BFA_RPORT_PLOGI_RETRY	= 4,	/* retrying login to rport */
	BFA_RPORT_NSQUERY	= 5,	/* nameserver query */
	BFA_RPORT_ADISC		= 6,	/* ADISC authentication */
	BFA_RPORT_LOGO		= 7,	/* logging out with rport */
	BFA_RPORT_LOGORCV	= 8,	/* handling LOGO from rport */
	BFA_RPORT_NSDISC	= 9,	/* re-discover rport */
};

/**
 * Rport Scsi Function : Initiator/Target.
 */
enum bfa_rport_function {
	BFA_RPORT_INITIATOR	= 0x01,	/* SCSI Initiator */
	BFA_RPORT_TARGET	= 0x02,	/* SCSI Target */
};

/**
 * port/node symbolic names for rport
 */
#define BFA_RPORT_SYMNAME_MAXLEN	255
struct bfa_rport_symname_s {
	char	    symname[BFA_RPORT_SYMNAME_MAXLEN];
};

/**
 * FCS remote port statistics
 */
struct bfa_rport_stats_s {
	u32	offlines;	/* remote port offline count */
	u32	onlines;	/* remote port online count */
	u32	rscns;		/* RSCN affecting rport */
	u32	plogis;		/* plogis sent */
	u32	plogi_accs;	/* plogi accepts */
	u32	plogi_timeouts;	/* plogi timeouts */
	u32	plogi_rejects;	/* rcvd plogi rejects */
	u32	plogi_failed;	/* local failure */
	u32	plogi_rcvd;	/* plogis rcvd */
	u32	prli_rcvd;	/* inbound PRLIs */
	u32	adisc_rcvd;	/* ADISCs received */
	u32	adisc_rejects;	/* recvd ADISC rejects */
	u32	adisc_sent;	/* ADISC requests sent */
	u32	adisc_accs;	/* ADISC accepted by rport */
	u32	adisc_failed;	/* ADISC failed (no response) */
	u32	adisc_rejected;	/* ADISC rejected by us */
	u32	logos;		/* logos sent */
	u32	logo_accs;	/* LOGO accepts from rport */
	u32	logo_failed;	/* LOGO failures */
	u32	logo_rejected;	/* LOGO rejects from rport */
	u32	logo_rcvd;	/* LOGO from remote port */

	u32	rpsc_rcvd;	/* RPSC received */
	u32	rpsc_rejects;	/* recvd RPSC rejects */
	u32	rpsc_sent;	/* RPSC requests sent */
	u32	rpsc_accs;	/* RPSC accepted by rport */
	u32	rpsc_failed;	/* RPSC failed (no response) */
	u32	rpsc_rejected;	/* RPSC rejected by us */

	u32	rjt_insuff_res;	/* LS RJT with insuff resources */
	struct bfa_rport_hal_stats_s	hal_stats;  /* BFA rport stats */
};
376
/**
 * FCS remote port attributes returned in queries
 */
struct bfa_rport_attr_s {
	wwn_t		nwwn;	/* node wwn */
	wwn_t		pwwn;	/* port wwn */
	enum fc_cos	cos_supported;	/* supported class of services */
	u32		pid;	/* port ID */
	u32		df_sz;	/* Max payload size */
	enum bfa_rport_state	state;	/* Rport State machine state */
	enum fc_cos	fc_cos;	/* FC classes of services */
	bfa_boolean_t	cisc;	/* CISC capable device */
	struct bfa_rport_symname_s symname; /* Symbolic Name */
	enum bfa_rport_function	scsi_function; /* Initiator/Target */
	struct bfa_rport_qos_attr_s qos_attr; /* qos attributes */
	enum bfa_port_speed curr_speed;	/* operating speed got from
					 * RPSC ELS. UNKNOWN, if RPSC
					 * is not supported */
	bfa_boolean_t	trl_enforced;	/* TRL enforced ? TRUE/FALSE */
	enum bfa_port_speed assigned_speed; /* Speed assigned by the user.
					     * will be used if RPSC is not
					     * supported by the rport */
};

/* Remote-end FC-PH link error status block counters. */
struct bfa_rport_remote_link_stats_s {
	u32	lfc;	/* Link Failure Count */
	u32	lsyc;	/* Loss of Synchronization Count */
	u32	lsic;	/* Loss of Signal Count */
	u32	pspec;	/* Primitive Sequence Protocol Error Count */
	u32	itwc;	/* Invalid Transmission Word Count */
	u32	icc;	/* Invalid CRC Count */
};
409
410
#define BFA_MAX_IO_INDEX	7
#define BFA_NO_IO_INDEX		9

/**
 * FCS itnim states
 */
enum bfa_itnim_state {
	BFA_ITNIM_OFFLINE	= 0,	/* offline */
	BFA_ITNIM_PRLI_SEND	= 1,	/* prli send */
	BFA_ITNIM_PRLI_SENT	= 2,	/* prli sent */
	BFA_ITNIM_PRLI_RETRY	= 3,	/* prli retry */
	BFA_ITNIM_HCB_ONLINE	= 4,	/* online callback */
	BFA_ITNIM_ONLINE	= 5,	/* online */
	BFA_ITNIM_HCB_OFFLINE	= 6,	/* offline callback */
	BFA_ITNIM_INITIATIOR	= 7,	/* initiator
					 * (identifier spelling is part of
					 * the ABI; left as-is) */
};

/**
 * FCS remote port statistics
 */
struct bfa_itnim_stats_s {
	u32	onlines;	/* num rport online */
	u32	offlines;	/* num rport offline */
	u32	prli_sent;	/* num prli sent out */
	u32	fcxp_alloc_wait; /* num fcxp alloc waits */
	u32	prli_rsp_err;	/* num prli rsp errors */
	u32	prli_rsp_acc;	/* num prli rsp accepts */
	u32	initiator;	/* rport is an initiator */
	u32	prli_rsp_parse_err; /* prli rsp parsing errors */
	u32	prli_rsp_rjt;	/* num prli rsp rejects */
	u32	timeout;	/* num timeouts detected */
	u32	sler;		/* num sler notification from BFA */
	u32	rsvd;		/* padding for 64 bit alignment */
};

/**
 * FCS itnim attributes returned in queries
 */
struct bfa_itnim_attr_s {
	enum bfa_itnim_state state;	/* FCS itnim state */
	u8	retry;		/* data retransmission support */
	u8	task_retry_id;	/* task retry ident support */
	u8	rec_support;	/* REC supported */
	u8	conf_comp;	/* confirmed completion supp */
};
456
457#endif /* __BFA_DEFS_FCS_H__ */
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
new file mode 100644
index 000000000000..56226fcf9470
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -0,0 +1,1081 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_SVC_H__
19#define __BFA_DEFS_SVC_H__
20
21#include "bfa_defs.h"
22#include "bfa_fc.h"
23#include "bfi.h"
24
25#define BFA_IOCFC_INTR_DELAY 1125
26#define BFA_IOCFC_INTR_LATENCY 225
27#define BFA_IOCFCOE_INTR_DELAY 25
28#define BFA_IOCFCOE_INTR_LATENCY 5
29
30/**
31 * Interrupt coalescing configuration.
32 */
33#pragma pack(1)
34struct bfa_iocfc_intr_attr_s {
35 u8 coalesce; /* enable/disable coalescing */
36 u8 rsvd[3];
37 u16 latency; /* latency in microseconds */
38 u16 delay; /* delay in microseconds */
39};
40
 41/**
 42 * IOC firmware configuration
 43 */
 44struct bfa_iocfc_fwcfg_s {
 45	u16 num_fabrics; /* number of fabrics */
 46	u16 num_lports; /* number of local lports */
 47	u16 num_rports; /* number of remote ports */
 48	u16 num_ioim_reqs; /* number of IO reqs */
 49	u16 num_tskim_reqs; /* task management requests */
 50	u16 num_iotm_reqs; /* number of TM IO reqs */
 51	u16 num_tsktm_reqs; /* TM task management requests*/
 52	u16 num_fcxp_reqs; /* unassisted FC exchanges */
 53	u16 num_uf_bufs; /* unsolicited recv buffers */
 54	u8 num_cqs;
 55	u8 fw_tick_res; /* FW clock resolution in ms */
 56	u8 rsvd[4];
 57};
58#pragma pack()
59
60struct bfa_iocfc_drvcfg_s {
61 u16 num_reqq_elems; /* number of req queue elements */
62 u16 num_rspq_elems; /* number of rsp queue elements */
63 u16 num_sgpgs; /* number of total SG pages */
64 u16 num_sboot_tgts; /* number of SAN boot targets */
65 u16 num_sboot_luns; /* number of SAN boot luns */
66 u16 ioc_recover; /* IOC recovery mode */
67 u16 min_cfg; /* minimum configuration */
68 u16 path_tov; /* device path timeout */
69 bfa_boolean_t delay_comp; /* delay completion of
70 failed inflight IOs */
71 u32 rsvd;
72};
73
74/**
75 * IOC configuration
76 */
77struct bfa_iocfc_cfg_s {
78 struct bfa_iocfc_fwcfg_s fwcfg; /* firmware side config */
79 struct bfa_iocfc_drvcfg_s drvcfg; /* driver side config */
80};
81
 82/**
 83 * IOC firmware IO stats
 84 */
 85struct bfa_fw_io_stats_s {
 86	u32 host_abort; /* IO aborted by host driver*/
 87	u32 host_cleanup; /* IO clean up by host driver */
 88
 89	u32 fw_io_timeout; /* IOs timedout */
 90	u32 fw_frm_parse; /* frame parsed by f/w */
 91	u32 fw_frm_data; /* fcp_data frame parsed by f/w */
 92	u32 fw_frm_rsp; /* fcp_rsp frame parsed by f/w */
 93	u32 fw_frm_xfer_rdy; /* xfer_rdy frame parsed by f/w */
 94	u32 fw_frm_bls_acc; /* BLS ACC frame parsed by f/w */
 95	u32 fw_frm_tgt_abort; /* target ABTS parsed by f/w */
 96	u32 fw_frm_unknown; /* unknown parsed by f/w */
 97	u32 fw_data_dma; /* f/w DMA'ed the data frame */
 98	u32 fw_frm_drop; /* f/w drop the frame */
 99
 100	u32 rec_timeout; /* FW rec timed out */
 101	u32 error_rec; /* FW sending rec on
 102			 * an error condition*/
 103	u32 wait_for_si; /* FW wait for SI */
 104	u32 rec_rsp_inval; /* REC rsp invalid */
 105	u32 seqr_io_abort; /* target does not know cmd so abort */
 106	u32 seqr_io_retry; /* SEQR failed so retry IO */
 107
 108	u32 itn_cisc_upd_rsp; /* ITN cisc updated on fcp_rsp */
 109	u32 itn_cisc_upd_data; /* ITN cisc updated on fcp_data */
 110	u32 itn_cisc_upd_xfer_rdy; /* ITN cisc updated on xfer_rdy */
 111
 112	u32 fcp_data_lost; /* fcp data lost */
 113
 114	u32 ro_set_in_xfer_rdy; /* Target set RO in Xfer_rdy frame */
 115	u32 xfer_rdy_ooo_err; /* Out of order Xfer_rdy received */
 116	u32 xfer_rdy_unknown_err; /* unknown error in xfer_rdy frame */
 117
 118	u32 io_abort_timeout; /* ABTS timedout */
 119	u32 sler_initiated; /* SLER initiated */
 120
 121	u32 unexp_fcp_rsp; /* fcp response in wrong state */
 122
 123	u32 fcp_rsp_under_run; /* fcp rsp IO underrun */
 124	u32 fcp_rsp_under_run_wr; /* fcp rsp IO underrun for write */
 125	u32 fcp_rsp_under_run_err; /* fcp rsp IO underrun error */
 126	u32 fcp_rsp_resid_inval; /* invalid residue */
 127	u32 fcp_rsp_over_run; /* fcp rsp IO overrun */
 128	u32 fcp_rsp_over_run_err; /* fcp rsp IO overrun error */
 129	u32 fcp_rsp_proto_err; /* protocol error in fcp rsp */
 130	u32 fcp_rsp_sense_err; /* error in sense info in fcp rsp */
 131	u32 fcp_conf_req; /* FCP conf requested */
 132
 133	u32 tgt_aborted_io; /* target initiated abort */
 134
 135	u32 ioh_edtov_timeout_event;/* IOH edtov timer popped */
 136	u32 ioh_fcp_rsp_excp_event; /* IOH FCP_RSP exception */
 137	u32 ioh_fcp_conf_event; /* IOH FCP_CONF */
 138	u32 ioh_mult_frm_rsp_event; /* IOH multi_frame FCP_RSP */
 139	u32 ioh_hit_class2_event; /* IOH hit class2 */
 140	u32 ioh_miss_other_event; /* IOH miss other */
 141	u32 ioh_seq_cnt_err_event; /* IOH seq cnt error */
 142	u32 ioh_len_err_event; /* IOH len error - fcp_dl !=
 143				 * bytes xfered */
 144	u32 ioh_seq_len_err_event; /* IOH seq len error */
 145	u32 ioh_data_oor_event; /* Data out of range */
 146	u32 ioh_ro_ooo_event; /* Relative offset out of range */
 147	u32 ioh_cpu_owned_event; /* IOH hit -iost owned by f/w */
 148	u32 ioh_unexp_frame_event; /* unexpected frame received
 149				 * count */
 150	u32 ioh_err_int; /* IOH error int during data-phase
 151			 * for scsi write
 152			 */
 153};
154
155/**
156 * IOC port firmware stats
157 */
158
159struct bfa_fw_port_fpg_stats_s {
160 u32 intr_evt;
161 u32 intr;
162 u32 intr_excess;
163 u32 intr_cause0;
164 u32 intr_other;
165 u32 intr_other_ign;
166 u32 sig_lost;
167 u32 sig_regained;
168 u32 sync_lost;
169 u32 sync_to;
170 u32 sync_regained;
171 u32 div2_overflow;
172 u32 div2_underflow;
173 u32 efifo_overflow;
174 u32 efifo_underflow;
175 u32 idle_rx;
176 u32 lrr_rx;
177 u32 lr_rx;
178 u32 ols_rx;
179 u32 nos_rx;
180 u32 lip_rx;
181 u32 arbf0_rx;
182 u32 arb_rx;
183 u32 mrk_rx;
184 u32 const_mrk_rx;
185 u32 prim_unknown;
186};
187
188
189struct bfa_fw_port_lksm_stats_s {
190 u32 hwsm_success; /* hwsm state machine success */
191 u32 hwsm_fails; /* hwsm fails */
192 u32 hwsm_wdtov; /* hwsm timed out */
193 u32 swsm_success; /* swsm success */
194 u32 swsm_fails; /* swsm fails */
195 u32 swsm_wdtov; /* swsm timed out */
196 u32 busybufs; /* link init failed due to busybuf */
197 u32 buf_waits; /* bufwait state entries */
198 u32 link_fails; /* link failures */
199 u32 psp_errors; /* primitive sequence protocol errors */
200 u32 lr_unexp; /* No. of times LR rx-ed unexpectedly */
201 u32 lrr_unexp; /* No. of times LRR rx-ed unexpectedly */
202 u32 lr_tx; /* No. of times LR tx started */
203 u32 lrr_tx; /* No. of times LRR tx started */
204 u32 ols_tx; /* No. of times OLS tx started */
205 u32 nos_tx; /* No. of times NOS tx started */
206 u32 hwsm_lrr_rx; /* No. of times LRR rx-ed by HWSM */
207 u32 hwsm_lr_rx; /* No. of times LR rx-ed by HWSM */
208};
209
210struct bfa_fw_port_snsm_stats_s {
211 u32 hwsm_success; /* Successful hwsm terminations */
212 u32 hwsm_fails; /* hwsm fail count */
213 u32 hwsm_wdtov; /* hwsm timed out */
214 u32 swsm_success; /* swsm success */
215 u32 swsm_wdtov; /* swsm timed out */
216 u32 error_resets; /* error resets initiated by upsm */
217 u32 sync_lost; /* Sync loss count */
218 u32 sig_lost; /* Signal loss count */
219};
220
221struct bfa_fw_port_physm_stats_s {
222 u32 module_inserts; /* Module insert count */
223 u32 module_xtracts; /* Module extracts count */
224 u32 module_invalids; /* Invalid module inserted count */
225 u32 module_read_ign; /* Module validation status ignored */
226 u32 laser_faults; /* Laser fault count */
227 u32 rsvd;
228};
229
 230struct bfa_fw_fip_stats_s {
 231	u32 vlan_req; /* vlan discovery requests */
 232	u32 vlan_notify; /* vlan notifications */
 233	u32 vlan_err; /* vlan response error */
 234	u32 vlan_timeouts; /* vlan discovery timeouts */
 235	u32 vlan_invalids; /* invalid vlan in discovery advert. */
 236	u32 disc_req; /* Discovery solicit requests */
 237	u32 disc_rsp; /* Discovery solicit response */
 238	u32 disc_err; /* Discovery advt. parse errors */
 239	u32 disc_unsol; /* Discovery unsolicited */
 240	u32 disc_timeouts; /* Discovery timeouts */
 241	u32 disc_fcf_unavail; /* Discovery FCF Not Avail. */
 242	u32 linksvc_unsupp; /* Unsupported link service req */
 243	u32 linksvc_err; /* Parse error in link service req */
 244	u32 logo_req; /* FIP logos received */
 245	u32 clrvlink_req; /* Clear virtual link req */
 246	u32 op_unsupp; /* Unsupported FIP operation */
 247	u32 untagged; /* Untagged frames (ignored) */
 248	u32 invalid_version; /* Invalid FIP version */
 249};
250
251struct bfa_fw_lps_stats_s {
252 u32 mac_invalids; /* Invalid mac assigned */
253 u32 rsvd;
254};
255
 256struct bfa_fw_fcoe_stats_s {
 257	u32 cee_linkups; /* CEE link up count */
 258	u32 cee_linkdns; /* CEE link down count */
 259	u32 fip_linkups; /* FIP link up count */
 260	u32 fip_linkdns; /* FIP link down count */
 261	u32 fip_fails; /* FIP fail count */
 262	u32 mac_invalids; /* Invalid mac assigned */
 263};
264
265/**
266 * IOC firmware FCoE port stats
267 */
268struct bfa_fw_fcoe_port_stats_s {
269 struct bfa_fw_fcoe_stats_s fcoe_stats;
270 struct bfa_fw_fip_stats_s fip_stats;
271};
272
273/**
274 * IOC firmware FC uport stats
275 */
276struct bfa_fw_fc_uport_stats_s {
277 struct bfa_fw_port_snsm_stats_s snsm_stats;
278 struct bfa_fw_port_lksm_stats_s lksm_stats;
279};
280
281/**
282 * IOC firmware FC port stats
283 */
284union bfa_fw_fc_port_stats_s {
285 struct bfa_fw_fc_uport_stats_s fc_stats;
286 struct bfa_fw_fcoe_port_stats_s fcoe_stats;
287};
288
289/**
290 * IOC firmware port stats
291 */
292struct bfa_fw_port_stats_s {
293 struct bfa_fw_port_fpg_stats_s fpg_stats;
294 struct bfa_fw_port_physm_stats_s physm_stats;
295 union bfa_fw_fc_port_stats_s fc_port;
296};
297
298/**
299 * fcxchg module statistics
300 */
301struct bfa_fw_fcxchg_stats_s {
302 u32 ua_tag_inv;
303 u32 ua_state_inv;
304};
305
306struct bfa_fw_lpsm_stats_s {
307 u32 cls_rx;
308 u32 cls_tx;
309};
310
311/**
312 * Trunk statistics
313 */
314struct bfa_fw_trunk_stats_s {
315 u32 emt_recvd; /* Trunk EMT received */
316 u32 emt_accepted; /* Trunk EMT Accepted */
317 u32 emt_rejected; /* Trunk EMT rejected */
318 u32 etp_recvd; /* Trunk ETP received */
319 u32 etp_accepted; /* Trunk ETP Accepted */
320 u32 etp_rejected; /* Trunk ETP rejected */
321 u32 lr_recvd; /* Trunk LR received */
322 u32 rsvd; /* padding for 64 bit alignment */
323};
324
325struct bfa_fw_advsm_stats_s {
326 u32 flogi_sent; /* Flogi sent */
327 u32 flogi_acc_recvd; /* Flogi Acc received */
328 u32 flogi_rjt_recvd; /* Flogi rejects received */
329 u32 flogi_retries; /* Flogi retries */
330
331 u32 elp_recvd; /* ELP received */
332 u32 elp_accepted; /* ELP Accepted */
333 u32 elp_rejected; /* ELP rejected */
334 u32 elp_dropped; /* ELP dropped */
335};
336
337/**
338 * IOCFC firmware stats
339 */
340struct bfa_fw_iocfc_stats_s {
341 u32 cfg_reqs; /* cfg request */
342 u32 updq_reqs; /* update queue request */
343 u32 ic_reqs; /* interrupt coalesce reqs */
344 u32 unknown_reqs;
345 u32 set_intr_reqs; /* set interrupt reqs */
346};
347
348/**
349 * IOC attributes returned in queries
350 */
351struct bfa_iocfc_attr_s {
352 struct bfa_iocfc_cfg_s config; /* IOCFC config */
353 struct bfa_iocfc_intr_attr_s intr_attr; /* interrupt attr */
354};
355
356/**
357 * Eth_sndrcv mod stats
358 */
359struct bfa_fw_eth_sndrcv_stats_s {
360 u32 crc_err;
361 u32 rsvd; /* 64bit align */
362};
363
364/**
365 * CT MAC mod stats
366 */
367struct bfa_fw_mac_mod_stats_s {
368 u32 mac_on; /* MAC got turned-on */
369 u32 link_up; /* link-up */
370 u32 signal_off; /* lost signal */
371 u32 dfe_on; /* DFE on */
372 u32 mac_reset; /* # of MAC reset to bring lnk up */
373 u32 pcs_reset; /* # of PCS reset to bring lnk up */
374 u32 loopback; /* MAC got into serdes loopback */
375 u32 lb_mac_reset;
376 /* # of MAC reset to bring link up in loopback */
377 u32 lb_pcs_reset;
378 /* # of PCS reset to bring link up in loopback */
379 u32 rsvd; /* 64bit align */
380};
381
382/**
383 * CT MOD stats
384 */
385struct bfa_fw_ct_mod_stats_s {
386 u32 rxa_rds_undrun; /* RxA RDS underrun */
387 u32 rad_bpc_ovfl; /* RAD BPC overflow */
388 u32 rad_rlb_bpc_ovfl; /* RAD RLB BPC overflow */
389 u32 bpc_fcs_err; /* BPC FCS_ERR */
390 u32 txa_tso_hdr; /* TxA TSO header too long */
391 u32 rsvd; /* 64bit align */
392};
393
394/**
395 * IOC firmware stats
396 */
397struct bfa_fw_stats_s {
398 struct bfa_fw_ioc_stats_s ioc_stats;
399 struct bfa_fw_iocfc_stats_s iocfc_stats;
400 struct bfa_fw_io_stats_s io_stats;
401 struct bfa_fw_port_stats_s port_stats;
402 struct bfa_fw_fcxchg_stats_s fcxchg_stats;
403 struct bfa_fw_lpsm_stats_s lpsm_stats;
404 struct bfa_fw_lps_stats_s lps_stats;
405 struct bfa_fw_trunk_stats_s trunk_stats;
406 struct bfa_fw_advsm_stats_s advsm_stats;
407 struct bfa_fw_mac_mod_stats_s macmod_stats;
408 struct bfa_fw_ct_mod_stats_s ctmod_stats;
409 struct bfa_fw_eth_sndrcv_stats_s ethsndrcv_stats;
410};
411
412#define BFA_IOCFC_PATHTOV_MAX 60
413#define BFA_IOCFC_QDEPTH_MAX 2000
414
415/**
416 * QoS states
417 */
418enum bfa_qos_state {
419 BFA_QOS_ONLINE = 1, /* QoS is online */
420 BFA_QOS_OFFLINE = 2, /* QoS is offline */
421};
422
423/**
424 * QoS Priority levels.
425 */
426enum bfa_qos_priority {
427 BFA_QOS_UNKNOWN = 0,
428 BFA_QOS_HIGH = 1, /* QoS Priority Level High */
429 BFA_QOS_MED = 2, /* QoS Priority Level Medium */
430 BFA_QOS_LOW = 3, /* QoS Priority Level Low */
431};
432
433/**
434 * QoS bandwidth allocation for each priority level
435 */
436enum bfa_qos_bw_alloc {
437 BFA_QOS_BW_HIGH = 60, /* bandwidth allocation for High */
438 BFA_QOS_BW_MED = 30, /* bandwidth allocation for Medium */
439 BFA_QOS_BW_LOW = 10, /* bandwidth allocation for Low */
440};
441#pragma pack(1)
442/**
443 * QoS attribute returned in QoS Query
444 */
445struct bfa_qos_attr_s {
446 u8 state; /* QoS current state */
447 u8 rsvd[3];
448 u32 total_bb_cr; /* Total BB Credits */
449};
450
451/**
452 * These fields should be displayed only from the CLI.
453 * There will be a separate BFAL API (get_qos_vc_attr ?)
454 * to retrieve this.
455 *
456 */
457#define BFA_QOS_MAX_VC 16
458
459struct bfa_qos_vc_info_s {
460 u8 vc_credit;
461 u8 borrow_credit;
462 u8 priority;
463 u8 resvd;
464};
465
466struct bfa_qos_vc_attr_s {
467 u16 total_vc_count; /* Total VC Count */
468 u16 shared_credit;
469 u32 elp_opmode_flags;
470 struct bfa_qos_vc_info_s vc_info[BFA_QOS_MAX_VC]; /* as many as
471 * total_vc_count */
472};
473
474/**
475 * QoS statistics
476 */
477struct bfa_qos_stats_s {
478 u32 flogi_sent; /* QoS Flogi sent */
479 u32 flogi_acc_recvd; /* QoS Flogi Acc received */
480 u32 flogi_rjt_recvd; /* QoS Flogi rejects received */
481 u32 flogi_retries; /* QoS Flogi retries */
482
483 u32 elp_recvd; /* QoS ELP received */
484 u32 elp_accepted; /* QoS ELP Accepted */
485 u32 elp_rejected; /* QoS ELP rejected */
486 u32 elp_dropped; /* QoS ELP dropped */
487
488 u32 qos_rscn_recvd; /* QoS RSCN received */
489 u32 rsvd; /* padding for 64 bit alignment */
490};
491
492/**
493 * FCoE statistics
494 */
495struct bfa_fcoe_stats_s {
496 u64 secs_reset; /* Seconds since stats reset */
497 u64 cee_linkups; /* CEE link up */
498 u64 cee_linkdns; /* CEE link down */
499 u64 fip_linkups; /* FIP link up */
500 u64 fip_linkdns; /* FIP link down */
501 u64 fip_fails; /* FIP failures */
502 u64 mac_invalids; /* Invalid mac assignments */
503 u64 vlan_req; /* Vlan requests */
504 u64 vlan_notify; /* Vlan notifications */
505 u64 vlan_err; /* Vlan notification errors */
506 u64 vlan_timeouts; /* Vlan request timeouts */
507 u64 vlan_invalids; /* Vlan invalids */
508 u64 disc_req; /* Discovery requests */
509 u64 disc_rsp; /* Discovery responses */
510 u64 disc_err; /* Discovery error frames */
511 u64 disc_unsol; /* Discovery unsolicited */
512 u64 disc_timeouts; /* Discovery timeouts */
513 u64 disc_fcf_unavail; /* Discovery FCF not avail */
514 u64 linksvc_unsupp; /* FIP link service req unsupp. */
515 u64 linksvc_err; /* FIP link service req errors */
516 u64 logo_req; /* FIP logos received */
517 u64 clrvlink_req; /* Clear virtual link requests */
518 u64 op_unsupp; /* FIP operation unsupp. */
519 u64 untagged; /* FIP untagged frames */
520 u64 txf_ucast; /* Tx FCoE unicast frames */
521 u64 txf_ucast_vlan; /* Tx FCoE unicast vlan frames */
522 u64 txf_ucast_octets; /* Tx FCoE unicast octets */
523 u64 txf_mcast; /* Tx FCoE multicast frames */
524 u64 txf_mcast_vlan; /* Tx FCoE multicast vlan frames */
525 u64 txf_mcast_octets; /* Tx FCoE multicast octets */
526 u64 txf_bcast; /* Tx FCoE broadcast frames */
527 u64 txf_bcast_vlan; /* Tx FCoE broadcast vlan frames */
528 u64 txf_bcast_octets; /* Tx FCoE broadcast octets */
529 u64 txf_timeout; /* Tx timeouts */
530 u64 txf_parity_errors; /* Transmit parity err */
531 u64 txf_fid_parity_errors; /* Transmit FID parity err */
532 u64 rxf_ucast_octets; /* Rx FCoE unicast octets */
533 u64 rxf_ucast; /* Rx FCoE unicast frames */
534 u64 rxf_ucast_vlan; /* Rx FCoE unicast vlan frames */
535 u64 rxf_mcast_octets; /* Rx FCoE multicast octets */
536 u64 rxf_mcast; /* Rx FCoE multicast frames */
537 u64 rxf_mcast_vlan; /* Rx FCoE multicast vlan frames */
538 u64 rxf_bcast_octets; /* Rx FCoE broadcast octets */
539 u64 rxf_bcast; /* Rx FCoE broadcast frames */
540 u64 rxf_bcast_vlan; /* Rx FCoE broadcast vlan frames */
541};
542
543/**
544 * QoS or FCoE stats (fcport stats excluding physical FC port stats)
545 */
546union bfa_fcport_stats_u {
547 struct bfa_qos_stats_s fcqos;
548 struct bfa_fcoe_stats_s fcoe;
549};
550#pragma pack()
551
552struct bfa_fcpim_del_itn_stats_s {
553 u32 del_itn_iocomp_aborted; /* Aborted IO requests */
554 u32 del_itn_iocomp_timedout; /* IO timeouts */
555 u32 del_itn_iocom_sqer_needed; /* IO retry for SQ error recovery */
556 u32 del_itn_iocom_res_free; /* Delayed freeing of IO resources */
557 u32 del_itn_iocom_hostabrts; /* Host IO abort requests */
558 u32 del_itn_total_ios; /* Total IO count */
559 u32 del_io_iocdowns; /* IO cleaned-up due to IOC down */
560 u32 del_tm_iocdowns; /* TM cleaned-up due to IOC down */
561};
562
 563struct bfa_itnim_iostats_s {
 564
 565	u32 total_ios; /* Total IO Requests */
 566	u32 input_reqs; /* Data in-bound requests */
 567	u32 output_reqs; /* Data out-bound requests */
 568	u32 io_comps; /* Total IO Completions */
 569	u32 wr_throughput; /* Write data transferred in bytes */
 570	u32 rd_throughput; /* Read data transferred in bytes */
 571
 572	u32 iocomp_ok; /* Slowpath IO completions */
 573	u32 iocomp_underrun; /* IO underrun */
 574	u32 iocomp_overrun; /* IO overrun */
 575	u32 qwait; /* IO Request-Q wait */
 576	u32 qresumes; /* IO Request-Q wait done */
 577	u32 no_iotags; /* No free IO tag */
 578	u32 iocomp_timedout; /* IO timeouts */
 579	u32 iocom_nexus_abort; /* IO failure due to target offline */
 580	u32 iocom_proto_err; /* IO protocol errors */
 581	u32 iocom_dif_err; /* IO SBC-3 protection errors */
 582
 583	u32 iocom_sqer_needed; /* fcp-2 error recovery failed */
 584	u32 iocom_res_free; /* Delayed freeing of IO tag */
 585
 586
 587	u32 io_aborts; /* Host IO abort requests */
 588	u32 iocom_hostabrts; /* Host IO abort completions */
 589	u32 io_cleanups; /* IO clean-up requests */
 590	u32 path_tov_expired; /* IO path tov expired */
 591	u32 iocomp_aborted; /* IO abort completions */
 592	u32 io_iocdowns; /* IO cleaned-up due to IOC down */
 593	u32 iocom_utags; /* IO comp with unknown tags */
 594
 595	u32 io_tmaborts; /* Abort request due to TM command */
 596	u32 tm_io_comps; /* Abort completion due to TM command */
 597
 598	u32 creates; /* IT Nexus create requests */
 599	u32 fw_create; /* IT Nexus FW create requests */
 600	u32 create_comps; /* IT Nexus FW create completions */
 601	u32 onlines; /* IT Nexus onlines */
 602	u32 offlines; /* IT Nexus offlines */
 603	u32 fw_delete; /* IT Nexus FW delete requests */
 604	u32 delete_comps; /* IT Nexus FW delete completions */
 605	u32 deletes; /* IT Nexus delete requests */
 606	u32 sler_events; /* SLER events */
 607	u32 ioc_disabled; /* Num IOC disables */
 608	u32 cleanup_comps; /* IT Nexus cleanup completions */
 609
 610	u32 tm_cmnds; /* TM Requests */
 611	u32 tm_fw_rsps; /* TM Completions */
 612	u32 tm_success; /* TM initiated IO cleanup success */
 613	u32 tm_failures; /* TM initiated IO cleanup failure */
 614	u32 no_tskims; /* No free TM tag */
 615	u32 tm_qwait; /* TM Request-Q wait */
 616	u32 tm_qresumes; /* TM Request-Q wait done */
 617
 618	u32 tm_iocdowns; /* TM cleaned-up due to IOC down */
 619	u32 tm_cleanups; /* TM cleanup requests */
 620	u32 tm_cleanup_comps; /* TM cleanup completions */
 621};
622
623/* Modify char* port_stt[] in bfal_port.c if a new state was added */
624enum bfa_port_states {
625 BFA_PORT_ST_UNINIT = 1,
626 BFA_PORT_ST_ENABLING_QWAIT = 2,
627 BFA_PORT_ST_ENABLING = 3,
628 BFA_PORT_ST_LINKDOWN = 4,
629 BFA_PORT_ST_LINKUP = 5,
630 BFA_PORT_ST_DISABLING_QWAIT = 6,
631 BFA_PORT_ST_DISABLING = 7,
632 BFA_PORT_ST_DISABLED = 8,
633 BFA_PORT_ST_STOPPED = 9,
634 BFA_PORT_ST_IOCDOWN = 10,
635 BFA_PORT_ST_IOCDIS = 11,
636 BFA_PORT_ST_FWMISMATCH = 12,
637 BFA_PORT_ST_PREBOOT_DISABLED = 13,
638 BFA_PORT_ST_TOGGLING_QWAIT = 14,
639 BFA_PORT_ST_MAX_STATE,
640};
641
642/**
643 * Port operational type (in sync with SNIA port type).
644 */
645enum bfa_port_type {
646 BFA_PORT_TYPE_UNKNOWN = 1, /* port type is unknown */
647 BFA_PORT_TYPE_NPORT = 5, /* P2P with switched fabric */
648 BFA_PORT_TYPE_NLPORT = 6, /* public loop */
649 BFA_PORT_TYPE_LPORT = 20, /* private loop */
650 BFA_PORT_TYPE_P2P = 21, /* P2P with no switched fabric */
651 BFA_PORT_TYPE_VPORT = 22, /* NPIV - virtual port */
652};
653
654/**
655 * Port topology setting. A port's topology and fabric login status
656 * determine its operational type.
657 */
658enum bfa_port_topology {
659 BFA_PORT_TOPOLOGY_NONE = 0, /* No valid topology */
660 BFA_PORT_TOPOLOGY_P2P = 1, /* P2P only */
661 BFA_PORT_TOPOLOGY_LOOP = 2, /* LOOP topology */
662 BFA_PORT_TOPOLOGY_AUTO = 3, /* auto topology selection */
663};
664
665/**
666 * Physical port loopback types.
667 */
668enum bfa_port_opmode {
669 BFA_PORT_OPMODE_NORMAL = 0x00, /* normal non-loopback mode */
670 BFA_PORT_OPMODE_LB_INT = 0x01, /* internal loop back */
671 BFA_PORT_OPMODE_LB_SLW = 0x02, /* serial link wrapback (serdes) */
672 BFA_PORT_OPMODE_LB_EXT = 0x04, /* external loop back (serdes) */
673 BFA_PORT_OPMODE_LB_CBL = 0x08, /* cabled loop back */
674 BFA_PORT_OPMODE_LB_NLINT = 0x20, /* NL_Port internal loopback */
675};
676
677#define BFA_PORT_OPMODE_LB_HARD(_mode) \
678 ((_mode == BFA_PORT_OPMODE_LB_INT) || \
679 (_mode == BFA_PORT_OPMODE_LB_SLW) || \
680 (_mode == BFA_PORT_OPMODE_LB_EXT))
681
682/**
683 * Port link state
684 */
685enum bfa_port_linkstate {
686 BFA_PORT_LINKUP = 1, /* Physical port/Trunk link up */
687 BFA_PORT_LINKDOWN = 2, /* Physical port/Trunk link down */
688};
689
690/**
691 * Port link state reason code
692 */
693enum bfa_port_linkstate_rsn {
694 BFA_PORT_LINKSTATE_RSN_NONE = 0,
695 BFA_PORT_LINKSTATE_RSN_DISABLED = 1,
696 BFA_PORT_LINKSTATE_RSN_RX_NOS = 2,
697 BFA_PORT_LINKSTATE_RSN_RX_OLS = 3,
698 BFA_PORT_LINKSTATE_RSN_RX_LIP = 4,
699 BFA_PORT_LINKSTATE_RSN_RX_LIPF7 = 5,
700 BFA_PORT_LINKSTATE_RSN_SFP_REMOVED = 6,
701 BFA_PORT_LINKSTATE_RSN_PORT_FAULT = 7,
702 BFA_PORT_LINKSTATE_RSN_RX_LOS = 8,
703 BFA_PORT_LINKSTATE_RSN_LOCAL_FAULT = 9,
704 BFA_PORT_LINKSTATE_RSN_REMOTE_FAULT = 10,
705 BFA_PORT_LINKSTATE_RSN_TIMEOUT = 11,
706
707
708
709 /* CEE related reason codes/errors */
710 CEE_LLDP_INFO_AGED_OUT = 20,
711 CEE_LLDP_SHUTDOWN_TLV_RCVD = 21,
712 CEE_PEER_NOT_ADVERTISE_DCBX = 22,
713 CEE_PEER_NOT_ADVERTISE_PG = 23,
714 CEE_PEER_NOT_ADVERTISE_PFC = 24,
715 CEE_PEER_NOT_ADVERTISE_FCOE = 25,
716 CEE_PG_NOT_COMPATIBLE = 26,
717 CEE_PFC_NOT_COMPATIBLE = 27,
718 CEE_FCOE_NOT_COMPATIBLE = 28,
719 CEE_BAD_PG_RCVD = 29,
720 CEE_BAD_BW_RCVD = 30,
721 CEE_BAD_PFC_RCVD = 31,
722 CEE_BAD_APP_PRI_RCVD = 32,
723 CEE_FCOE_PRI_PFC_OFF = 33,
724 CEE_DUP_CONTROL_TLV_RCVD = 34,
725 CEE_DUP_FEAT_TLV_RCVD = 35,
726 CEE_APPLY_NEW_CFG = 36, /* reason, not error */
727 CEE_PROTOCOL_INIT = 37, /* reason, not error */
728 CEE_PHY_LINK_DOWN = 38,
729 CEE_LLS_FCOE_ABSENT = 39,
730 CEE_LLS_FCOE_DOWN = 40,
731 CEE_ISCSI_NOT_COMPATIBLE = 41,
732 CEE_ISCSI_PRI_PFC_OFF = 42,
733 CEE_ISCSI_PRI_OVERLAP_FCOE_PRI = 43
734};
735#pragma pack(1)
736/**
737 * Physical port configuration
738 */
739struct bfa_port_cfg_s {
740 u8 topology; /* bfa_port_topology */
741 u8 speed; /* enum bfa_port_speed */
742 u8 trunked; /* trunked or not */
743 u8 qos_enabled; /* qos enabled or not */
744 u8 cfg_hardalpa; /* is hard alpa configured */
745 u8 hardalpa; /* configured hard alpa */
746 u16 maxfrsize; /* maximum frame size */
747 u8 rx_bbcredit; /* receive buffer credits */
748 u8 tx_bbcredit; /* transmit buffer credits */
749 u8 ratelimit; /* ratelimit enabled or not */
750 u8 trl_def_speed; /* ratelimit default speed */
751 u16 path_tov; /* device path timeout */
752 u16 q_depth; /* SCSI Queue depth */
753};
754#pragma pack()
755
756/**
757 * Port attribute values.
758 */
 759struct bfa_port_attr_s {
 760	/*
 761	 * Static fields
 762	 */
 763	wwn_t nwwn; /* node wwn */
 764	wwn_t pwwn; /* port wwn */
 765	wwn_t factorynwwn; /* factory node wwn */
 766	wwn_t factorypwwn; /* factory port wwn */
 767	enum fc_cos cos_supported; /* supported class of services */
 768	u32 rsvd;
 769	struct fc_symname_s port_symname; /* port symbolic name */
 770	enum bfa_port_speed speed_supported; /* supported speeds */
 771	bfa_boolean_t pbind_enabled;
 772
 773	/*
 774	 * Configured values
 775	 */
 776	struct bfa_port_cfg_s pport_cfg; /* pport cfg */
 777
 778	/*
 779	 * Dynamic field - info from BFA
 780	 */
 781	enum bfa_port_states port_state; /* current port state */
 782	enum bfa_port_speed speed; /* current speed */
 783	enum bfa_port_topology topology; /* current topology */
 784	bfa_boolean_t beacon; /* current beacon status */
 785	bfa_boolean_t link_e2e_beacon; /* link beacon is on */
 786	bfa_boolean_t plog_enabled; /* portlog is enabled */
 787
 788	/*
 789	 * Dynamic field - info from FCS
 790	 */
 791	u32 pid; /* port ID */
 792	enum bfa_port_type port_type; /* current topology */
 793	u32 loopback; /* external loopback */
 794	u32 authfail; /* auth fail state */
 795	bfa_boolean_t io_profile; /* get it from fcpim mod */
 796	u8 pad[4]; /* for 64-bit alignment */
 797
 798	/* FCoE specific */
 799	u16 fcoe_vlan;
 800	u8 rsvd1[6];
 801};
802
803/**
804 * Port FCP mappings.
805 */
806struct bfa_port_fcpmap_s {
807 char osdevname[256];
808 u32 bus;
809 u32 target;
810 u32 oslun;
811 u32 fcid;
812 wwn_t nwwn;
813 wwn_t pwwn;
814 u64 fcplun;
815 char luid[256];
816};
817
818/**
819 * Port RNID info.
820 */
821struct bfa_port_rnid_s {
822 wwn_t wwn;
823 u32 unittype;
824 u32 portid;
825 u32 attached_nodes_num;
826 u16 ip_version;
827 u16 udp_port;
828 u8 ipaddr[16];
829 u16 rsvd;
830 u16 topologydiscoveryflags;
831};
832
833#pragma pack(1)
834struct bfa_fcport_fcf_s {
835 wwn_t name; /* FCF name */
836 wwn_t fabric_name; /* Fabric Name */
837 u8 fipenabled; /* FIP enabled or not */
838 u8 fipfailed; /* FIP failed or not */
839 u8 resv[2];
840 u8 pri; /* FCF priority */
841 u8 version; /* FIP version used */
842 u8 available; /* Available for login */
843 u8 fka_disabled; /* FKA is disabled */
844 u8 maxsz_verified; /* FCoE max size verified */
845 u8 fc_map[3]; /* FC map */
846 u16 vlan; /* FCoE vlan tag/priority */
847 u32 fka_adv_per; /* FIP ka advert. period */
848 mac_t mac; /* FCF mac */
849};
850
851/**
852 * Trunk states for BCU/BFAL
853 */
854enum bfa_trunk_state {
855 BFA_TRUNK_DISABLED = 0, /* Trunk is not configured */
856 BFA_TRUNK_ONLINE = 1, /* Trunk is online */
857 BFA_TRUNK_OFFLINE = 2, /* Trunk is offline */
858};
859
860/**
861 * VC attributes for trunked link
862 */
863struct bfa_trunk_vc_attr_s {
864 u32 bb_credit;
865 u32 elp_opmode_flags;
866 u32 req_credit;
867 u16 vc_credits[8];
868};
869
870/**
871 * Link state information
872 */
873struct bfa_port_link_s {
874 u8 linkstate; /* Link state bfa_port_linkstate */
875 u8 linkstate_rsn; /* bfa_port_linkstate_rsn_t */
876 u8 topology; /* P2P/LOOP bfa_port_topology */
877 u8 speed; /* Link speed (1/2/4/8 G) */
878 u32 linkstate_opt; /* Linkstate optional data (debug) */
879 u8 trunked; /* Trunked or not (1 or 0) */
880 u8 resvd[3];
881 struct bfa_qos_attr_s qos_attr; /* QoS Attributes */
882 union {
883 struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */
884 struct bfa_trunk_vc_attr_s trunk_vc_attr;
885 struct bfa_fcport_fcf_s fcf; /* FCF information (for FCoE) */
886 } vc_fcf;
887};
888#pragma pack()
889
890enum bfa_trunk_link_fctl {
891 BFA_TRUNK_LINK_FCTL_NORMAL,
892 BFA_TRUNK_LINK_FCTL_VC,
893 BFA_TRUNK_LINK_FCTL_VC_QOS,
894};
895
896enum bfa_trunk_link_state {
897 BFA_TRUNK_LINK_STATE_UP = 1, /* link part of trunk */
898 BFA_TRUNK_LINK_STATE_DN_LINKDN = 2, /* physical link down */
899 BFA_TRUNK_LINK_STATE_DN_GRP_MIS = 3, /* trunk group different */
900 BFA_TRUNK_LINK_STATE_DN_SPD_MIS = 4, /* speed mismatch */
901 BFA_TRUNK_LINK_STATE_DN_MODE_MIS = 5, /* remote port not trunked */
902};
903
904#define BFA_TRUNK_MAX_PORTS 2
905struct bfa_trunk_link_attr_s {
906 wwn_t trunk_wwn;
907 enum bfa_trunk_link_fctl fctl;
908 enum bfa_trunk_link_state link_state;
909 enum bfa_port_speed speed;
910 u32 deskew;
911};
912
913struct bfa_trunk_attr_s {
914 enum bfa_trunk_state state;
915 enum bfa_port_speed speed;
916 u32 port_id;
917 u32 rsvd;
918 struct bfa_trunk_link_attr_s link_attr[BFA_TRUNK_MAX_PORTS];
919};
920
921struct bfa_rport_hal_stats_s {
922 u32 sm_un_cr; /* uninit: create events */
923 u32 sm_un_unexp; /* uninit: exception events */
924 u32 sm_cr_on; /* created: online events */
925 u32 sm_cr_del; /* created: delete events */
926 u32 sm_cr_hwf; /* created: IOC down */
927 u32 sm_cr_unexp; /* created: exception events */
928 u32 sm_fwc_rsp; /* fw create: f/w responses */
929 u32 sm_fwc_del; /* fw create: delete events */
930 u32 sm_fwc_off; /* fw create: offline events */
931 u32 sm_fwc_hwf; /* fw create: IOC down */
932 u32 sm_fwc_unexp; /* fw create: exception events*/
933 u32 sm_on_off; /* online: offline events */
934 u32 sm_on_del; /* online: delete events */
935 u32 sm_on_hwf; /* online: IOC down events */
936 u32 sm_on_unexp; /* online: exception events */
937 u32 sm_fwd_rsp; /* fw delete: fw responses */
938 u32 sm_fwd_del; /* fw delete: delete events */
939 u32 sm_fwd_hwf; /* fw delete: IOC down events */
940 u32 sm_fwd_unexp; /* fw delete: exception events*/
941 u32 sm_off_del; /* offline: delete events */
942 u32 sm_off_on; /* offline: online events */
943 u32 sm_off_hwf; /* offline: IOC down events */
944 u32 sm_off_unexp; /* offline: exception events */
945 u32 sm_del_fwrsp; /* delete: fw responses */
946 u32 sm_del_hwf; /* delete: IOC down events */
947 u32 sm_del_unexp; /* delete: exception events */
948 u32 sm_delp_fwrsp; /* delete pend: fw responses */
949 u32 sm_delp_hwf; /* delete pend: IOC downs */
950 u32 sm_delp_unexp; /* delete pend: exceptions */
951 u32 sm_offp_fwrsp; /* off-pending: fw responses */
952 u32 sm_offp_del; /* off-pending: deletes */
953 u32 sm_offp_hwf; /* off-pending: IOC downs */
954 u32 sm_offp_unexp; /* off-pending: exceptions */
955 u32 sm_iocd_off; /* IOC down: offline events */
956 u32 sm_iocd_del; /* IOC down: delete events */
957 u32 sm_iocd_on; /* IOC down: online events */
958 u32 sm_iocd_unexp; /* IOC down: exceptions */
959 u32 rsvd;
960};
961#pragma pack(1)
962/**
963 * Rport's QoS attributes
964 */
965struct bfa_rport_qos_attr_s {
966 u8 qos_priority; /* rport's QoS priority */
967 u8 rsvd[3];
968 u32 qos_flow_id; /* QoS flow Id */
969};
970#pragma pack()
971
972#define BFA_IOBUCKET_MAX 14
973
974struct bfa_itnim_latency_s {
975 u32 min[BFA_IOBUCKET_MAX];
976 u32 max[BFA_IOBUCKET_MAX];
977 u32 count[BFA_IOBUCKET_MAX];
978 u32 avg[BFA_IOBUCKET_MAX];
979};
980
981struct bfa_itnim_ioprofile_s {
982 u32 clock_res_mul;
983 u32 clock_res_div;
984 u32 index;
985 u32 io_profile_start_time; /* IO profile start time */
986 u32 iocomps[BFA_IOBUCKET_MAX]; /* IO completed */
987 struct bfa_itnim_latency_s io_latency;
988};
989
990/**
991 * FC physical port statistics.
992 */
993struct bfa_port_fc_stats_s {
994 u64 secs_reset; /* Seconds since stats is reset */
995 u64 tx_frames; /* Tx frames */
996 u64 tx_words; /* Tx words */
997 u64 tx_lip; /* Tx LIP */
998 u64 tx_nos; /* Tx NOS */
999 u64 tx_ols; /* Tx OLS */
1000 u64 tx_lr; /* Tx LR */
1001 u64 tx_lrr; /* Tx LRR */
1002 u64 rx_frames; /* Rx frames */
1003 u64 rx_words; /* Rx words */
1004 u64 lip_count; /* Rx LIP */
1005 u64 nos_count; /* Rx NOS */
1006 u64 ols_count; /* Rx OLS */
1007 u64 lr_count; /* Rx LR */
1008 u64 lrr_count; /* Rx LRR */
1009 u64 invalid_crcs; /* Rx CRC err frames */
1010 u64 invalid_crc_gd_eof; /* Rx CRC err good EOF frames */
1011 u64 undersized_frm; /* Rx undersized frames */
1012 u64 oversized_frm; /* Rx oversized frames */
1013 u64 bad_eof_frm; /* Rx frames with bad EOF */
1014 u64 error_frames; /* Errored frames */
1015 u64 dropped_frames; /* Dropped frames */
1016 u64 link_failures; /* Link Failure (LF) count */
1017 u64 loss_of_syncs; /* Loss of sync count */
1018 u64 loss_of_signals; /* Loss of signal count */
1019 u64 primseq_errs; /* Primitive sequence protocol err. */
1020 u64 bad_os_count; /* Invalid ordered sets */
1021 u64 err_enc_out; /* Encoding err nonframe_8b10b */
1022 u64 err_enc; /* Encoding err frame_8b10b */
1023};
1024
1025/**
1026 * Eth Physical Port statistics.
1027 */
1028struct bfa_port_eth_stats_s {
1029 u64 secs_reset; /* Seconds since stats is reset */
1030 u64 frame_64; /* Frames 64 bytes */
1031 u64 frame_65_127; /* Frames 65-127 bytes */
1032 u64 frame_128_255; /* Frames 128-255 bytes */
1033 u64 frame_256_511; /* Frames 256-511 bytes */
1034 u64 frame_512_1023; /* Frames 512-1023 bytes */
1035 u64 frame_1024_1518; /* Frames 1024-1518 bytes */
1036 u64 frame_1519_1522; /* Frames 1519-1522 bytes */
1037 u64 tx_bytes; /* Tx bytes */
1038 u64 tx_packets; /* Tx packets */
1039 u64 tx_mcast_packets; /* Tx multicast packets */
1040 u64 tx_bcast_packets; /* Tx broadcast packets */
1041 u64 tx_control_frame; /* Tx control frame */
1042 u64 tx_drop; /* Tx drops */
1043 u64 tx_jabber; /* Tx jabber */
1044 u64 tx_fcs_error; /* Tx FCS errors */
1045 u64 tx_fragments; /* Tx fragments */
1046 u64 rx_bytes; /* Rx bytes */
1047 u64 rx_packets; /* Rx packets */
1048 u64 rx_mcast_packets; /* Rx multicast packets */
1049 u64 rx_bcast_packets; /* Rx broadcast packets */
1050 u64 rx_control_frames; /* Rx control frames */
1051 u64 rx_unknown_opcode; /* Rx unknown opcode */
1052 u64 rx_drop; /* Rx drops */
1053 u64 rx_jabber; /* Rx jabber */
1054 u64 rx_fcs_error; /* Rx FCS errors */
1055 u64 rx_alignment_error; /* Rx alignment errors */
1056 u64 rx_frame_length_error; /* Rx frame len errors */
1057 u64 rx_code_error; /* Rx code errors */
1058 u64 rx_fragments; /* Rx fragments */
1059 u64 rx_pause; /* Rx pause */
1060 u64 rx_zero_pause; /* Rx zero pause */
1061 u64 tx_pause; /* Tx pause */
1062 u64 tx_zero_pause; /* Tx zero pause */
1063 u64 rx_fcoe_pause; /* Rx FCoE pause */
1064 u64 rx_fcoe_zero_pause; /* Rx FCoE zero pause */
1065 u64 tx_fcoe_pause; /* Tx FCoE pause */
1066 u64 tx_fcoe_zero_pause; /* Tx FCoE zero pause */
1067 u64 rx_iscsi_pause; /* Rx iSCSI pause */
1068 u64 rx_iscsi_zero_pause; /* Rx iSCSI zero pause */
1069 u64 tx_iscsi_pause; /* Tx iSCSI pause */
1070 u64 tx_iscsi_zero_pause; /* Tx iSCSI zero pause */
1071};
1072
1073/**
1074 * Port statistics.
1075 */
1076union bfa_port_stats_u {
1077 struct bfa_port_fc_stats_s fc;
1078 struct bfa_port_eth_stats_s eth;
1079};
1080
1081#endif /* __BFA_DEFS_SVC_H__ */
diff --git a/drivers/scsi/bfa/bfa_module.c b/drivers/scsi/bfa/bfa_drv.c
index a7fcc80c177e..14127646dc54 100644
--- a/drivers/scsi/bfa/bfa_module.c
+++ b/drivers/scsi/bfa/bfa_drv.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -14,10 +14,8 @@
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17#include <bfa.h> 17
18#include <defs/bfa_defs_pci.h> 18#include "bfa_modules.h"
19#include <cs/bfa_debug.h>
20#include <bfa_iocfc.h>
21 19
22/** 20/**
23 * BFA module list terminated by NULL 21 * BFA module list terminated by NULL
@@ -30,9 +28,6 @@ struct bfa_module_s *hal_mods[] = {
30 &hal_mod_uf, 28 &hal_mod_uf,
31 &hal_mod_rport, 29 &hal_mod_rport,
32 &hal_mod_fcpim, 30 &hal_mod_fcpim,
33#ifdef BFA_CFG_PBIND
34 &hal_mod_pbind,
35#endif
36 NULL 31 NULL
37}; 32};
38 33
@@ -74,17 +69,39 @@ bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
74 bfa_isr_unhandled, /* --------- */ 69 bfa_isr_unhandled, /* --------- */
75}; 70};
76 71
72
77/** 73/**
78 * Message handlers for mailbox command classes 74 * Message handlers for mailbox command classes
79 */ 75 */
80bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = { 76bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {
81 NULL, 77 NULL,
82 NULL, /* BFI_MC_IOC */ 78 NULL, /* BFI_MC_IOC */
83 NULL, /* BFI_MC_DIAG */ 79 NULL, /* BFI_MC_DIAG */
84 NULL, /* BFI_MC_FLASH */ 80 NULL, /* BFI_MC_FLASH */
85 NULL, /* BFI_MC_CEE */ 81 NULL, /* BFI_MC_CEE */
86 NULL, /* BFI_MC_PORT */ 82 NULL, /* BFI_MC_PORT */
87 bfa_iocfc_isr, /* BFI_MC_IOCFC */ 83 bfa_iocfc_isr, /* BFI_MC_IOCFC */
88 NULL, 84 NULL,
89}; 85};
90 86
87
88
89void
90bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
91{
92 struct bfa_port_s *port = &bfa->modules.port;
93 u32 dm_len;
94 u8 *dm_kva;
95 u64 dm_pa;
96
97 dm_len = bfa_port_meminfo();
98 dm_kva = bfa_meminfo_dma_virt(mi);
99 dm_pa = bfa_meminfo_dma_phys(mi);
100
101 memset(port, 0, sizeof(struct bfa_port_s));
102 bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
103 bfa_port_mem_claim(port, dm_kva, dm_pa);
104
105 bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
106 bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
107}
diff --git a/drivers/scsi/bfa/include/protocol/fc.h b/drivers/scsi/bfa/bfa_fc.h
index 436dd7c5643a..6eff705564eb 100644
--- a/drivers/scsi/bfa/include/protocol/fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -15,13 +15,50 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18#ifndef __FC_H__ 18#ifndef __BFA_FC_H__
19#define __FC_H__ 19#define __BFA_FC_H__
20 20
21#include <protocol/types.h> 21#include "bfa_os_inc.h"
22
23typedef u64 wwn_t;
24typedef u64 lun_t;
25
26#define WWN_NULL (0)
27#define FC_SYMNAME_MAX 256 /* max name server symbolic name size */
28#define FC_ALPA_MAX 128
22 29
23#pragma pack(1) 30#pragma pack(1)
24 31
32#define MAC_ADDRLEN (6)
33struct mac_s { u8 mac[MAC_ADDRLEN]; };
34#define mac_t struct mac_s
35
36/*
37 * generic SCSI cdb definition
38 */
39#define SCSI_MAX_CDBLEN 16
40struct scsi_cdb_s {
41 u8 scsi_cdb[SCSI_MAX_CDBLEN];
42};
43#define scsi_cdb_t struct scsi_cdb_s
44
45/* ------------------------------------------------------------
46 * SCSI status byte values
47 * ------------------------------------------------------------
48 */
49#define SCSI_STATUS_GOOD 0x00
50#define SCSI_STATUS_CHECK_CONDITION 0x02
51#define SCSI_STATUS_CONDITION_MET 0x04
52#define SCSI_STATUS_BUSY 0x08
53#define SCSI_STATUS_INTERMEDIATE 0x10
54#define SCSI_STATUS_ICM 0x14 /* intermediate condition met */
55#define SCSI_STATUS_RESERVATION_CONFLICT 0x18
56#define SCSI_STATUS_COMMAND_TERMINATED 0x22
57#define SCSI_STATUS_QUEUE_FULL 0x28
58#define SCSI_STATUS_ACA_ACTIVE 0x30
59
60#define SCSI_MAX_ALLOC_LEN 0xFF /* maximum allocarion length */
61
25/* 62/*
26 * Fibre Channel Header Structure (FCHS) definition 63 * Fibre Channel Header Structure (FCHS) definition
27 */ 64 */
@@ -51,9 +88,9 @@ struct fchs_s {
51 u32 ro; /* relative offset */ 88 u32 ro; /* relative offset */
52}; 89};
53 90
54#define FC_SOF_LEN 4 91#define FC_SOF_LEN 4
55#define FC_EOF_LEN 4 92#define FC_EOF_LEN 4
56#define FC_CRC_LEN 4 93#define FC_CRC_LEN 4
57 94
58/* 95/*
59 * Fibre Channel BB_E Header Structure 96 * Fibre Channel BB_E Header Structure
@@ -140,10 +177,12 @@ enum {
140 FC_TYPE_FC_FSS = 0x22, /* Fabric Switch Services */ 177 FC_TYPE_FC_FSS = 0x22, /* Fabric Switch Services */
141 FC_TYPE_FC_AL = 0x23, /* FC-AL */ 178 FC_TYPE_FC_AL = 0x23, /* FC-AL */
142 FC_TYPE_FC_SNMP = 0x24, /* FC-SNMP */ 179 FC_TYPE_FC_SNMP = 0x24, /* FC-SNMP */
180 FC_TYPE_FC_SPINFAB = 0xEE, /* SPINFAB */
181 FC_TYPE_FC_DIAG = 0xEF, /* DIAG */
143 FC_TYPE_MAX = 256, /* 256 FC-4 types */ 182 FC_TYPE_MAX = 256, /* 256 FC-4 types */
144}; 183};
145 184
146struct fc_fc4types_s{ 185struct fc_fc4types_s {
147 u8 bits[FC_TYPE_MAX / 8]; 186 u8 bits[FC_TYPE_MAX / 8];
148}; 187};
149 188
@@ -168,7 +207,7 @@ enum {
168 */ 207 */
169enum { 208enum {
170 FC_MIN_WELL_KNOWN_ADDR = 0xFFFFF0, 209 FC_MIN_WELL_KNOWN_ADDR = 0xFFFFF0,
171 FC_DOMAIN_CONTROLLER_MASK = 0xFFFC00, 210 FC_DOMAIN_CONTROLLER_MASK = 0xFFFC00,
172 FC_ALIAS_SERVER = 0xFFFFF8, 211 FC_ALIAS_SERVER = 0xFFFFF8,
173 FC_MGMT_SERVER = 0xFFFFFA, 212 FC_MGMT_SERVER = 0xFFFFFA,
174 FC_TIME_SERVER = 0xFFFFFB, 213 FC_TIME_SERVER = 0xFFFFFB,
@@ -201,7 +240,7 @@ enum {
201/* 240/*
202 * generic ELS command 241 * generic ELS command
203 */ 242 */
204struct fc_els_cmd_s{ 243struct fc_els_cmd_s {
205 u32 els_code:8; /* ELS Command Code */ 244 u32 els_code:8; /* ELS Command Code */
206 u32 reserved:24; 245 u32 reserved:24;
207}; 246};
@@ -233,6 +272,8 @@ enum {
233 FC_ELS_PDISC = 0x50, /* Discover N_Port Parameters. */ 272 FC_ELS_PDISC = 0x50, /* Discover N_Port Parameters. */
234 FC_ELS_FDISC = 0x51, /* Discover F_Port Parameters. */ 273 FC_ELS_FDISC = 0x51, /* Discover F_Port Parameters. */
235 FC_ELS_ADISC = 0x52, /* Discover Address. */ 274 FC_ELS_ADISC = 0x52, /* Discover Address. */
275 FC_ELS_FARP_REQ = 0x54, /* FARP Request. */
276 FC_ELS_FARP_REP = 0x55, /* FARP Reply. */
236 FC_ELS_FAN = 0x60, /* Fabric Address Notification */ 277 FC_ELS_FAN = 0x60, /* Fabric Address Notification */
237 FC_ELS_RSCN = 0x61, /* Reg State Change Notification */ 278 FC_ELS_RSCN = 0x61, /* Reg State Change Notification */
238 FC_ELS_SCR = 0x62, /* State Change Registration. */ 279 FC_ELS_SCR = 0x62, /* State Change Registration. */
@@ -272,7 +313,7 @@ enum {
272 * N_Port PLOGI Common Service Parameters. 313 * N_Port PLOGI Common Service Parameters.
273 * FC-PH-x. Figure-76. pg. 308. 314 * FC-PH-x. Figure-76. pg. 308.
274 */ 315 */
275struct fc_plogi_csp_s{ 316struct fc_plogi_csp_s {
276 u8 verhi; /* FC-PH high version */ 317 u8 verhi; /* FC-PH high version */
277 u8 verlo; /* FC-PH low version */ 318 u8 verlo; /* FC-PH low version */
278 u16 bbcred; /* BB_Credit */ 319 u16 bbcred; /* BB_Credit */
@@ -326,7 +367,7 @@ struct fc_plogi_csp_s{
326 * N_Port PLOGI Class Specific Parameters. 367 * N_Port PLOGI Class Specific Parameters.
327 * FC-PH-x. Figure 78. pg. 318. 368 * FC-PH-x. Figure 78. pg. 318.
328 */ 369 */
329struct fc_plogi_clp_s{ 370struct fc_plogi_clp_s {
330#ifdef __BIGENDIAN 371#ifdef __BIGENDIAN
331 u32 class_valid:1; 372 u32 class_valid:1;
332 u32 intermix:1; /* class intermix supported if set =1. 373 u32 intermix:1; /* class intermix supported if set =1.
@@ -361,29 +402,29 @@ struct fc_plogi_clp_s{
361 u32 reserved8:16; 402 u32 reserved8:16;
362}; 403};
363 404
364#define FLOGI_VVL_BRCD 0x42524344 /* ASCII value for each character in 405/* ASCII value for each character in string "BRCD" */
365 * string "BRCD" */ 406#define FLOGI_VVL_BRCD 0x42524344
366 407
367/* 408/*
368 * PLOGI els command and reply payload 409 * PLOGI els command and reply payload
369 */ 410 */
370struct fc_logi_s{ 411struct fc_logi_s {
371 struct fc_els_cmd_s els_cmd; /* ELS command code */ 412 struct fc_els_cmd_s els_cmd; /* ELS command code */
372 struct fc_plogi_csp_s csp; /* common service params */ 413 struct fc_plogi_csp_s csp; /* common service params */
373 wwn_t port_name; 414 wwn_t port_name;
374 wwn_t node_name; 415 wwn_t node_name;
375 struct fc_plogi_clp_s class1; /* class 1 service parameters */ 416 struct fc_plogi_clp_s class1; /* class 1 service parameters */
376 struct fc_plogi_clp_s class2; /* class 2 service parameters */ 417 struct fc_plogi_clp_s class2; /* class 2 service parameters */
377 struct fc_plogi_clp_s class3; /* class 3 service parameters */ 418 struct fc_plogi_clp_s class3; /* class 3 service parameters */
378 struct fc_plogi_clp_s class4; /* class 4 service parameters */ 419 struct fc_plogi_clp_s class4; /* class 4 service parameters */
379 u8 vvl[16]; /* vendor version level */ 420 u8 vvl[16]; /* vendor version level */
380}; 421};
381 422
382/* 423/*
383 * LOGO els command payload 424 * LOGO els command payload
384 */ 425 */
385struct fc_logo_s{ 426struct fc_logo_s {
386 struct fc_els_cmd_s els_cmd; /* ELS command code */ 427 struct fc_els_cmd_s els_cmd; /* ELS command code */
387 u32 res1:8; 428 u32 res1:8;
388 u32 nport_id:24; /* N_Port identifier of source */ 429 u32 nport_id:24; /* N_Port identifier of source */
389 wwn_t orig_port_name; /* Port name of the LOGO originator */ 430 wwn_t orig_port_name; /* Port name of the LOGO originator */
@@ -393,7 +434,7 @@ struct fc_logo_s{
393 * ADISC els command payload 434 * ADISC els command payload
394 */ 435 */
395struct fc_adisc_s { 436struct fc_adisc_s {
396 struct fc_els_cmd_s els_cmd; /* ELS command code */ 437 struct fc_els_cmd_s els_cmd; /* ELS command code */
397 u32 res1:8; 438 u32 res1:8;
398 u32 orig_HA:24; /* originator hard address */ 439 u32 orig_HA:24; /* originator hard address */
399 wwn_t orig_port_name; /* originator port name */ 440 wwn_t orig_port_name; /* originator port name */
@@ -405,7 +446,7 @@ struct fc_adisc_s {
405/* 446/*
406 * Exchange status block 447 * Exchange status block
407 */ 448 */
408struct fc_exch_status_blk_s{ 449struct fc_exch_status_blk_s {
409 u32 oxid:16; 450 u32 oxid:16;
410 u32 rxid:16; 451 u32 rxid:16;
411 u32 res1:8; 452 u32 res1:8;
@@ -423,7 +464,7 @@ struct fc_exch_status_blk_s{
423 * RES els command payload 464 * RES els command payload
424 */ 465 */
425struct fc_res_s { 466struct fc_res_s {
426 struct fc_els_cmd_s els_cmd; /* ELS command code */ 467 struct fc_els_cmd_s els_cmd; /* ELS command code */
427 u32 res1:8; 468 u32 res1:8;
428 u32 nport_id:24; /* N_Port identifier of source */ 469 u32 nport_id:24; /* N_Port identifier of source */
429 u32 oxid:16; 470 u32 oxid:16;
@@ -434,16 +475,16 @@ struct fc_res_s {
434/* 475/*
435 * RES els accept payload 476 * RES els accept payload
436 */ 477 */
437struct fc_res_acc_s{ 478struct fc_res_acc_s {
438 struct fc_els_cmd_s els_cmd; /* ELS command code */ 479 struct fc_els_cmd_s els_cmd; /* ELS command code */
439 struct fc_exch_status_blk_s fc_exch_blk; /* Exchange status block */ 480 struct fc_exch_status_blk_s fc_exch_blk; /* Exchange status block */
440}; 481};
441 482
442/* 483/*
443 * REC els command payload 484 * REC els command payload
444 */ 485 */
445struct fc_rec_s { 486struct fc_rec_s {
446 struct fc_els_cmd_s els_cmd; /* ELS command code */ 487 struct fc_els_cmd_s els_cmd; /* ELS command code */
447 u32 res1:8; 488 u32 res1:8;
448 u32 nport_id:24; /* N_Port identifier of source */ 489 u32 nport_id:24; /* N_Port identifier of source */
449 u32 oxid:16; 490 u32 oxid:16;
@@ -451,9 +492,9 @@ struct fc_rec_s {
451}; 492};
452 493
453#define FC_REC_ESB_OWN_RSP 0x80000000 /* responder owns */ 494#define FC_REC_ESB_OWN_RSP 0x80000000 /* responder owns */
454#define FC_REC_ESB_SI 0x40000000 /* SI is owned */ 495#define FC_REC_ESB_SI 0x40000000 /* SI is owned */
455#define FC_REC_ESB_COMP 0x20000000 /* exchange is complete */ 496#define FC_REC_ESB_COMP 0x20000000 /* exchange is complete */
456#define FC_REC_ESB_ENDCOND_ABN 0x10000000 /* abnormal ending */ 497#define FC_REC_ESB_ENDCOND_ABN 0x10000000 /* abnormal ending */
457#define FC_REC_ESB_RQACT 0x04000000 /* recovery qual active */ 498#define FC_REC_ESB_RQACT 0x04000000 /* recovery qual active */
458#define FC_REC_ESB_ERRP_MSK 0x03000000 499#define FC_REC_ESB_ERRP_MSK 0x03000000
459#define FC_REC_ESB_OXID_INV 0x00800000 /* invalid OXID */ 500#define FC_REC_ESB_OXID_INV 0x00800000 /* invalid OXID */
@@ -464,7 +505,7 @@ struct fc_rec_s {
464 * REC els accept payload 505 * REC els accept payload
465 */ 506 */
466struct fc_rec_acc_s { 507struct fc_rec_acc_s {
467 struct fc_els_cmd_s els_cmd; /* ELS command code */ 508 struct fc_els_cmd_s els_cmd; /* ELS command code */
468 u32 oxid:16; 509 u32 oxid:16;
469 u32 rxid:16; 510 u32 rxid:16;
470 u32 res1:8; 511 u32 res1:8;
@@ -479,7 +520,7 @@ struct fc_rec_acc_s {
479 * RSI els payload 520 * RSI els payload
480 */ 521 */
481struct fc_rsi_s { 522struct fc_rsi_s {
482 struct fc_els_cmd_s els_cmd; 523 struct fc_els_cmd_s els_cmd;
483 u32 res1:8; 524 u32 res1:8;
484 u32 orig_sid:24; 525 u32 orig_sid:24;
485 u32 oxid:16; 526 u32 oxid:16;
@@ -490,7 +531,7 @@ struct fc_rsi_s {
490 * structure for PRLI paramater pages, both request & response 531 * structure for PRLI paramater pages, both request & response
491 * see FC-PH-X table 113 & 115 for explanation also FCP table 8 532 * see FC-PH-X table 113 & 115 for explanation also FCP table 8
492 */ 533 */
493struct fc_prli_params_s{ 534struct fc_prli_params_s {
494 u32 reserved:16; 535 u32 reserved:16;
495#ifdef __BIGENDIAN 536#ifdef __BIGENDIAN
496 u32 reserved1:5; 537 u32 reserved1:5;
@@ -531,7 +572,7 @@ enum {
531 FC_PRLI_ACC_PREDEF_IMG = 0x5, /* predefined image - no prli needed */ 572 FC_PRLI_ACC_PREDEF_IMG = 0x5, /* predefined image - no prli needed */
532}; 573};
533 574
534struct fc_prli_params_page_s{ 575struct fc_prli_params_page_s {
535 u32 type:8; 576 u32 type:8;
536 u32 codext:8; 577 u32 codext:8;
537#ifdef __BIGENDIAN 578#ifdef __BIGENDIAN
@@ -551,13 +592,13 @@ struct fc_prli_params_page_s{
551 592
552 u32 origprocas; 593 u32 origprocas;
553 u32 rspprocas; 594 u32 rspprocas;
554 struct fc_prli_params_s servparams; 595 struct fc_prli_params_s servparams;
555}; 596};
556 597
557/* 598/*
558 * PRLI request and accept payload, FC-PH-X tables 112 & 114 599 * PRLI request and accept payload, FC-PH-X tables 112 & 114
559 */ 600 */
560struct fc_prli_s{ 601struct fc_prli_s {
561 u32 command:8; 602 u32 command:8;
562 u32 pglen:8; 603 u32 pglen:8;
563 u32 pagebytes:16; 604 u32 pagebytes:16;
@@ -567,7 +608,7 @@ struct fc_prli_s{
567/* 608/*
568 * PRLO logout params page 609 * PRLO logout params page
569 */ 610 */
570struct fc_prlo_params_page_s{ 611struct fc_prlo_params_page_s {
571 u32 type:8; 612 u32 type:8;
572 u32 type_ext:8; 613 u32 type_ext:8;
573#ifdef __BIGENDIAN 614#ifdef __BIGENDIAN
@@ -592,17 +633,17 @@ struct fc_prlo_params_page_s{
592/* 633/*
593 * PRLO els command payload 634 * PRLO els command payload
594 */ 635 */
595struct fc_prlo_s{ 636struct fc_prlo_s {
596 u32 command:8; 637 u32 command:8;
597 u32 page_len:8; 638 u32 page_len:8;
598 u32 payload_len:16; 639 u32 payload_len:16;
599 struct fc_prlo_params_page_s prlo_params[1]; 640 struct fc_prlo_params_page_s prlo_params[1];
600}; 641};
601 642
602/* 643/*
603 * PRLO Logout response parameter page 644 * PRLO Logout response parameter page
604 */ 645 */
605struct fc_prlo_acc_params_page_s{ 646struct fc_prlo_acc_params_page_s {
606 u32 type:8; 647 u32 type:8;
607 u32 type_ext:8; 648 u32 type_ext:8;
608 649
@@ -628,7 +669,7 @@ struct fc_prlo_acc_params_page_s{
628/* 669/*
629 * PRLO els command ACC payload 670 * PRLO els command ACC payload
630 */ 671 */
631struct fc_prlo_acc_s{ 672struct fc_prlo_acc_s {
632 u32 command:8; 673 u32 command:8;
633 u32 page_len:8; 674 u32 page_len:8;
634 u32 payload_len:16; 675 u32 payload_len:16;
@@ -650,7 +691,7 @@ enum {
650 FC_VU_SCR_REG_FUNC_FABRIC_NAME_CHANGE = 0x01 691 FC_VU_SCR_REG_FUNC_FABRIC_NAME_CHANGE = 0x01
651}; 692};
652 693
653struct fc_scr_s{ 694struct fc_scr_s {
654 u32 command:8; 695 u32 command:8;
655 u32 res:24; 696 u32 res:24;
656 u32 vu_reg_func:8; /* Vendor Unique Registrations */ 697 u32 vu_reg_func:8; /* Vendor Unique Registrations */
@@ -674,7 +715,7 @@ enum {
674 * LS_RJT els reply payload 715 * LS_RJT els reply payload
675 */ 716 */
676struct fc_ls_rjt_s { 717struct fc_ls_rjt_s {
677 struct fc_els_cmd_s els_cmd; /* ELS command code */ 718 struct fc_els_cmd_s els_cmd; /* ELS command code */
678 u32 res1:8; 719 u32 res1:8;
679 u32 reason_code:8; /* Reason code for reject */ 720 u32 reason_code:8; /* Reason code for reject */
680 u32 reason_code_expl:8; /* Reason code explanation */ 721 u32 reason_code_expl:8; /* Reason code explanation */
@@ -722,8 +763,8 @@ enum {
722/* 763/*
723 * RRQ els command payload 764 * RRQ els command payload
724 */ 765 */
725struct fc_rrq_s{ 766struct fc_rrq_s {
726 struct fc_els_cmd_s els_cmd; /* ELS command code */ 767 struct fc_els_cmd_s els_cmd; /* ELS command code */
727 u32 res1:8; 768 u32 res1:8;
728 u32 s_id:24; /* exchange originator S_ID */ 769 u32 s_id:24; /* exchange originator S_ID */
729 770
@@ -736,7 +777,7 @@ struct fc_rrq_s{
736/* 777/*
737 * ABTS BA_ACC reply payload 778 * ABTS BA_ACC reply payload
738 */ 779 */
739struct fc_ba_acc_s{ 780struct fc_ba_acc_s {
740 u32 seq_id_valid:8; /* set to 0x00 for Abort Exchange */ 781 u32 seq_id_valid:8; /* set to 0x00 for Abort Exchange */
741 u32 seq_id:8; /* invalid for Abort Exchange */ 782 u32 seq_id:8; /* invalid for Abort Exchange */
742 u32 res2:16; 783 u32 res2:16;
@@ -749,7 +790,7 @@ struct fc_ba_acc_s{
749/* 790/*
750 * ABTS BA_RJT reject payload 791 * ABTS BA_RJT reject payload
751 */ 792 */
752struct fc_ba_rjt_s{ 793struct fc_ba_rjt_s {
753 u32 res1:8; /* Reserved */ 794 u32 res1:8; /* Reserved */
754 u32 reason_code:8; /* reason code for reject */ 795 u32 reason_code:8; /* reason code for reject */
755 u32 reason_expl:8; /* reason code explanation */ 796 u32 reason_expl:8; /* reason code explanation */
@@ -759,9 +800,9 @@ struct fc_ba_rjt_s{
759/* 800/*
760 * TPRLO logout parameter page 801 * TPRLO logout parameter page
761 */ 802 */
762struct fc_tprlo_params_page_s{ 803struct fc_tprlo_params_page_s {
763 u32 type:8; 804u32 type:8;
764 u32 type_ext:8; 805u32 type_ext:8;
765 806
766#ifdef __BIGENDIAN 807#ifdef __BIGENDIAN
767 u32 opa_valid:1; 808 u32 opa_valid:1;
@@ -787,7 +828,7 @@ struct fc_tprlo_params_page_s{
787/* 828/*
788 * TPRLO ELS command payload 829 * TPRLO ELS command payload
789 */ 830 */
790struct fc_tprlo_s{ 831struct fc_tprlo_s {
791 u32 command:8; 832 u32 command:8;
792 u32 page_len:8; 833 u32 page_len:8;
793 u32 payload_len:16; 834 u32 payload_len:16;
@@ -795,7 +836,7 @@ struct fc_tprlo_s{
795 struct fc_tprlo_params_page_s tprlo_params[1]; 836 struct fc_tprlo_params_page_s tprlo_params[1];
796}; 837};
797 838
798enum fc_tprlo_type{ 839enum fc_tprlo_type {
799 FC_GLOBAL_LOGO = 1, 840 FC_GLOBAL_LOGO = 1,
800 FC_TPR_LOGO 841 FC_TPR_LOGO
801}; 842};
@@ -803,7 +844,7 @@ enum fc_tprlo_type{
803/* 844/*
804 * TPRLO els command ACC payload 845 * TPRLO els command ACC payload
805 */ 846 */
806struct fc_tprlo_acc_s{ 847struct fc_tprlo_acc_s {
807 u32 command:8; 848 u32 command:8;
808 u32 page_len:8; 849 u32 page_len:8;
809 u32 payload_len:16; 850 u32 payload_len:16;
@@ -815,21 +856,21 @@ struct fc_tprlo_acc_s{
815 */ 856 */
816#define FC_RSCN_PGLEN 0x4 857#define FC_RSCN_PGLEN 0x4
817 858
818enum fc_rscn_format{ 859enum fc_rscn_format {
819 FC_RSCN_FORMAT_PORTID = 0x0, 860 FC_RSCN_FORMAT_PORTID = 0x0,
820 FC_RSCN_FORMAT_AREA = 0x1, 861 FC_RSCN_FORMAT_AREA = 0x1,
821 FC_RSCN_FORMAT_DOMAIN = 0x2, 862 FC_RSCN_FORMAT_DOMAIN = 0x2,
822 FC_RSCN_FORMAT_FABRIC = 0x3, 863 FC_RSCN_FORMAT_FABRIC = 0x3,
823}; 864};
824 865
825struct fc_rscn_event_s{ 866struct fc_rscn_event_s {
826 u32 format:2; 867 u32 format:2;
827 u32 qualifier:4; 868 u32 qualifier:4;
828 u32 resvd:2; 869 u32 resvd:2;
829 u32 portid:24; 870 u32 portid:24;
830}; 871};
831 872
832struct fc_rscn_pl_s{ 873struct fc_rscn_pl_s {
833 u8 command; 874 u8 command;
834 u8 pagelen; 875 u8 pagelen;
835 u16 payldlen; 876 u16 payldlen;
@@ -840,18 +881,18 @@ struct fc_rscn_pl_s{
840 * ECHO els command req payload 881 * ECHO els command req payload
841 */ 882 */
842struct fc_echo_s { 883struct fc_echo_s {
843 struct fc_els_cmd_s els_cmd; 884 struct fc_els_cmd_s els_cmd;
844}; 885};
845 886
846/* 887/*
847 * RNID els command 888 * RNID els command
848 */ 889 */
849 890
850#define RNID_NODEID_DATA_FORMAT_COMMON 0x00 891#define RNID_NODEID_DATA_FORMAT_COMMON 0x00
851#define RNID_NODEID_DATA_FORMAT_FCP3 0x08 892#define RNID_NODEID_DATA_FORMAT_FCP3 0x08
852#define RNID_NODEID_DATA_FORMAT_DISCOVERY 0xDF 893#define RNID_NODEID_DATA_FORMAT_DISCOVERY 0xDF
853 894
854#define RNID_ASSOCIATED_TYPE_UNKNOWN 0x00000001 895#define RNID_ASSOCIATED_TYPE_UNKNOWN 0x00000001
855#define RNID_ASSOCIATED_TYPE_OTHER 0x00000002 896#define RNID_ASSOCIATED_TYPE_OTHER 0x00000002
856#define RNID_ASSOCIATED_TYPE_HUB 0x00000003 897#define RNID_ASSOCIATED_TYPE_HUB 0x00000003
857#define RNID_ASSOCIATED_TYPE_SWITCH 0x00000004 898#define RNID_ASSOCIATED_TYPE_SWITCH 0x00000004
@@ -868,8 +909,8 @@ struct fc_echo_s {
868/* 909/*
869 * RNID els command payload 910 * RNID els command payload
870 */ 911 */
871struct fc_rnid_cmd_s{ 912struct fc_rnid_cmd_s {
872 struct fc_els_cmd_s els_cmd; 913 struct fc_els_cmd_s els_cmd;
873 u32 node_id_data_format:8; 914 u32 node_id_data_format:8;
874 u32 reserved:24; 915 u32 reserved:24;
875}; 916};
@@ -878,12 +919,12 @@ struct fc_rnid_cmd_s{
878 * RNID els response payload 919 * RNID els response payload
879 */ 920 */
880 921
881struct fc_rnid_common_id_data_s{ 922struct fc_rnid_common_id_data_s {
882 wwn_t port_name; 923 wwn_t port_name;
883 wwn_t node_name; 924 wwn_t node_name;
884}; 925};
885 926
886struct fc_rnid_general_topology_data_s{ 927struct fc_rnid_general_topology_data_s {
887 u32 vendor_unique[4]; 928 u32 vendor_unique[4];
888 u32 asso_type; 929 u32 asso_type;
889 u32 phy_port_num; 930 u32 phy_port_num;
@@ -896,8 +937,8 @@ struct fc_rnid_general_topology_data_s{
896 u32 vendor_specific:16; 937 u32 vendor_specific:16;
897}; 938};
898 939
899struct fc_rnid_acc_s{ 940struct fc_rnid_acc_s {
900 struct fc_els_cmd_s els_cmd; 941 struct fc_els_cmd_s els_cmd;
901 u32 node_id_data_format:8; 942 u32 node_id_data_format:8;
902 u32 common_id_data_length:8; 943 u32 common_id_data_length:8;
903 u32 reserved:8; 944 u32 reserved:8;
@@ -920,7 +961,7 @@ struct fc_rnid_acc_s{
920#define RNID_ASSOCIATED_TYPE_VIRTUALIZATION_DEVICE 0x00000003 961#define RNID_ASSOCIATED_TYPE_VIRTUALIZATION_DEVICE 0x00000003
921#define RNID_ASSOCIATED_TYPE_MULTI_FUNCTION_DEVICE 0x000000FF 962#define RNID_ASSOCIATED_TYPE_MULTI_FUNCTION_DEVICE 0x000000FF
922 963
923enum fc_rpsc_speed_cap{ 964enum fc_rpsc_speed_cap {
924 RPSC_SPEED_CAP_1G = 0x8000, 965 RPSC_SPEED_CAP_1G = 0x8000,
925 RPSC_SPEED_CAP_2G = 0x4000, 966 RPSC_SPEED_CAP_2G = 0x4000,
926 RPSC_SPEED_CAP_4G = 0x2000, 967 RPSC_SPEED_CAP_4G = 0x2000,
@@ -931,7 +972,7 @@ enum fc_rpsc_speed_cap{
931 RPSC_SPEED_CAP_UNKNOWN = 0x0001, 972 RPSC_SPEED_CAP_UNKNOWN = 0x0001,
932}; 973};
933 974
934enum fc_rpsc_op_speed_s{ 975enum fc_rpsc_op_speed {
935 RPSC_OP_SPEED_1G = 0x8000, 976 RPSC_OP_SPEED_1G = 0x8000,
936 RPSC_OP_SPEED_2G = 0x4000, 977 RPSC_OP_SPEED_2G = 0x4000,
937 RPSC_OP_SPEED_4G = 0x2000, 978 RPSC_OP_SPEED_4G = 0x2000,
@@ -942,24 +983,24 @@ enum fc_rpsc_op_speed_s{
942 RPSC_OP_SPEED_NOT_EST = 0x0001, /*! speed not established */ 983 RPSC_OP_SPEED_NOT_EST = 0x0001, /*! speed not established */
943}; 984};
944 985
945struct fc_rpsc_speed_info_s{ 986struct fc_rpsc_speed_info_s {
946 u16 port_speed_cap; /*! see fc_rpsc_speed_cap_t */ 987 u16 port_speed_cap; /*! see enum fc_rpsc_speed_cap */
947 u16 port_op_speed; /*! see fc_rpsc_op_speed_t */ 988 u16 port_op_speed; /*! see enum fc_rpsc_op_speed */
948}; 989};
949 990
950enum link_e2e_beacon_subcmd{ 991enum link_e2e_beacon_subcmd {
951 LINK_E2E_BEACON_ON = 1, 992 LINK_E2E_BEACON_ON = 1,
952 LINK_E2E_BEACON_OFF = 2 993 LINK_E2E_BEACON_OFF = 2
953}; 994};
954 995
955enum beacon_type{ 996enum beacon_type {
956 BEACON_TYPE_NORMAL = 1, /*! Normal Beaconing. Green */ 997 BEACON_TYPE_NORMAL = 1, /*! Normal Beaconing. Green */
957 BEACON_TYPE_WARN = 2, /*! Warning Beaconing. Yellow/Amber */ 998 BEACON_TYPE_WARN = 2, /*! Warning Beaconing. Yellow/Amber */
958 BEACON_TYPE_CRITICAL = 3 /*! Critical Beaconing. Red */ 999 BEACON_TYPE_CRITICAL = 3 /*! Critical Beaconing. Red */
959}; 1000};
960 1001
961struct link_e2e_beacon_param_s { 1002struct link_e2e_beacon_param_s {
962 u8 beacon_type; /* Beacon Type. See beacon_type_t */ 1003 u8 beacon_type; /* Beacon Type. See enum beacon_type */
963 u8 beacon_frequency; 1004 u8 beacon_frequency;
964 /* Beacon frequency. Number of blinks 1005 /* Beacon frequency. Number of blinks
965 * per 10 seconds 1006 * per 10 seconds
@@ -978,12 +1019,13 @@ struct link_e2e_beacon_param_s {
978}; 1019};
979 1020
980/* 1021/*
981 * Link E2E beacon request/good response format. For LS_RJTs use fc_ls_rjt_t 1022 * Link E2E beacon request/good response format.
1023 * For LS_RJTs use struct fc_ls_rjt_s
982 */ 1024 */
983struct link_e2e_beacon_req_s{ 1025struct link_e2e_beacon_req_s {
984 u32 ls_code; /*! FC_ELS_E2E_LBEACON in requests * 1026 u32 ls_code; /*! FC_ELS_E2E_LBEACON in requests *
985 *or FC_ELS_ACC in good replies */ 1027 *or FC_ELS_ACC in good replies */
986 u32 ls_sub_cmd; /*! See link_e2e_beacon_subcmd_t */ 1028 u32 ls_sub_cmd; /*! See enum link_e2e_beacon_subcmd */
987 struct link_e2e_beacon_param_s beacon_parm; 1029 struct link_e2e_beacon_param_s beacon_parm;
988}; 1030};
989 1031
@@ -992,14 +1034,14 @@ struct link_e2e_beacon_req_s{
992 * all the ports within that domain (TODO - I don't think FOS implements 1034 * all the ports within that domain (TODO - I don't think FOS implements
993 * this...). 1035 * this...).
994 */ 1036 */
995struct fc_rpsc_cmd_s{ 1037struct fc_rpsc_cmd_s {
996 struct fc_els_cmd_s els_cmd; 1038 struct fc_els_cmd_s els_cmd;
997}; 1039};
998 1040
999/* 1041/*
1000 * RPSC Acc 1042 * RPSC Acc
1001 */ 1043 */
1002struct fc_rpsc_acc_s{ 1044struct fc_rpsc_acc_s {
1003 u32 command:8; 1045 u32 command:8;
1004 u32 rsvd:8; 1046 u32 rsvd:8;
1005 u32 num_entries:16; 1047 u32 num_entries:16;
@@ -1012,51 +1054,50 @@ struct fc_rpsc_acc_s{
1012 */ 1054 */
1013#define FC_BRCD_TOKEN 0x42524344 1055#define FC_BRCD_TOKEN 0x42524344
1014 1056
1015struct fc_rpsc2_cmd_s{ 1057struct fc_rpsc2_cmd_s {
1016 struct fc_els_cmd_s els_cmd; 1058 struct fc_els_cmd_s els_cmd;
1017 u32 token; 1059 u32 token;
1018 u16 resvd; 1060 u16 resvd;
1019 u16 num_pids; /* Number of pids in the request */ 1061 u16 num_pids; /* Number of pids in the request */
1020 struct { 1062 struct {
1021 u32 rsvd1:8; 1063 u32 rsvd1:8;
1022 u32 pid:24; /* port identifier */ 1064 u32 pid:24; /* port identifier */
1023 } pid_list[1]; 1065 } pid_list[1];
1024}; 1066};
1025 1067
1026enum fc_rpsc2_port_type{ 1068enum fc_rpsc2_port_type {
1027 RPSC2_PORT_TYPE_UNKNOWN = 0, 1069 RPSC2_PORT_TYPE_UNKNOWN = 0,
1028 RPSC2_PORT_TYPE_NPORT = 1, 1070 RPSC2_PORT_TYPE_NPORT = 1,
1029 RPSC2_PORT_TYPE_NLPORT = 2, 1071 RPSC2_PORT_TYPE_NLPORT = 2,
1030 RPSC2_PORT_TYPE_NPIV_PORT = 0x5f, 1072 RPSC2_PORT_TYPE_NPIV_PORT = 0x5f,
1031 RPSC2_PORT_TYPE_NPORT_TRUNK = 0x6f, 1073 RPSC2_PORT_TYPE_NPORT_TRUNK = 0x6f,
1032}; 1074};
1033
1034/* 1075/*
1035 * RPSC2 portInfo entry structure 1076 * RPSC2 portInfo entry structure
1036 */ 1077 */
1037struct fc_rpsc2_port_info_s{ 1078struct fc_rpsc2_port_info_s {
1038 u32 pid; /* PID */ 1079 u32 pid; /* PID */
1039 u16 resvd1; 1080 u16 resvd1;
1040 u16 index; /* port number / index */ 1081 u16 index; /* port number / index */
1041 u8 resvd2; 1082 u8 resvd2;
1042 u8 type; /* port type N/NL/... */ 1083 u8 type; /* port type N/NL/... */
1043 u16 speed; /* port Operating Speed */ 1084 u16 speed; /* port Operating Speed */
1044}; 1085};
1045 1086
1046/* 1087/*
1047 * RPSC2 Accept payload 1088 * RPSC2 Accept payload
1048 */ 1089 */
1049struct fc_rpsc2_acc_s{ 1090struct fc_rpsc2_acc_s {
1050 u8 els_cmd; 1091 u8 els_cmd;
1051 u8 resvd; 1092 u8 resvd;
1052 u16 num_pids; /* Number of pids in the request */ 1093 u16 num_pids; /* Number of pids in the request */
1053 struct fc_rpsc2_port_info_s port_info[1]; /* port information */ 1094 struct fc_rpsc2_port_info_s port_info[1]; /* port information */
1054}; 1095};
1055 1096
1056/** 1097/**
1057 * bit fields so that multiple classes can be specified 1098 * bit fields so that multiple classes can be specified
1058 */ 1099 */
1059enum fc_cos{ 1100enum fc_cos {
1060 FC_CLASS_2 = 0x04, 1101 FC_CLASS_2 = 0x04,
1061 FC_CLASS_3 = 0x08, 1102 FC_CLASS_3 = 0x08,
1062 FC_CLASS_2_3 = 0x0C, 1103 FC_CLASS_2_3 = 0x0C,
@@ -1065,11 +1106,11 @@ enum fc_cos{
1065/* 1106/*
1066 * symbolic name 1107 * symbolic name
1067 */ 1108 */
1068struct fc_symname_s{ 1109struct fc_symname_s {
1069 u8 symname[FC_SYMNAME_MAX]; 1110 u8 symname[FC_SYMNAME_MAX];
1070}; 1111};
1071 1112
1072struct fc_alpabm_s{ 1113struct fc_alpabm_s {
1073 u8 alpa_bm[FC_ALPA_MAX / 8]; 1114 u8 alpa_bm[FC_ALPA_MAX / 8];
1074}; 1115};
1075 1116
@@ -1094,7 +1135,7 @@ struct fc_alpabm_s{
1094 * Virtual Fabric Tagging header format 1135 * Virtual Fabric Tagging header format
1095 * @caution This is defined only in BIG ENDIAN format. 1136 * @caution This is defined only in BIG ENDIAN format.
1096 */ 1137 */
1097struct fc_vft_s{ 1138struct fc_vft_s {
1098 u32 r_ctl:8; 1139 u32 r_ctl:8;
1099 u32 ver:2; 1140 u32 ver:2;
1100 u32 type:4; 1141 u32 type:4;
@@ -1106,6 +1147,770 @@ struct fc_vft_s{
1106 u32 res_c:24; 1147 u32 res_c:24;
1107}; 1148};
1108 1149
1109#pragma pack() 1150/*
1151 * FCP
1152 */
1153enum {
1154 FCP_RJT = 0x01000000, /* SRR reject */
1155 FCP_SRR_ACCEPT = 0x02000000, /* SRR accept */
1156 FCP_SRR = 0x14000000, /* Sequence Retransmission Request */
1157};
1158
1159/*
1160 * SRR FC-4 LS payload
1161 */
1162struct fc_srr_s {
1163 u32 ls_cmd;
1164 u32 ox_id:16; /* ox-id */
1165 u32 rx_id:16; /* rx-id */
1166 u32 ro; /* relative offset */
1167 u32 r_ctl:8; /* R_CTL for I.U. */
1168 u32 res:24;
1169};
1170
1171
1172/*
1173 * FCP_CMND definitions
1174 */
1175#define FCP_CMND_CDB_LEN 16
1176#define FCP_CMND_LUN_LEN 8
1177
1178struct fcp_cmnd_s {
1179 lun_t lun; /* 64-bit LU number */
1180 u8 crn; /* command reference number */
1181#ifdef __BIGENDIAN
1182 u8 resvd:1,
1183 priority:4, /* FCP-3: SAM-3 priority */
1184 taskattr:3; /* scsi task attribute */
1185#else
1186 u8 taskattr:3, /* scsi task attribute */
1187 priority:4, /* FCP-3: SAM-3 priority */
1188 resvd:1;
1189#endif
1190 u8 tm_flags; /* task management flags */
1191#ifdef __BIGENDIAN
1192 u8 addl_cdb_len:6, /* additional CDB length words */
1193 iodir:2; /* read/write FCP_DATA IUs */
1194#else
1195 u8 iodir:2, /* read/write FCP_DATA IUs */
1196 addl_cdb_len:6; /* additional CDB length */
1197#endif
1198 scsi_cdb_t cdb;
1199
1200 /*
1201 * !!! additional cdb bytes follows here!!!
1202 */
1203 u32 fcp_dl; /* bytes to be transferred */
1204};
1205
1206#define fcp_cmnd_cdb_len(_cmnd) ((_cmnd)->addl_cdb_len * 4 + FCP_CMND_CDB_LEN)
1207#define fcp_cmnd_fcpdl(_cmnd) ((&(_cmnd)->fcp_dl)[(_cmnd)->addl_cdb_len])
1110 1208
1209/*
1210 * struct fcp_cmnd_s .iodir field values
1211 */
1212enum fcp_iodir {
1213 FCP_IODIR_NONE = 0,
1214 FCP_IODIR_WRITE = 1,
1215 FCP_IODIR_READ = 2,
1216 FCP_IODIR_RW = 3,
1217};
1218
1219/*
1220 * Task attribute field
1221 */
1222enum {
1223 FCP_TASK_ATTR_SIMPLE = 0,
1224 FCP_TASK_ATTR_HOQ = 1,
1225 FCP_TASK_ATTR_ORDERED = 2,
1226 FCP_TASK_ATTR_ACA = 4,
1227 FCP_TASK_ATTR_UNTAGGED = 5, /* obsolete in FCP-3 */
1228};
1229
1230/*
1231 * Task management flags field - only one bit shall be set
1232 */
1233enum fcp_tm_cmnd {
1234 FCP_TM_ABORT_TASK_SET = BIT(1),
1235 FCP_TM_CLEAR_TASK_SET = BIT(2),
1236 FCP_TM_LUN_RESET = BIT(4),
1237 FCP_TM_TARGET_RESET = BIT(5), /* obsolete in FCP-3 */
1238 FCP_TM_CLEAR_ACA = BIT(6),
1239};
1240
1241/*
1242 * FCP_XFER_RDY IU defines
1243 */
1244struct fcp_xfer_rdy_s {
1245 u32 data_ro;
1246 u32 burst_len;
1247 u32 reserved;
1248};
1249
1250/*
1251 * FCP_RSP residue flags
1252 */
1253enum fcp_residue {
1254 FCP_NO_RESIDUE = 0, /* no residue */
1255 FCP_RESID_OVER = 1, /* more data left that was not sent */
1256 FCP_RESID_UNDER = 2, /* less data than requested */
1257};
1258
1259enum {
1260 FCP_RSPINFO_GOOD = 0,
1261 FCP_RSPINFO_DATALEN_MISMATCH = 1,
1262 FCP_RSPINFO_CMND_INVALID = 2,
1263 FCP_RSPINFO_ROLEN_MISMATCH = 3,
1264 FCP_RSPINFO_TM_NOT_SUPP = 4,
1265 FCP_RSPINFO_TM_FAILED = 5,
1266};
1267
1268struct fcp_rspinfo_s {
1269 u32 res0:24;
1270 u32 rsp_code:8; /* response code (as above) */
1271 u32 res1;
1272};
1273
1274struct fcp_resp_s {
1275 u32 reserved[2]; /* 2 words reserved */
1276 u16 reserved2;
1277#ifdef __BIGENDIAN
1278 u8 reserved3:3;
1279 u8 fcp_conf_req:1; /* FCP_CONF is requested */
1280 u8 resid_flags:2; /* underflow/overflow */
1281 u8 sns_len_valid:1;/* sense len is valid */
1282 u8 rsp_len_valid:1;/* response len is valid */
1283#else
1284 u8 rsp_len_valid:1;/* response len is valid */
1285 u8 sns_len_valid:1;/* sense len is valid */
1286 u8 resid_flags:2; /* underflow/overflow */
1287 u8 fcp_conf_req:1; /* FCP_CONF is requested */
1288 u8 reserved3:3;
1111#endif 1289#endif
1290 u8 scsi_status; /* one byte SCSI status */
1291 u32 residue; /* residual data bytes */
1292 u32 sns_len; /* length od sense info */
1293 u32 rsp_len; /* length of response info */
1294};
1295
1296#define fcp_snslen(__fcprsp) ((__fcprsp)->sns_len_valid ? \
1297 (__fcprsp)->sns_len : 0)
1298#define fcp_rsplen(__fcprsp) ((__fcprsp)->rsp_len_valid ? \
1299 (__fcprsp)->rsp_len : 0)
1300#define fcp_rspinfo(__fcprsp) ((struct fcp_rspinfo_s *)((__fcprsp) + 1))
1301#define fcp_snsinfo(__fcprsp) (((u8 *)fcp_rspinfo(__fcprsp)) + \
1302 fcp_rsplen(__fcprsp))
1303
1304struct fcp_cmnd_fr_s {
1305 struct fchs_s fchs;
1306 struct fcp_cmnd_s fcp;
1307};
1308
1309/*
1310 * CT
1311 */
1312struct ct_hdr_s {
1313 u32 rev_id:8; /* Revision of the CT */
1314 u32 in_id:24; /* Initiator Id */
1315 u32 gs_type:8; /* Generic service Type */
1316 u32 gs_sub_type:8; /* Generic service sub type */
1317 u32 options:8; /* options */
1318 u32 rsvrd:8; /* reserved */
1319 u32 cmd_rsp_code:16;/* ct command/response code */
1320 u32 max_res_size:16;/* maximum/residual size */
1321 u32 frag_id:8; /* fragment ID */
1322 u32 reason_code:8; /* reason code */
1323 u32 exp_code:8; /* explanation code */
1324 u32 vendor_unq:8; /* vendor unique */
1325};
1326
1327/*
1328 * defines for the Revision
1329 */
1330enum {
1331 CT_GS3_REVISION = 0x01,
1332};
1333
1334/*
1335 * defines for gs_type
1336 */
1337enum {
1338 CT_GSTYPE_KEYSERVICE = 0xF7,
1339 CT_GSTYPE_ALIASSERVICE = 0xF8,
1340 CT_GSTYPE_MGMTSERVICE = 0xFA,
1341 CT_GSTYPE_TIMESERVICE = 0xFB,
1342 CT_GSTYPE_DIRSERVICE = 0xFC,
1343};
1344
1345/*
1346 * defines for gs_sub_type for gs type directory service
1347 */
1348enum {
1349 CT_GSSUBTYPE_NAMESERVER = 0x02,
1350};
1351
1352/*
1353 * defines for gs_sub_type for gs type management service
1354 */
1355enum {
1356 CT_GSSUBTYPE_CFGSERVER = 0x01,
1357 CT_GSSUBTYPE_UNZONED_NS = 0x02,
1358 CT_GSSUBTYPE_ZONESERVER = 0x03,
1359 CT_GSSUBTYPE_LOCKSERVER = 0x04,
1360 CT_GSSUBTYPE_HBA_MGMTSERVER = 0x10, /* for FDMI */
1361};
1362
1363/*
1364 * defines for CT response code field
1365 */
1366enum {
1367 CT_RSP_REJECT = 0x8001,
1368 CT_RSP_ACCEPT = 0x8002,
1369};
1370
1371/*
1372 * defintions for CT reason code
1373 */
1374enum {
1375 CT_RSN_INV_CMD = 0x01,
1376 CT_RSN_INV_VER = 0x02,
1377 CT_RSN_LOGIC_ERR = 0x03,
1378 CT_RSN_INV_SIZE = 0x04,
1379 CT_RSN_LOGICAL_BUSY = 0x05,
1380 CT_RSN_PROTO_ERR = 0x07,
1381 CT_RSN_UNABLE_TO_PERF = 0x09,
1382 CT_RSN_NOT_SUPP = 0x0B,
1383 CT_RSN_SERVER_NOT_AVBL = 0x0D,
1384 CT_RSN_SESSION_COULD_NOT_BE_ESTBD = 0x0E,
1385 CT_RSN_VENDOR_SPECIFIC = 0xFF,
1386
1387};
1388
1389/*
1390 * definitions for explanations code for Name server
1391 */
1392enum {
1393 CT_NS_EXP_NOADDITIONAL = 0x00,
1394 CT_NS_EXP_ID_NOT_REG = 0x01,
1395 CT_NS_EXP_PN_NOT_REG = 0x02,
1396 CT_NS_EXP_NN_NOT_REG = 0x03,
1397 CT_NS_EXP_CS_NOT_REG = 0x04,
1398 CT_NS_EXP_IPN_NOT_REG = 0x05,
1399 CT_NS_EXP_IPA_NOT_REG = 0x06,
1400 CT_NS_EXP_FT_NOT_REG = 0x07,
1401 CT_NS_EXP_SPN_NOT_REG = 0x08,
1402 CT_NS_EXP_SNN_NOT_REG = 0x09,
1403 CT_NS_EXP_PT_NOT_REG = 0x0A,
1404 CT_NS_EXP_IPP_NOT_REG = 0x0B,
1405 CT_NS_EXP_FPN_NOT_REG = 0x0C,
1406 CT_NS_EXP_HA_NOT_REG = 0x0D,
1407 CT_NS_EXP_FD_NOT_REG = 0x0E,
1408 CT_NS_EXP_FF_NOT_REG = 0x0F,
1409 CT_NS_EXP_ACCESSDENIED = 0x10,
1410 CT_NS_EXP_UNACCEPTABLE_ID = 0x11,
1411 CT_NS_EXP_DATABASEEMPTY = 0x12,
1412 CT_NS_EXP_NOT_REG_IN_SCOPE = 0x13,
1413 CT_NS_EXP_DOM_ID_NOT_PRESENT = 0x14,
1414 CT_NS_EXP_PORT_NUM_NOT_PRESENT = 0x15,
1415 CT_NS_EXP_NO_DEVICE_ATTACHED = 0x16
1416};
1417
1418/*
1419 * defintions for the explanation code for all servers
1420 */
1421enum {
1422 CT_EXP_AUTH_EXCEPTION = 0xF1,
1423 CT_EXP_DB_FULL = 0xF2,
1424 CT_EXP_DB_EMPTY = 0xF3,
1425 CT_EXP_PROCESSING_REQ = 0xF4,
1426 CT_EXP_UNABLE_TO_VERIFY_CONN = 0xF5,
1427 CT_EXP_DEVICES_NOT_IN_CMN_ZONE = 0xF6
1428};
1429
1430/*
1431 * Command codes for Name server
1432 */
1433enum {
1434 GS_GID_PN = 0x0121, /* Get Id on port name */
1435 GS_GPN_ID = 0x0112, /* Get port name on ID */
1436 GS_GNN_ID = 0x0113, /* Get node name on ID */
1437 GS_GID_FT = 0x0171, /* Get Id on FC4 type */
1438 GS_GSPN_ID = 0x0118, /* Get symbolic PN on ID */
1439 GS_RFT_ID = 0x0217, /* Register fc4type on ID */
1440 GS_RSPN_ID = 0x0218, /* Register symbolic PN on ID */
1441 GS_RPN_ID = 0x0212, /* Register port name */
1442 GS_RNN_ID = 0x0213, /* Register node name */
1443 GS_RCS_ID = 0x0214, /* Register class of service */
1444 GS_RPT_ID = 0x021A, /* Register port type */
1445 GS_GA_NXT = 0x0100, /* Get all next */
1446 GS_RFF_ID = 0x021F, /* Register FC4 Feature */
1447};
1448
1449struct fcgs_id_req_s{
1450 u32 rsvd:8;
1451 u32 dap:24; /* port identifier */
1452};
1453#define fcgs_gpnid_req_t struct fcgs_id_req_s
1454#define fcgs_gnnid_req_t struct fcgs_id_req_s
1455#define fcgs_gspnid_req_t struct fcgs_id_req_s
1456
1457struct fcgs_gidpn_req_s {
1458 wwn_t port_name; /* port wwn */
1459};
1460
1461struct fcgs_gidpn_resp_s {
1462 u32 rsvd:8;
1463 u32 dap:24; /* port identifier */
1464};
1465
1466/**
1467 * RFT_ID
1468 */
1469struct fcgs_rftid_req_s {
1470 u32 rsvd:8;
1471 u32 dap:24; /* port identifier */
1472 u32 fc4_type[8]; /* fc4 types */
1473};
1474
1475/**
1476 * RFF_ID : Register FC4 features.
1477 */
1478
1479#define FC_GS_FCP_FC4_FEATURE_INITIATOR 0x02
1480#define FC_GS_FCP_FC4_FEATURE_TARGET 0x01
1481
1482struct fcgs_rffid_req_s {
1483 u32 rsvd:8;
1484 u32 dap:24; /* port identifier */
1485 u32 rsvd1:16;
1486 u32 fc4ftr_bits:8; /* fc4 feature bits */
1487 u32 fc4_type:8; /* corresponding FC4 Type */
1488};
1489
1490/**
1491 * GID_FT Request
1492 */
1493struct fcgs_gidft_req_s {
1494 u8 reserved;
1495 u8 domain_id; /* domain, 0 - all fabric */
1496 u8 area_id; /* area, 0 - whole domain */
1497 u8 fc4_type; /* FC_TYPE_FCP for SCSI devices */
1498}; /* GID_FT Request */
1499
1500/**
1501 * GID_FT Response
1502 */
1503struct fcgs_gidft_resp_s {
1504 u8 last:1; /* last port identifier flag */
1505 u8 reserved:7;
1506 u32 pid:24; /* port identifier */
1507}; /* GID_FT Response */
1508
1509/**
1510 * RSPN_ID
1511 */
1512struct fcgs_rspnid_req_s {
1513 u32 rsvd:8;
1514 u32 dap:24; /* port identifier */
1515 u8 spn_len; /* symbolic port name length */
1516 u8 spn[256]; /* symbolic port name */
1517};
1518
1519/**
1520 * RPN_ID
1521 */
1522struct fcgs_rpnid_req_s {
1523 u32 rsvd:8;
1524 u32 port_id:24;
1525 wwn_t port_name;
1526};
1527
1528/**
1529 * RNN_ID
1530 */
1531struct fcgs_rnnid_req_s {
1532 u32 rsvd:8;
1533 u32 port_id:24;
1534 wwn_t node_name;
1535};
1536
1537/**
1538 * RCS_ID
1539 */
1540struct fcgs_rcsid_req_s {
1541 u32 rsvd:8;
1542 u32 port_id:24;
1543 u32 cos;
1544};
1545
1546/**
1547 * RPT_ID
1548 */
1549struct fcgs_rptid_req_s {
1550 u32 rsvd:8;
1551 u32 port_id:24;
1552 u32 port_type:8;
1553 u32 rsvd1:24;
1554};
1555
1556/**
1557 * GA_NXT Request
1558 */
1559struct fcgs_ganxt_req_s {
1560 u32 rsvd:8;
1561 u32 port_id:24;
1562};
1563
1564/**
1565 * GA_NXT Response
1566 */
1567struct fcgs_ganxt_rsp_s {
1568 u32 port_type:8; /* Port Type */
1569 u32 port_id:24; /* Port Identifier */
1570 wwn_t port_name; /* Port Name */
1571 u8 spn_len; /* Length of Symbolic Port Name */
1572 char spn[255]; /* Symbolic Port Name */
1573 wwn_t node_name; /* Node Name */
1574 u8 snn_len; /* Length of Symbolic Node Name */
1575 char snn[255]; /* Symbolic Node Name */
1576 u8 ipa[8]; /* Initial Process Associator */
1577 u8 ip[16]; /* IP Address */
1578 u32 cos; /* Class of Service */
1579 u32 fc4types[8]; /* FC-4 TYPEs */
1580 wwn_t fabric_port_name;
1581 /* Fabric Port Name */
1582 u32 rsvd:8; /* Reserved */
1583 u32 hard_addr:24; /* Hard Address */
1584};
1585
1586/*
1587 * Fabric Config Server
1588 */
1589
1590/*
1591 * Command codes for Fabric Configuration Server
1592 */
1593enum {
1594 GS_FC_GFN_CMD = 0x0114, /* GS FC Get Fabric Name */
1595 GS_FC_GMAL_CMD = 0x0116, /* GS FC GMAL */
1596 GS_FC_TRACE_CMD = 0x0400, /* GS FC Trace Route */
1597 GS_FC_PING_CMD = 0x0401, /* GS FC Ping */
1598};
1599
1600/*
1601 * Source or Destination Port Tags.
1602 */
1603enum {
1604 GS_FTRACE_TAG_NPORT_ID = 1,
1605 GS_FTRACE_TAG_NPORT_NAME = 2,
1606};
1607
1608/*
1609* Port Value : Could be a Port id or wwn
1610 */
1611union fcgs_port_val_u {
1612 u32 nport_id;
1613 wwn_t nport_wwn;
1614};
1615
1616#define GS_FTRACE_MAX_HOP_COUNT 20
1617#define GS_FTRACE_REVISION 1
1618
1619/*
1620 * Ftrace Related Structures.
1621 */
1622
1623/*
1624 * STR (Switch Trace) Reject Reason Codes. From FC-SW.
1625 */
1626enum {
1627 GS_FTRACE_STR_CMD_COMPLETED_SUCC = 0,
1628 GS_FTRACE_STR_CMD_NOT_SUPP_IN_NEXT_SWITCH,
1629 GS_FTRACE_STR_NO_RESP_FROM_NEXT_SWITCH,
1630 GS_FTRACE_STR_MAX_HOP_CNT_REACHED,
1631 GS_FTRACE_STR_SRC_PORT_NOT_FOUND,
1632 GS_FTRACE_STR_DST_PORT_NOT_FOUND,
1633 GS_FTRACE_STR_DEVICES_NOT_IN_COMMON_ZONE,
1634 GS_FTRACE_STR_NO_ROUTE_BW_PORTS,
1635 GS_FTRACE_STR_NO_ADDL_EXPLN,
1636 GS_FTRACE_STR_FABRIC_BUSY,
1637 GS_FTRACE_STR_FABRIC_BUILD_IN_PROGRESS,
1638 GS_FTRACE_STR_VENDOR_SPECIFIC_ERR_START = 0xf0,
1639 GS_FTRACE_STR_VENDOR_SPECIFIC_ERR_END = 0xff,
1640};
1641
1642/*
1643 * Ftrace Request
1644 */
1645struct fcgs_ftrace_req_s {
1646 u32 revision;
1647 u16 src_port_tag; /* Source Port tag */
1648 u16 src_port_len; /* Source Port len */
1649 union fcgs_port_val_u src_port_val; /* Source Port value */
1650 u16 dst_port_tag; /* Destination Port tag */
1651 u16 dst_port_len; /* Destination Port len */
1652 union fcgs_port_val_u dst_port_val; /* Destination Port value */
1653 u32 token;
1654 u8 vendor_id[8]; /* T10 Vendor Identifier */
1655 u8 vendor_info[8]; /* Vendor specific Info */
1656 u32 max_hop_cnt; /* Max Hop Count */
1657};
1658
1659/*
1660 * Path info structure
1661 */
1662struct fcgs_ftrace_path_info_s {
1663 wwn_t switch_name; /* Switch WWN */
1664 u32 domain_id;
1665 wwn_t ingress_port_name; /* Ingress ports wwn */
1666 u32 ingress_phys_port_num; /* Ingress ports physical port
1667 * number
1668 */
1669 wwn_t egress_port_name; /* Ingress ports wwn */
1670 u32 egress_phys_port_num; /* Ingress ports physical port
1671 * number
1672 */
1673};
1674
1675/*
1676 * Ftrace Acc Response
1677 */
1678struct fcgs_ftrace_resp_s {
1679 u32 revision;
1680 u32 token;
1681 u8 vendor_id[8]; /* T10 Vendor Identifier */
1682 u8 vendor_info[8]; /* Vendor specific Info */
1683 u32 str_rej_reason_code; /* STR Reject Reason Code */
1684 u32 num_path_info_entries; /* No. of path info entries */
1685 /*
1686 * path info entry/entries.
1687 */
1688 struct fcgs_ftrace_path_info_s path_info[1];
1689
1690};
1691
1692/*
1693* Fabric Config Server : FCPing
1694 */
1695
1696/*
1697 * FC Ping Request
1698 */
1699struct fcgs_fcping_req_s {
1700 u32 revision;
1701 u16 port_tag;
1702 u16 port_len; /* Port len */
1703 union fcgs_port_val_u port_val; /* Port value */
1704 u32 token;
1705};
1706
1707/*
1708 * FC Ping Response
1709 */
1710struct fcgs_fcping_resp_s {
1711 u32 token;
1712};
1713
1714/*
1715 * Command codes for zone server query.
1716 */
1717enum {
1718 ZS_GZME = 0x0124, /* Get zone member extended */
1719};
1720
1721/*
1722 * ZS GZME request
1723 */
1724#define ZS_GZME_ZNAMELEN 32
1725struct zs_gzme_req_s {
1726 u8 znamelen;
1727 u8 rsvd[3];
1728 u8 zname[ZS_GZME_ZNAMELEN];
1729};
1730
1731enum zs_mbr_type {
1732 ZS_MBR_TYPE_PWWN = 1,
1733 ZS_MBR_TYPE_DOMPORT = 2,
1734 ZS_MBR_TYPE_PORTID = 3,
1735 ZS_MBR_TYPE_NWWN = 4,
1736};
1737
1738struct zs_mbr_wwn_s {
1739 u8 mbr_type;
1740 u8 rsvd[3];
1741 wwn_t wwn;
1742};
1743
1744struct zs_query_resp_s {
1745 u32 nmbrs; /* number of zone members */
1746 struct zs_mbr_wwn_s mbr[1];
1747};
1748
1749/*
1750 * GMAL Command ( Get ( interconnect Element) Management Address List)
1751 * To retrieve the IP Address of a Switch.
1752 */
1753
1754#define CT_GMAL_RESP_PREFIX_TELNET "telnet://"
1755#define CT_GMAL_RESP_PREFIX_HTTP "http://"
1756
1757/* GMAL/GFN request */
1758struct fcgs_req_s {
1759 wwn_t wwn; /* PWWN/NWWN */
1760};
1761
1762#define fcgs_gmal_req_t struct fcgs_req_s
1763#define fcgs_gfn_req_t struct fcgs_req_s
1764
1765/* Accept Response to GMAL */
1766struct fcgs_gmal_resp_s {
1767 u32 ms_len; /* Num of entries */
1768 u8 ms_ma[256];
1769};
1770
1771struct fcgs_gmal_entry_s {
1772 u8 len;
1773 u8 prefix[7]; /* like "http://" */
1774 u8 ip_addr[248];
1775};
1776
1777/*
1778 * FDMI
1779 */
1780/*
1781 * FDMI Command Codes
1782 */
1783#define FDMI_GRHL 0x0100
1784#define FDMI_GHAT 0x0101
1785#define FDMI_GRPL 0x0102
1786#define FDMI_GPAT 0x0110
1787#define FDMI_RHBA 0x0200
1788#define FDMI_RHAT 0x0201
1789#define FDMI_RPRT 0x0210
1790#define FDMI_RPA 0x0211
1791#define FDMI_DHBA 0x0300
1792#define FDMI_DPRT 0x0310
1793
1794/*
1795 * FDMI reason codes
1796 */
1797#define FDMI_NO_ADDITIONAL_EXP 0x00
1798#define FDMI_HBA_ALREADY_REG 0x10
1799#define FDMI_HBA_ATTRIB_NOT_REG 0x11
1800#define FDMI_HBA_ATTRIB_MULTIPLE 0x12
1801#define FDMI_HBA_ATTRIB_LENGTH_INVALID 0x13
1802#define FDMI_HBA_ATTRIB_NOT_PRESENT 0x14
1803#define FDMI_PORT_ORIG_NOT_IN_LIST 0x15
1804#define FDMI_PORT_HBA_NOT_IN_LIST 0x16
1805#define FDMI_PORT_ATTRIB_NOT_REG 0x20
1806#define FDMI_PORT_NOT_REG 0x21
1807#define FDMI_PORT_ATTRIB_MULTIPLE 0x22
1808#define FDMI_PORT_ATTRIB_LENGTH_INVALID 0x23
1809#define FDMI_PORT_ALREADY_REGISTEREED 0x24
1810
1811/*
1812 * FDMI Transmission Speed Mask values
1813 */
1814#define FDMI_TRANS_SPEED_1G 0x00000001
1815#define FDMI_TRANS_SPEED_2G 0x00000002
1816#define FDMI_TRANS_SPEED_10G 0x00000004
1817#define FDMI_TRANS_SPEED_4G 0x00000008
1818#define FDMI_TRANS_SPEED_8G 0x00000010
1819#define FDMI_TRANS_SPEED_16G 0x00000020
1820#define FDMI_TRANS_SPEED_UNKNOWN 0x00008000
1821
1822/*
1823 * FDMI HBA attribute types
1824 */
1825enum fdmi_hba_attribute_type {
1826 FDMI_HBA_ATTRIB_NODENAME = 1, /* 0x0001 */
1827 FDMI_HBA_ATTRIB_MANUFACTURER, /* 0x0002 */
1828 FDMI_HBA_ATTRIB_SERIALNUM, /* 0x0003 */
1829 FDMI_HBA_ATTRIB_MODEL, /* 0x0004 */
1830 FDMI_HBA_ATTRIB_MODEL_DESC, /* 0x0005 */
1831 FDMI_HBA_ATTRIB_HW_VERSION, /* 0x0006 */
1832 FDMI_HBA_ATTRIB_DRIVER_VERSION, /* 0x0007 */
1833 FDMI_HBA_ATTRIB_ROM_VERSION, /* 0x0008 */
1834 FDMI_HBA_ATTRIB_FW_VERSION, /* 0x0009 */
1835 FDMI_HBA_ATTRIB_OS_NAME, /* 0x000A */
1836 FDMI_HBA_ATTRIB_MAX_CT, /* 0x000B */
1837
1838 FDMI_HBA_ATTRIB_MAX_TYPE
1839};
1840
1841/*
1842 * FDMI Port attribute types
1843 */
1844enum fdmi_port_attribute_type {
1845 FDMI_PORT_ATTRIB_FC4_TYPES = 1, /* 0x0001 */
1846 FDMI_PORT_ATTRIB_SUPP_SPEED, /* 0x0002 */
1847 FDMI_PORT_ATTRIB_PORT_SPEED, /* 0x0003 */
1848 FDMI_PORT_ATTRIB_FRAME_SIZE, /* 0x0004 */
1849 FDMI_PORT_ATTRIB_DEV_NAME, /* 0x0005 */
1850 FDMI_PORT_ATTRIB_HOST_NAME, /* 0x0006 */
1851
1852 FDMI_PORT_ATTR_MAX_TYPE
1853};
1854
1855/*
1856 * FDMI attribute
1857 */
1858struct fdmi_attr_s {
1859 u16 type;
1860 u16 len;
1861 u8 value[1];
1862};
1863
1864/*
1865 * HBA Attribute Block
1866 */
1867struct fdmi_hba_attr_s {
1868 u32 attr_count; /* # of attributes */
1869 struct fdmi_attr_s hba_attr; /* n attributes */
1870};
1871
1872/*
1873 * Registered Port List
1874 */
1875struct fdmi_port_list_s {
1876 u32 num_ports; /* number Of Port Entries */
1877 wwn_t port_entry; /* one or more */
1878};
1879
1880/*
1881 * Port Attribute Block
1882 */
1883struct fdmi_port_attr_s {
1884 u32 attr_count; /* # of attributes */
1885 struct fdmi_attr_s port_attr; /* n attributes */
1886};
1887
1888/*
1889 * FDMI Register HBA Attributes
1890 */
1891struct fdmi_rhba_s {
1892 wwn_t hba_id; /* HBA Identifier */
1893 struct fdmi_port_list_s port_list; /* Registered Port List */
1894 struct fdmi_hba_attr_s hba_attr_blk; /* HBA attribute block */
1895};
1896
1897/*
1898 * FDMI Register Port
1899 */
1900struct fdmi_rprt_s {
1901 wwn_t hba_id; /* HBA Identifier */
1902 wwn_t port_name; /* Port wwn */
1903 struct fdmi_port_attr_s port_attr_blk; /* Port Attr Block */
1904};
1905
1906/*
1907 * FDMI Register Port Attributes
1908 */
1909struct fdmi_rpa_s {
1910 wwn_t port_name; /* port wwn */
1911 struct fdmi_port_attr_s port_attr_blk; /* Port Attr Block */
1912};
1913
1914#pragma pack()
1915
1916#endif /* __BFA_FC_H__ */
diff --git a/drivers/scsi/bfa/fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
index fee5456451cb..b7d2657ca82a 100644
--- a/drivers/scsi/bfa/fcbuild.c
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -18,25 +18,25 @@
18 * fcbuild.c - FC link service frame building and parsing routines 18 * fcbuild.c - FC link service frame building and parsing routines
19 */ 19 */
20 20
21#include <bfa_os_inc.h> 21#include "bfa_os_inc.h"
22#include "fcbuild.h" 22#include "bfa_fcbuild.h"
23 23
24/* 24/*
25 * static build functions 25 * static build functions
26 */ 26 */
27static void fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, 27static void fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
28 u16 ox_id); 28 u16 ox_id);
29static void fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, 29static void fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
30 u16 ox_id); 30 u16 ox_id);
31static struct fchs_s fc_els_req_tmpl; 31static struct fchs_s fc_els_req_tmpl;
32static struct fchs_s fc_els_rsp_tmpl; 32static struct fchs_s fc_els_rsp_tmpl;
33static struct fchs_s fc_bls_req_tmpl; 33static struct fchs_s fc_bls_req_tmpl;
34static struct fchs_s fc_bls_rsp_tmpl; 34static struct fchs_s fc_bls_rsp_tmpl;
35static struct fc_ba_acc_s ba_acc_tmpl; 35static struct fc_ba_acc_s ba_acc_tmpl;
36static struct fc_logi_s plogi_tmpl; 36static struct fc_logi_s plogi_tmpl;
37static struct fc_prli_s prli_tmpl; 37static struct fc_prli_s prli_tmpl;
38static struct fc_rrq_s rrq_tmpl; 38static struct fc_rrq_s rrq_tmpl;
39static struct fchs_s fcp_fchs_tmpl; 39static struct fchs_s fcp_fchs_tmpl;
40 40
41void 41void
42fcbuild_init(void) 42fcbuild_init(void)
@@ -123,7 +123,7 @@ fcbuild_init(void)
123 rrq_tmpl.els_cmd.els_code = FC_ELS_RRQ; 123 rrq_tmpl.els_cmd.els_code = FC_ELS_RRQ;
124 124
125 /* 125 /*
126 * fcp_fchs_tmpl 126 * fcp_struct fchs_s mpl
127 */ 127 */
128 fcp_fchs_tmpl.routing = FC_RTG_FC4_DEV_DATA; 128 fcp_fchs_tmpl.routing = FC_RTG_FC4_DEV_DATA;
129 fcp_fchs_tmpl.cat_info = FC_CAT_UNSOLICIT_CMD; 129 fcp_fchs_tmpl.cat_info = FC_CAT_UNSOLICIT_CMD;
@@ -135,8 +135,7 @@ fcbuild_init(void)
135} 135}
136 136
137static void 137static void
138fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, 138fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id)
139 u32 ox_id)
140{ 139{
141 bfa_os_memset(fchs, 0, sizeof(struct fchs_s)); 140 bfa_os_memset(fchs, 0, sizeof(struct fchs_s));
142 141
@@ -158,8 +157,7 @@ fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
158} 157}
159 158
160void 159void
161fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, 160fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
162 u16 ox_id)
163{ 161{
164 bfa_os_memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s)); 162 bfa_os_memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s));
165 fchs->d_id = (d_id); 163 fchs->d_id = (d_id);
@@ -168,8 +166,7 @@ fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
168} 166}
169 167
170static void 168static void
171fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, 169fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
172 u16 ox_id)
173{ 170{
174 bfa_os_memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s)); 171 bfa_os_memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s));
175 fchs->d_id = d_id; 172 fchs->d_id = d_id;
@@ -180,8 +177,8 @@ fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
180enum fc_parse_status 177enum fc_parse_status
181fc_els_rsp_parse(struct fchs_s *fchs, int len) 178fc_els_rsp_parse(struct fchs_s *fchs, int len)
182{ 179{
183 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); 180 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
184 struct fc_ls_rjt_s *ls_rjt = (struct fc_ls_rjt_s *) els_cmd; 181 struct fc_ls_rjt_s *ls_rjt = (struct fc_ls_rjt_s *) els_cmd;
185 182
186 len = len; 183 len = len;
187 184
@@ -199,8 +196,7 @@ fc_els_rsp_parse(struct fchs_s *fchs, int len)
199} 196}
200 197
201static void 198static void
202fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, 199fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
203 u16 ox_id)
204{ 200{
205 bfa_os_memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s)); 201 bfa_os_memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s));
206 fchs->d_id = d_id; 202 fchs->d_id = d_id;
@@ -213,7 +209,7 @@ fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
213 u16 ox_id, wwn_t port_name, wwn_t node_name, 209 u16 ox_id, wwn_t port_name, wwn_t node_name,
214 u16 pdu_size, u8 els_code) 210 u16 pdu_size, u8 els_code)
215{ 211{
216 struct fc_logi_s *plogi = (struct fc_logi_s *) (pld); 212 struct fc_logi_s *plogi = (struct fc_logi_s *) (pld);
217 213
218 bfa_os_memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s)); 214 bfa_os_memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s));
219 215
@@ -233,12 +229,11 @@ fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
233 229
234u16 230u16
235fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, 231fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
236 u16 ox_id, wwn_t port_name, wwn_t node_name, 232 u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size,
237 u16 pdu_size, u8 set_npiv, u8 set_auth, 233 u8 set_npiv, u8 set_auth, u16 local_bb_credits)
238 u16 local_bb_credits)
239{ 234{
240 u32 d_id = bfa_os_hton3b(FC_FABRIC_PORT); 235 u32 d_id = bfa_os_hton3b(FC_FABRIC_PORT);
241 u32 *vvl_info; 236 u32 *vvl_info;
242 237
243 bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); 238 bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
244 239
@@ -292,8 +287,7 @@ fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
292 287
293u16 288u16
294fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, 289fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
295 u16 ox_id, wwn_t port_name, wwn_t node_name, 290 u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size)
296 u16 pdu_size)
297{ 291{
298 u32 d_id = bfa_os_hton3b(FC_FABRIC_PORT); 292 u32 d_id = bfa_os_hton3b(FC_FABRIC_PORT);
299 293
@@ -330,9 +324,9 @@ fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
330enum fc_parse_status 324enum fc_parse_status
331fc_plogi_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name) 325fc_plogi_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name)
332{ 326{
333 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); 327 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
334 struct fc_logi_s *plogi; 328 struct fc_logi_s *plogi;
335 struct fc_ls_rjt_s *ls_rjt; 329 struct fc_ls_rjt_s *ls_rjt;
336 330
337 switch (els_cmd->els_code) { 331 switch (els_cmd->els_code) {
338 case FC_ELS_LS_RJT: 332 case FC_ELS_LS_RJT:
@@ -364,7 +358,7 @@ fc_plogi_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name)
364enum fc_parse_status 358enum fc_parse_status
365fc_plogi_parse(struct fchs_s *fchs) 359fc_plogi_parse(struct fchs_s *fchs)
366{ 360{
367 struct fc_logi_s *plogi = (struct fc_logi_s *) (fchs + 1); 361 struct fc_logi_s *plogi = (struct fc_logi_s *) (fchs + 1);
368 362
369 if (plogi->class3.class_valid != 1) 363 if (plogi->class3.class_valid != 1)
370 return FC_PARSE_FAILURE; 364 return FC_PARSE_FAILURE;
@@ -381,7 +375,7 @@ u16
381fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, 375fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
382 u16 ox_id) 376 u16 ox_id)
383{ 377{
384 struct fc_prli_s *prli = (struct fc_prli_s *) (pld); 378 struct fc_prli_s *prli = (struct fc_prli_s *) (pld);
385 379
386 fc_els_req_build(fchs, d_id, s_id, ox_id); 380 fc_els_req_build(fchs, d_id, s_id, ox_id);
387 bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s)); 381 bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));
@@ -398,19 +392,16 @@ fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
398 392
399u16 393u16
400fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, 394fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
401 u16 ox_id, enum bfa_port_role role) 395 u16 ox_id, enum bfa_lport_role role)
402{ 396{
403 struct fc_prli_s *prli = (struct fc_prli_s *) (pld); 397 struct fc_prli_s *prli = (struct fc_prli_s *) (pld);
404 398
405 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 399 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
406 bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s)); 400 bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));
407 401
408 prli->command = FC_ELS_ACC; 402 prli->command = FC_ELS_ACC;
409 403
410 if ((role & BFA_PORT_ROLE_FCP_TM) == BFA_PORT_ROLE_FCP_TM) 404 prli->parampage.servparams.initiator = 1;
411 prli->parampage.servparams.target = 1;
412 else
413 prli->parampage.servparams.initiator = 1;
414 405
415 prli->parampage.rspcode = FC_PRLI_ACC_XQTD; 406 prli->parampage.rspcode = FC_PRLI_ACC_XQTD;
416 407
@@ -452,12 +443,12 @@ fc_prli_parse(struct fc_prli_s *prli)
452} 443}
453 444
454u16 445u16
455fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id, 446fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id, u32 s_id,
456 u32 s_id, u16 ox_id, wwn_t port_name) 447 u16 ox_id, wwn_t port_name)
457{ 448{
458 fc_els_req_build(fchs, d_id, s_id, ox_id); 449 fc_els_req_build(fchs, d_id, s_id, ox_id);
459 450
460 memset(logo, '\0', sizeof(struct fc_logo_s)); 451 bfa_os_memset(logo, '\0', sizeof(struct fc_logo_s));
461 logo->els_cmd.els_code = FC_ELS_LOGO; 452 logo->els_cmd.els_code = FC_ELS_LOGO;
462 logo->nport_id = (s_id); 453 logo->nport_id = (s_id);
463 logo->orig_port_name = port_name; 454 logo->orig_port_name = port_name;
@@ -470,7 +461,7 @@ fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
470 u32 s_id, u16 ox_id, wwn_t port_name, 461 u32 s_id, u16 ox_id, wwn_t port_name,
471 wwn_t node_name, u8 els_code) 462 wwn_t node_name, u8 els_code)
472{ 463{
473 memset(adisc, '\0', sizeof(struct fc_adisc_s)); 464 bfa_os_memset(adisc, '\0', sizeof(struct fc_adisc_s));
474 465
475 adisc->els_cmd.els_code = els_code; 466 adisc->els_cmd.els_code = els_code;
476 467
@@ -489,8 +480,7 @@ fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
489 480
490u16 481u16
491fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id, 482fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
492 u32 s_id, u16 ox_id, wwn_t port_name, 483 u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name)
493 wwn_t node_name)
494{ 484{
495 return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name, 485 return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name,
496 node_name, FC_ELS_ADISC); 486 node_name, FC_ELS_ADISC);
@@ -523,10 +513,10 @@ fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len, wwn_t port_name,
523} 513}
524 514
525enum fc_parse_status 515enum fc_parse_status
526fc_adisc_parse(struct fchs_s *fchs, void *pld, u32 host_dap, 516fc_adisc_parse(struct fchs_s *fchs, void *pld, u32 host_dap, wwn_t node_name,
527 wwn_t node_name, wwn_t port_name) 517 wwn_t port_name)
528{ 518{
529 struct fc_adisc_s *adisc = (struct fc_adisc_s *) pld; 519 struct fc_adisc_s *adisc = (struct fc_adisc_s *) pld;
530 520
531 if (adisc->els_cmd.els_code != FC_ELS_ACC) 521 if (adisc->els_cmd.els_code != FC_ELS_ACC)
532 return FC_PARSE_FAILURE; 522 return FC_PARSE_FAILURE;
@@ -542,13 +532,13 @@ fc_adisc_parse(struct fchs_s *fchs, void *pld, u32 host_dap,
542enum fc_parse_status 532enum fc_parse_status
543fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, wwn_t port_name) 533fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, wwn_t port_name)
544{ 534{
545 struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1); 535 struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1);
546 536
547 if (pdisc->class3.class_valid != 1) 537 if (pdisc->class3.class_valid != 1)
548 return FC_PARSE_FAILURE; 538 return FC_PARSE_FAILURE;
549 539
550 if ((bfa_os_ntohs(pdisc->class3.rxsz) < 540 if ((bfa_os_ntohs(pdisc->class3.rxsz) <
551 (FC_MIN_PDUSZ - sizeof(struct fchs_s))) 541 (FC_MIN_PDUSZ - sizeof(struct fchs_s)))
552 || (pdisc->class3.rxsz == 0)) 542 || (pdisc->class3.rxsz == 0))
553 return FC_PARSE_FAILURE; 543 return FC_PARSE_FAILURE;
554 544
@@ -584,8 +574,8 @@ fc_abts_rsp_parse(struct fchs_s *fchs, int len)
584} 574}
585 575
586u16 576u16
587fc_rrq_build(struct fchs_s *fchs, struct fc_rrq_s *rrq, u32 d_id, 577fc_rrq_build(struct fchs_s *fchs, struct fc_rrq_s *rrq, u32 d_id, u32 s_id,
588 u32 s_id, u16 ox_id, u16 rrq_oxid) 578 u16 ox_id, u16 rrq_oxid)
589{ 579{
590 fc_els_req_build(fchs, d_id, s_id, ox_id); 580 fc_els_req_build(fchs, d_id, s_id, ox_id);
591 581
@@ -604,11 +594,11 @@ u16
604fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, 594fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
605 u16 ox_id) 595 u16 ox_id)
606{ 596{
607 struct fc_els_cmd_s *acc = pld; 597 struct fc_els_cmd_s *acc = pld;
608 598
609 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 599 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
610 600
611 memset(acc, 0, sizeof(struct fc_els_cmd_s)); 601 bfa_os_memset(acc, 0, sizeof(struct fc_els_cmd_s));
612 acc->els_code = FC_ELS_ACC; 602 acc->els_code = FC_ELS_ACC;
613 603
614 return sizeof(struct fc_els_cmd_s); 604 return sizeof(struct fc_els_cmd_s);
@@ -620,7 +610,7 @@ fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, u32 d_id,
620 u8 reason_code_expl) 610 u8 reason_code_expl)
621{ 611{
622 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 612 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
623 memset(ls_rjt, 0, sizeof(struct fc_ls_rjt_s)); 613 bfa_os_memset(ls_rjt, 0, sizeof(struct fc_ls_rjt_s));
624 614
625 ls_rjt->els_cmd.els_code = FC_ELS_LS_RJT; 615 ls_rjt->els_cmd.els_code = FC_ELS_LS_RJT;
626 ls_rjt->reason_code = reason_code; 616 ls_rjt->reason_code = reason_code;
@@ -647,11 +637,11 @@ fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id,
647} 637}
648 638
649u16 639u16
650fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd, 640fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd, u32 d_id,
651 u32 d_id, u32 s_id, u16 ox_id) 641 u32 s_id, u16 ox_id)
652{ 642{
653 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 643 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
654 memset(els_cmd, 0, sizeof(struct fc_els_cmd_s)); 644 bfa_os_memset(els_cmd, 0, sizeof(struct fc_els_cmd_s));
655 els_cmd->els_code = FC_ELS_ACC; 645 els_cmd->els_code = FC_ELS_ACC;
656 646
657 return sizeof(struct fc_els_cmd_s); 647 return sizeof(struct fc_els_cmd_s);
@@ -661,8 +651,8 @@ int
661fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code) 651fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code)
662{ 652{
663 int num_pages = 0; 653 int num_pages = 0;
664 struct fc_prlo_s *prlo; 654 struct fc_prlo_s *prlo;
665 struct fc_tprlo_s *tprlo; 655 struct fc_tprlo_s *tprlo;
666 656
667 if (els_code == FC_ELS_PRLO) { 657 if (els_code == FC_ELS_PRLO) {
668 prlo = (struct fc_prlo_s *) (fc_frame + 1); 658 prlo = (struct fc_prlo_s *) (fc_frame + 1);
@@ -676,14 +666,13 @@ fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code)
676 666
677u16 667u16
678fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc, 668fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
679 u32 d_id, u32 s_id, u16 ox_id, 669 u32 d_id, u32 s_id, u16 ox_id, int num_pages)
680 int num_pages)
681{ 670{
682 int page; 671 int page;
683 672
684 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 673 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
685 674
686 memset(tprlo_acc, 0, (num_pages * 16) + 4); 675 bfa_os_memset(tprlo_acc, 0, (num_pages * 16) + 4);
687 tprlo_acc->command = FC_ELS_ACC; 676 tprlo_acc->command = FC_ELS_ACC;
688 677
689 tprlo_acc->page_len = 0x10; 678 tprlo_acc->page_len = 0x10;
@@ -700,15 +689,14 @@ fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
700} 689}
701 690
702u16 691u16
703fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, 692fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, u32 d_id,
704 u32 d_id, u32 s_id, u16 ox_id, 693 u32 s_id, u16 ox_id, int num_pages)
705 int num_pages)
706{ 694{
707 int page; 695 int page;
708 696
709 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 697 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
710 698
711 memset(prlo_acc, 0, (num_pages * 16) + 4); 699 bfa_os_memset(prlo_acc, 0, (num_pages * 16) + 4);
712 prlo_acc->command = FC_ELS_ACC; 700 prlo_acc->command = FC_ELS_ACC;
713 prlo_acc->page_len = 0x10; 701 prlo_acc->page_len = 0x10;
714 prlo_acc->payload_len = bfa_os_htons((num_pages * 16) + 4); 702 prlo_acc->payload_len = bfa_os_htons((num_pages * 16) + 4);
@@ -726,11 +714,11 @@ fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc,
726 714
727u16 715u16
728fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, u32 d_id, 716fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, u32 d_id,
729 u32 s_id, u16 ox_id, u32 data_format) 717 u32 s_id, u16 ox_id, u32 data_format)
730{ 718{
731 fc_els_req_build(fchs, d_id, s_id, ox_id); 719 fc_els_req_build(fchs, d_id, s_id, ox_id);
732 720
733 memset(rnid, 0, sizeof(struct fc_rnid_cmd_s)); 721 bfa_os_memset(rnid, 0, sizeof(struct fc_rnid_cmd_s));
734 722
735 rnid->els_cmd.els_code = FC_ELS_RNID; 723 rnid->els_cmd.els_code = FC_ELS_RNID;
736 rnid->node_id_data_format = data_format; 724 rnid->node_id_data_format = data_format;
@@ -739,13 +727,12 @@ fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, u32 d_id,
739} 727}
740 728
741u16 729u16
742fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, 730fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, u32 d_id,
743 u32 d_id, u32 s_id, u16 ox_id, 731 u32 s_id, u16 ox_id, u32 data_format,
744 u32 data_format, 732 struct fc_rnid_common_id_data_s *common_id_data,
745 struct fc_rnid_common_id_data_s *common_id_data, 733 struct fc_rnid_general_topology_data_s *gen_topo_data)
746 struct fc_rnid_general_topology_data_s *gen_topo_data)
747{ 734{
748 memset(rnid_acc, 0, sizeof(struct fc_rnid_acc_s)); 735 bfa_os_memset(rnid_acc, 0, sizeof(struct fc_rnid_acc_s));
749 736
750 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 737 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
751 738
@@ -769,27 +756,26 @@ fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc,
769 756
770u16 757u16
771fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc, u32 d_id, 758fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc, u32 d_id,
772 u32 s_id, u16 ox_id) 759 u32 s_id, u16 ox_id)
773{ 760{
774 fc_els_req_build(fchs, d_id, s_id, ox_id); 761 fc_els_req_build(fchs, d_id, s_id, ox_id);
775 762
776 memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s)); 763 bfa_os_memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s));
777 764
778 rpsc->els_cmd.els_code = FC_ELS_RPSC; 765 rpsc->els_cmd.els_code = FC_ELS_RPSC;
779 return sizeof(struct fc_rpsc_cmd_s); 766 return sizeof(struct fc_rpsc_cmd_s);
780} 767}
781 768
782u16 769u16
783fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2, 770fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2, u32 d_id,
784 u32 d_id, u32 s_id, u32 *pid_list, 771 u32 s_id, u32 *pid_list, u16 npids)
785 u16 npids)
786{ 772{
787 u32 dctlr_id = FC_DOMAIN_CTRLR(bfa_os_hton3b(d_id)); 773 u32 dctlr_id = FC_DOMAIN_CTRLR(bfa_os_hton3b(d_id));
788 int i = 0; 774 int i = 0;
789 775
790 fc_els_req_build(fchs, bfa_os_hton3b(dctlr_id), s_id, 0); 776 fc_els_req_build(fchs, bfa_os_hton3b(dctlr_id), s_id, 0);
791 777
792 memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s)); 778 bfa_os_memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s));
793 779
794 rpsc2->els_cmd.els_code = FC_ELS_RPSC; 780 rpsc2->els_cmd.els_code = FC_ELS_RPSC;
795 rpsc2->token = bfa_os_htonl(FC_BRCD_TOKEN); 781 rpsc2->token = bfa_os_htonl(FC_BRCD_TOKEN);
@@ -797,16 +783,15 @@ fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2,
797 for (i = 0; i < npids; i++) 783 for (i = 0; i < npids; i++)
798 rpsc2->pid_list[i].pid = pid_list[i]; 784 rpsc2->pid_list[i].pid = pid_list[i];
799 785
800 return sizeof(struct fc_rpsc2_cmd_s) + ((npids - 1) * 786 return sizeof(struct fc_rpsc2_cmd_s) + ((npids - 1) * (sizeof(u32)));
801 (sizeof(u32)));
802} 787}
803 788
804u16 789u16
805fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc, 790fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
806 u32 d_id, u32 s_id, u16 ox_id, 791 u32 d_id, u32 s_id, u16 ox_id,
807 struct fc_rpsc_speed_info_s *oper_speed) 792 struct fc_rpsc_speed_info_s *oper_speed)
808{ 793{
809 memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s)); 794 bfa_os_memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s));
810 795
811 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 796 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
812 797
@@ -820,7 +805,6 @@ fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
820 bfa_os_htons(oper_speed->port_op_speed); 805 bfa_os_htons(oper_speed->port_op_speed);
821 806
822 return sizeof(struct fc_rpsc_acc_s); 807 return sizeof(struct fc_rpsc_acc_s);
823
824} 808}
825 809
826/* 810/*
@@ -831,7 +815,7 @@ fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
831u16 815u16
832fc_logo_rsp_parse(struct fchs_s *fchs, int len) 816fc_logo_rsp_parse(struct fchs_s *fchs, int len)
833{ 817{
834 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); 818 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
835 819
836 len = len; 820 len = len;
837 if (els_cmd->els_code != FC_ELS_ACC) 821 if (els_cmd->els_code != FC_ELS_ACC)
@@ -841,11 +825,10 @@ fc_logo_rsp_parse(struct fchs_s *fchs, int len)
841} 825}
842 826
843u16 827u16
844fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, 828fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
845 u16 ox_id, wwn_t port_name, wwn_t node_name, 829 wwn_t port_name, wwn_t node_name, u16 pdu_size)
846 u16 pdu_size)
847{ 830{
848 struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1); 831 struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1);
849 832
850 bfa_os_memcpy(pdisc, &plogi_tmpl, sizeof(struct fc_logi_s)); 833 bfa_os_memcpy(pdisc, &plogi_tmpl, sizeof(struct fc_logi_s));
851 834
@@ -862,7 +845,7 @@ fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
862u16 845u16
863fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name) 846fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name)
864{ 847{
865 struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1); 848 struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1);
866 849
867 if (len < sizeof(struct fc_logi_s)) 850 if (len < sizeof(struct fc_logi_s))
868 return FC_PARSE_LEN_INVAL; 851 return FC_PARSE_LEN_INVAL;
@@ -886,11 +869,11 @@ u16
886fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, 869fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
887 int num_pages) 870 int num_pages)
888{ 871{
889 struct fc_prlo_s *prlo = (struct fc_prlo_s *) (fchs + 1); 872 struct fc_prlo_s *prlo = (struct fc_prlo_s *) (fchs + 1);
890 int page; 873 int page;
891 874
892 fc_els_req_build(fchs, d_id, s_id, ox_id); 875 fc_els_req_build(fchs, d_id, s_id, ox_id);
893 memset(prlo, 0, (num_pages * 16) + 4); 876 bfa_os_memset(prlo, 0, (num_pages * 16) + 4);
894 prlo->command = FC_ELS_PRLO; 877 prlo->command = FC_ELS_PRLO;
895 prlo->page_len = 0x10; 878 prlo->page_len = 0x10;
896 prlo->payload_len = bfa_os_htons((num_pages * 16) + 4); 879 prlo->payload_len = bfa_os_htons((num_pages * 16) + 4);
@@ -909,7 +892,7 @@ fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
909u16 892u16
910fc_prlo_rsp_parse(struct fchs_s *fchs, int len) 893fc_prlo_rsp_parse(struct fchs_s *fchs, int len)
911{ 894{
912 struct fc_prlo_acc_s *prlo = (struct fc_prlo_acc_s *) (fchs + 1); 895 struct fc_prlo_acc_s *prlo = (struct fc_prlo_acc_s *) (fchs + 1);
913 int num_pages = 0; 896 int num_pages = 0;
914 int page = 0; 897 int page = 0;
915 898
@@ -941,15 +924,14 @@ fc_prlo_rsp_parse(struct fchs_s *fchs, int len)
941} 924}
942 925
943u16 926u16
944fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, 927fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
945 u16 ox_id, int num_pages, 928 int num_pages, enum fc_tprlo_type tprlo_type, u32 tpr_id)
946 enum fc_tprlo_type tprlo_type, u32 tpr_id)
947{ 929{
948 struct fc_tprlo_s *tprlo = (struct fc_tprlo_s *) (fchs + 1); 930 struct fc_tprlo_s *tprlo = (struct fc_tprlo_s *) (fchs + 1);
949 int page; 931 int page;
950 932
951 fc_els_req_build(fchs, d_id, s_id, ox_id); 933 fc_els_req_build(fchs, d_id, s_id, ox_id);
952 memset(tprlo, 0, (num_pages * 16) + 4); 934 bfa_os_memset(tprlo, 0, (num_pages * 16) + 4);
953 tprlo->command = FC_ELS_TPRLO; 935 tprlo->command = FC_ELS_TPRLO;
954 tprlo->page_len = 0x10; 936 tprlo->page_len = 0x10;
955 tprlo->payload_len = bfa_os_htons((num_pages * 16) + 4); 937 tprlo->payload_len = bfa_os_htons((num_pages * 16) + 4);
@@ -1003,7 +985,7 @@ fc_tprlo_rsp_parse(struct fchs_s *fchs, int len)
1003enum fc_parse_status 985enum fc_parse_status
1004fc_rrq_rsp_parse(struct fchs_s *fchs, int len) 986fc_rrq_rsp_parse(struct fchs_s *fchs, int len)
1005{ 987{
1006 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); 988 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
1007 989
1008 len = len; 990 len = len;
1009 if (els_cmd->els_code != FC_ELS_ACC) 991 if (els_cmd->els_code != FC_ELS_ACC)
@@ -1013,11 +995,10 @@ fc_rrq_rsp_parse(struct fchs_s *fchs, int len)
1013} 995}
1014 996
1015u16 997u16
1016fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, 998fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
1017 u16 ox_id, u32 reason_code, 999 u32 reason_code, u32 reason_expl)
1018 u32 reason_expl)
1019{ 1000{
1020 struct fc_ba_rjt_s *ba_rjt = (struct fc_ba_rjt_s *) (fchs + 1); 1001 struct fc_ba_rjt_s *ba_rjt = (struct fc_ba_rjt_s *) (fchs + 1);
1021 1002
1022 fc_bls_rsp_build(fchs, d_id, s_id, ox_id); 1003 fc_bls_rsp_build(fchs, d_id, s_id, ox_id);
1023 1004
@@ -1062,10 +1043,8 @@ u16
1062fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, 1043fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1063 wwn_t port_name) 1044 wwn_t port_name)
1064{ 1045{
1065 1046 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1066 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1047 struct fcgs_gidpn_req_s *gidpn = (struct fcgs_gidpn_req_s *)(cthdr + 1);
1067 struct fcgs_gidpn_req_s *gidpn =
1068 (struct fcgs_gidpn_req_s *) (cthdr + 1);
1069 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1048 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1070 1049
1071 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); 1050 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
@@ -1080,8 +1059,7 @@ u16
1080fc_gpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, 1059fc_gpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1081 u32 port_id) 1060 u32 port_id)
1082{ 1061{
1083 1062 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1084 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1085 fcgs_gpnid_req_t *gpnid = (fcgs_gpnid_req_t *) (cthdr + 1); 1063 fcgs_gpnid_req_t *gpnid = (fcgs_gpnid_req_t *) (cthdr + 1);
1086 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1064 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1087 1065
@@ -1097,8 +1075,7 @@ u16
1097fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, 1075fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1098 u32 port_id) 1076 u32 port_id)
1099{ 1077{
1100 1078 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1101 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1102 fcgs_gnnid_req_t *gnnid = (fcgs_gnnid_req_t *) (cthdr + 1); 1079 fcgs_gnnid_req_t *gnnid = (fcgs_gnnid_req_t *) (cthdr + 1);
1103 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1080 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1104 1081
@@ -1124,8 +1101,8 @@ fc_ct_rsp_parse(struct ct_hdr_s *cthdr)
1124} 1101}
1125 1102
1126u16 1103u16
1127fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr, u8 set_br_reg, 1104fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr,
1128 u32 s_id, u16 ox_id) 1105 u8 set_br_reg, u32 s_id, u16 ox_id)
1129{ 1106{
1130 u32 d_id = bfa_os_hton3b(FC_FABRIC_CONTROLLER); 1107 u32 d_id = bfa_os_hton3b(FC_FABRIC_CONTROLLER);
1131 1108
@@ -1141,8 +1118,8 @@ fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr, u8 set_br_reg,
1141} 1118}
1142 1119
1143u16 1120u16
1144fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, u32 s_id, 1121fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn,
1145 u16 ox_id) 1122 u32 s_id, u16 ox_id)
1146{ 1123{
1147 u32 d_id = bfa_os_hton3b(FC_FABRIC_CONTROLLER); 1124 u32 d_id = bfa_os_hton3b(FC_FABRIC_CONTROLLER);
1148 u16 payldlen; 1125 u16 payldlen;
@@ -1162,11 +1139,10 @@ fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, u32 s_id,
1162 1139
1163u16 1140u16
1164fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, 1141fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1165 enum bfa_port_role roles) 1142 enum bfa_lport_role roles)
1166{ 1143{
1167 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1144 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1168 struct fcgs_rftid_req_s *rftid = 1145 struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1);
1169 (struct fcgs_rftid_req_s *) (cthdr + 1);
1170 u32 type_value, d_id = bfa_os_hton3b(FC_NAME_SERVER); 1146 u32 type_value, d_id = bfa_os_hton3b(FC_NAME_SERVER);
1171 u8 index; 1147 u8 index;
1172 1148
@@ -1182,23 +1158,15 @@ fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1182 type_value = 1 << (FC_TYPE_FCP % 32); 1158 type_value = 1 << (FC_TYPE_FCP % 32);
1183 rftid->fc4_type[index] = bfa_os_htonl(type_value); 1159 rftid->fc4_type[index] = bfa_os_htonl(type_value);
1184 1160
1185 if (roles & BFA_PORT_ROLE_FCP_IPFC) {
1186 index = FC_TYPE_IP >> 5;
1187 type_value = 1 << (FC_TYPE_IP % 32);
1188 rftid->fc4_type[index] |= bfa_os_htonl(type_value);
1189 }
1190
1191 return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s); 1161 return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s);
1192} 1162}
1193 1163
1194u16 1164u16
1195fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id, 1165fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1196 u16 ox_id, u8 *fc4_bitmap, 1166 u8 *fc4_bitmap, u32 bitmap_size)
1197 u32 bitmap_size)
1198{ 1167{
1199 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1168 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1200 struct fcgs_rftid_req_s *rftid = 1169 struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1);
1201 (struct fcgs_rftid_req_s *) (cthdr + 1);
1202 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1170 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1203 1171
1204 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); 1172 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
@@ -1208,7 +1176,7 @@ fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id,
1208 1176
1209 rftid->dap = s_id; 1177 rftid->dap = s_id;
1210 bfa_os_memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap, 1178 bfa_os_memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap,
1211 (bitmap_size < 32 ? bitmap_size : 32)); 1179 (bitmap_size < 32 ? bitmap_size : 32));
1212 1180
1213 return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s); 1181 return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s);
1214} 1182}
@@ -1217,9 +1185,8 @@ u16
1217fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, 1185fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1218 u8 fc4_type, u8 fc4_ftrs) 1186 u8 fc4_type, u8 fc4_ftrs)
1219{ 1187{
1220 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1188 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1221 struct fcgs_rffid_req_s *rffid = 1189 struct fcgs_rffid_req_s *rffid = (struct fcgs_rffid_req_s *)(cthdr + 1);
1222 (struct fcgs_rffid_req_s *) (cthdr + 1);
1223 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1190 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1224 1191
1225 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); 1192 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
@@ -1227,9 +1194,9 @@ fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1227 1194
1228 bfa_os_memset(rffid, 0, sizeof(struct fcgs_rffid_req_s)); 1195 bfa_os_memset(rffid, 0, sizeof(struct fcgs_rffid_req_s));
1229 1196
1230 rffid->dap = s_id; 1197 rffid->dap = s_id;
1231 rffid->fc4ftr_bits = fc4_ftrs; 1198 rffid->fc4ftr_bits = fc4_ftrs;
1232 rffid->fc4_type = fc4_type; 1199 rffid->fc4_type = fc4_type;
1233 1200
1234 return sizeof(struct fcgs_rffid_req_s) + sizeof(struct ct_hdr_s); 1201 return sizeof(struct fcgs_rffid_req_s) + sizeof(struct ct_hdr_s);
1235} 1202}
@@ -1239,9 +1206,9 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1239 u8 *name) 1206 u8 *name)
1240{ 1207{
1241 1208
1242 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1209 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1243 struct fcgs_rspnid_req_s *rspnid = 1210 struct fcgs_rspnid_req_s *rspnid =
1244 (struct fcgs_rspnid_req_s *) (cthdr + 1); 1211 (struct fcgs_rspnid_req_s *)(cthdr + 1);
1245 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1212 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1246 1213
1247 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); 1214 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
@@ -1257,13 +1224,11 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1257} 1224}
1258 1225
1259u16 1226u16
1260fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id, 1227fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id, u8 fc4_type)
1261 u8 fc4_type)
1262{ 1228{
1263 1229
1264 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1230 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1265 struct fcgs_gidft_req_s *gidft = 1231 struct fcgs_gidft_req_s *gidft = (struct fcgs_gidft_req_s *)(cthdr + 1);
1266 (struct fcgs_gidft_req_s *) (cthdr + 1);
1267 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1232 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1268 1233
1269 fc_gs_fchdr_build(fchs, d_id, s_id, 0); 1234 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
@@ -1282,9 +1247,8 @@ u16
1282fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, 1247fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
1283 wwn_t port_name) 1248 wwn_t port_name)
1284{ 1249{
1285 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1250 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1286 struct fcgs_rpnid_req_s *rpnid = 1251 struct fcgs_rpnid_req_s *rpnid = (struct fcgs_rpnid_req_s *)(cthdr + 1);
1287 (struct fcgs_rpnid_req_s *) (cthdr + 1);
1288 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1252 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1289 1253
1290 fc_gs_fchdr_build(fchs, d_id, s_id, 0); 1254 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
@@ -1301,9 +1265,8 @@ u16
1301fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, 1265fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
1302 wwn_t node_name) 1266 wwn_t node_name)
1303{ 1267{
1304 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1268 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1305 struct fcgs_rnnid_req_s *rnnid = 1269 struct fcgs_rnnid_req_s *rnnid = (struct fcgs_rnnid_req_s *)(cthdr + 1);
1306 (struct fcgs_rnnid_req_s *) (cthdr + 1);
1307 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1270 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1308 1271
1309 fc_gs_fchdr_build(fchs, d_id, s_id, 0); 1272 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
@@ -1320,7 +1283,7 @@ u16
1320fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, 1283fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
1321 u32 cos) 1284 u32 cos)
1322{ 1285{
1323 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1286 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1324 struct fcgs_rcsid_req_s *rcsid = 1287 struct fcgs_rcsid_req_s *rcsid =
1325 (struct fcgs_rcsid_req_s *) (cthdr + 1); 1288 (struct fcgs_rcsid_req_s *) (cthdr + 1);
1326 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1289 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
@@ -1339,9 +1302,8 @@ u16
1339fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, 1302fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
1340 u8 port_type) 1303 u8 port_type)
1341{ 1304{
1342 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1305 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1343 struct fcgs_rptid_req_s *rptid = 1306 struct fcgs_rptid_req_s *rptid = (struct fcgs_rptid_req_s *)(cthdr + 1);
1344 (struct fcgs_rptid_req_s *) (cthdr + 1);
1345 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1307 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1346 1308
1347 fc_gs_fchdr_build(fchs, d_id, s_id, 0); 1309 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
@@ -1357,9 +1319,8 @@ fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
1357u16 1319u16
1358fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id) 1320fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id)
1359{ 1321{
1360 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1322 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1361 struct fcgs_ganxt_req_s *ganxt = 1323 struct fcgs_ganxt_req_s *ganxt = (struct fcgs_ganxt_req_s *)(cthdr + 1);
1362 (struct fcgs_ganxt_req_s *) (cthdr + 1);
1363 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1324 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1364 1325
1365 fc_gs_fchdr_build(fchs, d_id, s_id, 0); 1326 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
@@ -1379,7 +1340,7 @@ fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id,
1379 u16 cmd_code) 1340 u16 cmd_code)
1380{ 1341{
1381 1342
1382 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1343 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1383 u32 d_id = bfa_os_hton3b(FC_MGMT_SERVER); 1344 u32 d_id = bfa_os_hton3b(FC_MGMT_SERVER);
1384 1345
1385 fc_gs_fchdr_build(fchs, d_id, s_id, 0); 1346 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
@@ -1409,12 +1370,12 @@ fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask)
1409} 1370}
1410 1371
1411/* 1372/*
1412 * GMAL Request 1373 * GMAL Request
1413 */ 1374 */
1414u16 1375u16
1415fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn) 1376fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
1416{ 1377{
1417 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1378 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1418 fcgs_gmal_req_t *gmal = (fcgs_gmal_req_t *) (cthdr + 1); 1379 fcgs_gmal_req_t *gmal = (fcgs_gmal_req_t *) (cthdr + 1);
1419 u32 d_id = bfa_os_hton3b(FC_MGMT_SERVER); 1380 u32 d_id = bfa_os_hton3b(FC_MGMT_SERVER);
1420 1381
@@ -1434,7 +1395,7 @@ fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
1434u16 1395u16
1435fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn) 1396fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
1436{ 1397{
1437 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1398 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1438 fcgs_gfn_req_t *gfn = (fcgs_gfn_req_t *) (cthdr + 1); 1399 fcgs_gfn_req_t *gfn = (fcgs_gfn_req_t *) (cthdr + 1);
1439 u32 d_id = bfa_os_hton3b(FC_MGMT_SERVER); 1400 u32 d_id = bfa_os_hton3b(FC_MGMT_SERVER);
1440 1401
diff --git a/drivers/scsi/bfa/bfa_fcbuild.h b/drivers/scsi/bfa/bfa_fcbuild.h
new file mode 100644
index 000000000000..73abd02e53cc
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcbuild.h
@@ -0,0 +1,316 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17/*
18 * fcbuild.h - FC link service frame building and parsing routines
19 */
20
21#ifndef __FCBUILD_H__
22#define __FCBUILD_H__
23
24#include "bfa_os_inc.h"
25#include "bfa_fc.h"
26#include "bfa_defs_fcs.h"
27
28/*
29 * Utility Macros/functions
30 */
31
32#define wwn_is_equal(_wwn1, _wwn2) \
33 (memcmp(&(_wwn1), &(_wwn2), sizeof(wwn_t)) == 0)
34
35#define fc_roundup(_l, _s) (((_l) + ((_s) - 1)) & ~((_s) - 1))
36
37/*
38 * Given the fc response length, this routine will return
39 * the length of the actual payload bytes following the CT header.
40 *
41 * Assumes the input response length does not include the crc, eof, etc.
42 */
43static inline u32
44fc_get_ctresp_pyld_len(u32 resp_len)
45{
46 return resp_len - sizeof(struct ct_hdr_s);
47}
48
49/*
50 * Convert bfa speed to rpsc speed value.
51 */
52static inline enum bfa_port_speed
53fc_rpsc_operspeed_to_bfa_speed(enum fc_rpsc_op_speed speed)
54{
55 switch (speed) {
56
57 case RPSC_OP_SPEED_1G:
58 return BFA_PORT_SPEED_1GBPS;
59
60 case RPSC_OP_SPEED_2G:
61 return BFA_PORT_SPEED_2GBPS;
62
63 case RPSC_OP_SPEED_4G:
64 return BFA_PORT_SPEED_4GBPS;
65
66 case RPSC_OP_SPEED_8G:
67 return BFA_PORT_SPEED_8GBPS;
68
69 case RPSC_OP_SPEED_10G:
70 return BFA_PORT_SPEED_10GBPS;
71
72 default:
73 return BFA_PORT_SPEED_UNKNOWN;
74 }
75}
76
77/*
78 * Convert RPSC speed to bfa speed value.
79 */
80static inline enum fc_rpsc_op_speed
81fc_bfa_speed_to_rpsc_operspeed(enum bfa_port_speed op_speed)
82{
83 switch (op_speed) {
84
85 case BFA_PORT_SPEED_1GBPS:
86 return RPSC_OP_SPEED_1G;
87
88 case BFA_PORT_SPEED_2GBPS:
89 return RPSC_OP_SPEED_2G;
90
91 case BFA_PORT_SPEED_4GBPS:
92 return RPSC_OP_SPEED_4G;
93
94 case BFA_PORT_SPEED_8GBPS:
95 return RPSC_OP_SPEED_8G;
96
97 case BFA_PORT_SPEED_10GBPS:
98 return RPSC_OP_SPEED_10G;
99
100 default:
101 return RPSC_OP_SPEED_NOT_EST;
102 }
103}
104
105enum fc_parse_status {
106 FC_PARSE_OK = 0,
107 FC_PARSE_FAILURE = 1,
108 FC_PARSE_BUSY = 2,
109 FC_PARSE_LEN_INVAL,
110 FC_PARSE_ACC_INVAL,
111 FC_PARSE_PWWN_NOT_EQUAL,
112 FC_PARSE_NWWN_NOT_EQUAL,
113 FC_PARSE_RXSZ_INVAL,
114 FC_PARSE_NOT_FCP,
115 FC_PARSE_OPAFLAG_INVAL,
116 FC_PARSE_RPAFLAG_INVAL,
117 FC_PARSE_OPA_INVAL,
118 FC_PARSE_RPA_INVAL,
119
120};
121
122struct fc_templates_s {
123 struct fchs_s fc_els_req;
124 struct fchs_s fc_bls_req;
125 struct fc_logi_s plogi;
126 struct fc_rrq_s rrq;
127};
128
129void fcbuild_init(void);
130
131u16 fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi,
132 u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name,
133 u16 pdu_size, u8 set_npiv, u8 set_auth,
134 u16 local_bb_credits);
135
136u16 fc_fdisc_build(struct fchs_s *buf, struct fc_logi_s *flogi, u32 s_id,
137 u16 ox_id, wwn_t port_name, wwn_t node_name,
138 u16 pdu_size);
139
140u16 fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi,
141 u32 s_id, u16 ox_id,
142 wwn_t port_name, wwn_t node_name,
143 u16 pdu_size,
144 u16 local_bb_credits);
145
146u16 fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id,
147 u32 s_id, u16 ox_id, wwn_t port_name,
148 wwn_t node_name, u16 pdu_size);
149
150enum fc_parse_status fc_plogi_parse(struct fchs_s *fchs);
151
152u16 fc_abts_build(struct fchs_s *buf, u32 d_id, u32 s_id,
153 u16 ox_id);
154
155enum fc_parse_status fc_abts_rsp_parse(struct fchs_s *buf, int len);
156
157u16 fc_rrq_build(struct fchs_s *buf, struct fc_rrq_s *rrq, u32 d_id,
158 u32 s_id, u16 ox_id, u16 rrq_oxid);
159enum fc_parse_status fc_rrq_rsp_parse(struct fchs_s *buf, int len);
160
161u16 fc_rspnid_build(struct fchs_s *fchs, void *pld, u32 s_id,
162 u16 ox_id, u8 *name);
163
164u16 fc_rftid_build(struct fchs_s *fchs, void *pld, u32 s_id,
165 u16 ox_id, enum bfa_lport_role role);
166
167u16 fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id,
168 u16 ox_id, u8 *fc4_bitmap,
169 u32 bitmap_size);
170
171u16 fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
172 u16 ox_id, u8 fc4_type, u8 fc4_ftrs);
173
174u16 fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id,
175 u16 ox_id, wwn_t port_name);
176
177u16 fc_gpnid_build(struct fchs_s *fchs, void *pld, u32 s_id,
178 u16 ox_id, u32 port_id);
179
180u16 fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr,
181 u8 set_br_reg, u32 s_id, u16 ox_id);
182
183u16 fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
184 u32 s_id, u16 ox_id,
185 wwn_t port_name, wwn_t node_name,
186 u16 pdu_size);
187
188u16 fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc,
189 u32 d_id, u32 s_id, u16 ox_id, wwn_t port_name,
190 wwn_t node_name);
191
192enum fc_parse_status fc_adisc_parse(struct fchs_s *fchs, void *pld,
193 u32 host_dap, wwn_t node_name, wwn_t port_name);
194
195enum fc_parse_status fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len,
196 wwn_t port_name, wwn_t node_name);
197
198u16 fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc,
199 u32 d_id, u32 s_id, u16 ox_id,
200 wwn_t port_name, wwn_t node_name);
201u16 fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt,
202 u32 d_id, u32 s_id, u16 ox_id,
203 u8 reason_code, u8 reason_code_expl);
204u16 fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd,
205 u32 d_id, u32 s_id, u16 ox_id);
206u16 fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id,
207 u32 s_id, u16 ox_id);
208
209enum fc_parse_status fc_prli_rsp_parse(struct fc_prli_s *prli, int len);
210
211u16 fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
212 u32 s_id, u16 ox_id,
213 enum bfa_lport_role role);
214
215u16 fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid,
216 u32 d_id, u32 s_id, u16 ox_id,
217 u32 data_format);
218
219u16 fc_rnid_acc_build(struct fchs_s *fchs,
220 struct fc_rnid_acc_s *rnid_acc, u32 d_id, u32 s_id,
221 u16 ox_id, u32 data_format,
222 struct fc_rnid_common_id_data_s *common_id_data,
223 struct fc_rnid_general_topology_data_s *gen_topo_data);
224
225u16 fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rps2c,
226 u32 d_id, u32 s_id, u32 *pid_list, u16 npids);
227u16 fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc,
228 u32 d_id, u32 s_id, u16 ox_id);
229u16 fc_rpsc_acc_build(struct fchs_s *fchs,
230 struct fc_rpsc_acc_s *rpsc_acc, u32 d_id, u32 s_id,
231 u16 ox_id, struct fc_rpsc_speed_info_s *oper_speed);
232u16 fc_gid_ft_build(struct fchs_s *fchs, void *pld, u32 s_id,
233 u8 fc4_type);
234
235u16 fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
236 u32 port_id, wwn_t port_name);
237
238u16 fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
239 u32 port_id, wwn_t node_name);
240
241u16 fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
242 u32 port_id, u32 cos);
243
244u16 fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
245 u32 port_id, u8 port_type);
246
247u16 fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id,
248 u32 port_id);
249
250u16 fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id,
251 u32 s_id, u16 ox_id, wwn_t port_name);
252
253u16 fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
254 u32 s_id, u16 ox_id);
255
256u16 fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id,
257 u16 cmd_code);
258u16 fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn);
259u16 fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn);
260
261void fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask);
262
263void fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
264 u16 ox_id);
265
266enum fc_parse_status fc_els_rsp_parse(struct fchs_s *fchs, int len);
267
268enum fc_parse_status fc_plogi_rsp_parse(struct fchs_s *fchs, int len,
269 wwn_t port_name);
270
271enum fc_parse_status fc_prli_parse(struct fc_prli_s *prli);
272
273enum fc_parse_status fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name,
274 wwn_t port_name);
275
276u16 fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id,
277 u32 s_id, u16 ox_id, u16 rx_id);
278
279int fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code);
280
281u16 fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
282 u32 d_id, u32 s_id, u16 ox_id, int num_pages);
283
284u16 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc,
285 u32 d_id, u32 s_id, u16 ox_id, int num_pages);
286
287u16 fc_logo_rsp_parse(struct fchs_s *fchs, int len);
288
289u16 fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
290 u16 ox_id, wwn_t port_name, wwn_t node_name,
291 u16 pdu_size);
292
293u16 fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name);
294
295u16 fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
296 u16 ox_id, int num_pages);
297
298u16 fc_prlo_rsp_parse(struct fchs_s *fchs, int len);
299
300u16 fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
301 u16 ox_id, int num_pages, enum fc_tprlo_type tprlo_type,
302 u32 tpr_id);
303
304u16 fc_tprlo_rsp_parse(struct fchs_s *fchs, int len);
305
306u16 fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
307 u16 ox_id, u32 reason_code, u32 reason_expl);
308
309u16 fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
310 u32 port_id);
311
312u16 fc_ct_rsp_parse(struct ct_hdr_s *cthdr);
313
314u16 fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, u32 s_id,
315 u16 ox_id);
316#endif
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 8c703d8dc94b..33c8dd51f474 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -15,18 +15,291 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18#include <bfa.h> 18#include "bfa_modules.h"
19#include <log/bfa_log_hal.h> 19#include "bfa_cb_ioim.h"
20 20
21BFA_TRC_FILE(HAL, FCPIM); 21BFA_TRC_FILE(HAL, FCPIM);
22BFA_MODULE(fcpim); 22BFA_MODULE(fcpim);
23 23
24
25#define bfa_fcpim_add_iostats(__l, __r, __stats) \
26 (__l->__stats += __r->__stats)
27
28
29/**
30 * BFA ITNIM Related definitions
31 */
32static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
33
34#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \
35 (((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))
36
37#define bfa_fcpim_additn(__itnim) \
38 list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)
39#define bfa_fcpim_delitn(__itnim) do { \
40 bfa_assert(bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim)); \
41 bfa_itnim_update_del_itn_stats(__itnim); \
42 list_del(&(__itnim)->qe); \
43 bfa_assert(list_empty(&(__itnim)->io_q)); \
44 bfa_assert(list_empty(&(__itnim)->io_cleanup_q)); \
45 bfa_assert(list_empty(&(__itnim)->pending_q)); \
46} while (0)
47
48#define bfa_itnim_online_cb(__itnim) do { \
49 if ((__itnim)->bfa->fcs) \
50 bfa_cb_itnim_online((__itnim)->ditn); \
51 else { \
52 bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
53 __bfa_cb_itnim_online, (__itnim)); \
54 } \
55} while (0)
56
57#define bfa_itnim_offline_cb(__itnim) do { \
58 if ((__itnim)->bfa->fcs) \
59 bfa_cb_itnim_offline((__itnim)->ditn); \
60 else { \
61 bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
62 __bfa_cb_itnim_offline, (__itnim)); \
63 } \
64} while (0)
65
66#define bfa_itnim_sler_cb(__itnim) do { \
67 if ((__itnim)->bfa->fcs) \
68 bfa_cb_itnim_sler((__itnim)->ditn); \
69 else { \
70 bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
71 __bfa_cb_itnim_sler, (__itnim)); \
72 } \
73} while (0)
74
75/**
76 * bfa_itnim_sm BFA itnim state machine
77 */
78
79
80enum bfa_itnim_event {
81 BFA_ITNIM_SM_CREATE = 1, /* itnim is created */
82 BFA_ITNIM_SM_ONLINE = 2, /* itnim is online */
83 BFA_ITNIM_SM_OFFLINE = 3, /* itnim is offline */
84 BFA_ITNIM_SM_FWRSP = 4, /* firmware response */
85 BFA_ITNIM_SM_DELETE = 5, /* deleting an existing itnim */
86 BFA_ITNIM_SM_CLEANUP = 6, /* IO cleanup completion */
87 BFA_ITNIM_SM_SLER = 7, /* second level error recovery */
88 BFA_ITNIM_SM_HWFAIL = 8, /* IOC h/w failure event */
89 BFA_ITNIM_SM_QRESUME = 9, /* queue space available */
90};
91
92/**
93 * BFA IOIM related definitions
94 */
95#define bfa_ioim_move_to_comp_q(__ioim) do { \
96 list_del(&(__ioim)->qe); \
97 list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q); \
98} while (0)
99
100
101#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do { \
102 if ((__fcpim)->profile_comp) \
103 (__fcpim)->profile_comp(__ioim); \
104} while (0)
105
106#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do { \
107 if ((__fcpim)->profile_start) \
108 (__fcpim)->profile_start(__ioim); \
109} while (0)
110/**
111 * hal_ioim_sm
112 */
113
114/**
115 * IO state machine events
116 */
117enum bfa_ioim_event {
118 BFA_IOIM_SM_START = 1, /* io start request from host */
119 BFA_IOIM_SM_COMP_GOOD = 2, /* io good comp, resource free */
120 BFA_IOIM_SM_COMP = 3, /* io comp, resource is free */
121 BFA_IOIM_SM_COMP_UTAG = 4, /* io comp, resource is free */
122 BFA_IOIM_SM_DONE = 5, /* io comp, resource not free */
123 BFA_IOIM_SM_FREE = 6, /* io resource is freed */
124 BFA_IOIM_SM_ABORT = 7, /* abort request from scsi stack */
125 BFA_IOIM_SM_ABORT_COMP = 8, /* abort from f/w */
126 BFA_IOIM_SM_ABORT_DONE = 9, /* abort completion from f/w */
127 BFA_IOIM_SM_QRESUME = 10, /* CQ space available to queue IO */
128 BFA_IOIM_SM_SGALLOCED = 11, /* SG page allocation successful */
129 BFA_IOIM_SM_SQRETRY = 12, /* sequence recovery retry */
130 BFA_IOIM_SM_HCB = 13, /* bfa callback complete */
131 BFA_IOIM_SM_CLEANUP = 14, /* IO cleanup from itnim */
132 BFA_IOIM_SM_TMSTART = 15, /* IO cleanup from tskim */
133 BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */
134 BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */
135 BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */
136};
137
138
139/**
140 * BFA TSKIM related definitions
141 */
142
143/**
144 * task management completion handling
145 */
146#define bfa_tskim_qcomp(__tskim, __cbfn) do { \
147 bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\
148 bfa_tskim_notify_comp(__tskim); \
149} while (0)
150
151#define bfa_tskim_notify_comp(__tskim) do { \
152 if ((__tskim)->notify) \
153 bfa_itnim_tskdone((__tskim)->itnim); \
154} while (0)
155
156
157enum bfa_tskim_event {
158 BFA_TSKIM_SM_START = 1, /* TM command start */
159 BFA_TSKIM_SM_DONE = 2, /* TM completion */
160 BFA_TSKIM_SM_QRESUME = 3, /* resume after qfull */
161 BFA_TSKIM_SM_HWFAIL = 5, /* IOC h/w failure event */
162 BFA_TSKIM_SM_HCB = 6, /* BFA callback completion */
163 BFA_TSKIM_SM_IOS_DONE = 7, /* IO and sub TM completions */
164 BFA_TSKIM_SM_CLEANUP = 8, /* TM cleanup on ITN offline */
165 BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */
166};
167
168/**
169 * forward declaration for BFA ITNIM functions
170 */
171static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
172static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
173static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
174static void bfa_itnim_cleanp_comp(void *itnim_cbarg);
175static void bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
176static void __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
177static void __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
178static void __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
179static void bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
180static void bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
181static void bfa_itnim_iotov(void *itnim_arg);
182static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
183static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
184static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);
185
186/**
187 * forward declaration of ITNIM state machine
188 */
189static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
190 enum bfa_itnim_event event);
191static void bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
192 enum bfa_itnim_event event);
193static void bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
194 enum bfa_itnim_event event);
195static void bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
196 enum bfa_itnim_event event);
197static void bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
198 enum bfa_itnim_event event);
199static void bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
200 enum bfa_itnim_event event);
201static void bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
202 enum bfa_itnim_event event);
203static void bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
204 enum bfa_itnim_event event);
205static void bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
206 enum bfa_itnim_event event);
207static void bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
208 enum bfa_itnim_event event);
209static void bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
210 enum bfa_itnim_event event);
211static void bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
212 enum bfa_itnim_event event);
213static void bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
214 enum bfa_itnim_event event);
215static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
216 enum bfa_itnim_event event);
217static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
218 enum bfa_itnim_event event);
219
220/**
221 * forward declaration for BFA IOIM functions
222 */
223static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
224static bfa_boolean_t bfa_ioim_sge_setup(struct bfa_ioim_s *ioim);
225static void bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim);
226static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
227static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
228static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
229static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
230static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
231static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
232static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
233static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
234
235
236/**
237 * forward declaration of BFA IO state machine
238 */
239static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
240 enum bfa_ioim_event event);
241static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
242 enum bfa_ioim_event event);
243static void bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
244 enum bfa_ioim_event event);
245static void bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
246 enum bfa_ioim_event event);
247static void bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
248 enum bfa_ioim_event event);
249static void bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
250 enum bfa_ioim_event event);
251static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
252 enum bfa_ioim_event event);
253static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
254 enum bfa_ioim_event event);
255static void bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
256 enum bfa_ioim_event event);
257static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
258 enum bfa_ioim_event event);
259static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
260 enum bfa_ioim_event event);
261static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
262 enum bfa_ioim_event event);
263
264/**
265 * forward declaration for BFA TSKIM functions
266 */
267static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
268static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
269static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
270 lun_t lun);
271static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
272static void bfa_tskim_cleanp_comp(void *tskim_cbarg);
273static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
274static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
275static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
276static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);
277
278
279/**
280 * forward declaration of BFA TSKIM state machine
281 */
282static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
283 enum bfa_tskim_event event);
284static void bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
285 enum bfa_tskim_event event);
286static void bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
287 enum bfa_tskim_event event);
288static void bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
289 enum bfa_tskim_event event);
290static void bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
291 enum bfa_tskim_event event);
292static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
293 enum bfa_tskim_event event);
294static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
295 enum bfa_tskim_event event);
296
24/** 297/**
25 * hal_fcpim_mod BFA FCP Initiator Mode module 298 * hal_fcpim_mod BFA FCP Initiator Mode module
26 */ 299 */
27 300
28/** 301/**
29 * Compute and return memory needed by FCP(im) module. 302 * Compute and return memory needed by FCP(im) module.
30 */ 303 */
31static void 304static void
32bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, 305bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
@@ -58,7 +331,7 @@ bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
58 331
59static void 332static void
60bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, 333bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
61 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) 334 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
62{ 335{
63 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 336 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
64 337
@@ -67,12 +340,14 @@ bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
67 bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs); 340 bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
68 bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs); 341 bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);
69 342
70 fcpim->bfa = bfa; 343 fcpim->bfa = bfa;
71 fcpim->num_itnims = cfg->fwcfg.num_rports; 344 fcpim->num_itnims = cfg->fwcfg.num_rports;
72 fcpim->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs; 345 fcpim->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
73 fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs; 346 fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
74 fcpim->path_tov = cfg->drvcfg.path_tov; 347 fcpim->path_tov = cfg->drvcfg.path_tov;
75 fcpim->delay_comp = cfg->drvcfg.delay_comp; 348 fcpim->delay_comp = cfg->drvcfg.delay_comp;
349 fcpim->profile_comp = NULL;
350 fcpim->profile_start = NULL;
76 351
77 bfa_itnim_attach(fcpim, meminfo); 352 bfa_itnim_attach(fcpim, meminfo);
78 bfa_tskim_attach(fcpim, meminfo); 353 bfa_tskim_attach(fcpim, meminfo);
@@ -103,7 +378,7 @@ bfa_fcpim_iocdisable(struct bfa_s *bfa)
103{ 378{
104 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 379 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
105 struct bfa_itnim_s *itnim; 380 struct bfa_itnim_s *itnim;
106 struct list_head *qe, *qen; 381 struct list_head *qe, *qen;
107 382
108 list_for_each_safe(qe, qen, &fcpim->itnim_q) { 383 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
109 itnim = (struct bfa_itnim_s *) qe; 384 itnim = (struct bfa_itnim_s *) qe;
@@ -112,6 +387,56 @@ bfa_fcpim_iocdisable(struct bfa_s *bfa)
112} 387}
113 388
114void 389void
390bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
391 struct bfa_itnim_iostats_s *rstats)
392{
393 bfa_fcpim_add_iostats(lstats, rstats, total_ios);
394 bfa_fcpim_add_iostats(lstats, rstats, qresumes);
395 bfa_fcpim_add_iostats(lstats, rstats, no_iotags);
396 bfa_fcpim_add_iostats(lstats, rstats, io_aborts);
397 bfa_fcpim_add_iostats(lstats, rstats, no_tskims);
398 bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok);
399 bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun);
400 bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun);
401 bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted);
402 bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout);
403 bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort);
404 bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err);
405 bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err);
406 bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed);
407 bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free);
408 bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts);
409 bfa_fcpim_add_iostats(lstats, rstats, iocom_utags);
410 bfa_fcpim_add_iostats(lstats, rstats, io_cleanups);
411 bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts);
412 bfa_fcpim_add_iostats(lstats, rstats, onlines);
413 bfa_fcpim_add_iostats(lstats, rstats, offlines);
414 bfa_fcpim_add_iostats(lstats, rstats, creates);
415 bfa_fcpim_add_iostats(lstats, rstats, deletes);
416 bfa_fcpim_add_iostats(lstats, rstats, create_comps);
417 bfa_fcpim_add_iostats(lstats, rstats, delete_comps);
418 bfa_fcpim_add_iostats(lstats, rstats, sler_events);
419 bfa_fcpim_add_iostats(lstats, rstats, fw_create);
420 bfa_fcpim_add_iostats(lstats, rstats, fw_delete);
421 bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled);
422 bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps);
423 bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds);
424 bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps);
425 bfa_fcpim_add_iostats(lstats, rstats, tm_success);
426 bfa_fcpim_add_iostats(lstats, rstats, tm_failures);
427 bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps);
428 bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes);
429 bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns);
430 bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups);
431 bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps);
432 bfa_fcpim_add_iostats(lstats, rstats, io_comps);
433 bfa_fcpim_add_iostats(lstats, rstats, input_reqs);
434 bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
435 bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
436 bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
437}
438
439void
115bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov) 440bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
116{ 441{
117 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 442 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
@@ -130,21 +455,113 @@ bfa_fcpim_path_tov_get(struct bfa_s *bfa)
130} 455}
131 456
132bfa_status_t 457bfa_status_t
133bfa_fcpim_get_modstats(struct bfa_s *bfa, struct bfa_fcpim_stats_s *modstats) 458bfa_fcpim_port_iostats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *stats,
459 u8 lp_tag)
460{
461 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
462 struct list_head *qe, *qen;
463 struct bfa_itnim_s *itnim;
464
465 /* accumulate IO stats from itnim */
466 bfa_os_memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
467 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
468 itnim = (struct bfa_itnim_s *) qe;
469 if (itnim->rport->rport_info.lp_tag != lp_tag)
470 continue;
471 bfa_fcpim_add_stats(stats, &(itnim->stats));
472 }
473 return BFA_STATUS_OK;
474}
475bfa_status_t
476bfa_fcpim_get_modstats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *modstats)
477{
478 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
479 struct list_head *qe, *qen;
480 struct bfa_itnim_s *itnim;
481
482 /* accumulate IO stats from itnim */
483 bfa_os_memset(modstats, 0, sizeof(struct bfa_itnim_iostats_s));
484 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
485 itnim = (struct bfa_itnim_s *) qe;
486 bfa_fcpim_add_stats(modstats, &(itnim->stats));
487 }
488 return BFA_STATUS_OK;
489}
490
491bfa_status_t
492bfa_fcpim_get_del_itn_stats(struct bfa_s *bfa,
493 struct bfa_fcpim_del_itn_stats_s *modstats)
134{ 494{
135 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 495 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
136 496
137 *modstats = fcpim->stats; 497 *modstats = fcpim->del_itn_stats;
138 498
139 return BFA_STATUS_OK; 499 return BFA_STATUS_OK;
140} 500}
141 501
502
503bfa_status_t
504bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
505{
506 struct bfa_itnim_s *itnim;
507 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
508 struct list_head *qe, *qen;
509
510 /* accumulate IO stats from itnim */
511 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
512 itnim = (struct bfa_itnim_s *) qe;
513 bfa_itnim_clear_stats(itnim);
514 }
515 fcpim->io_profile = BFA_TRUE;
516 fcpim->io_profile_start_time = time;
517 fcpim->profile_comp = bfa_ioim_profile_comp;
518 fcpim->profile_start = bfa_ioim_profile_start;
519
520 return BFA_STATUS_OK;
521}
522bfa_status_t
523bfa_fcpim_profile_off(struct bfa_s *bfa)
524{
525 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
526 fcpim->io_profile = BFA_FALSE;
527 fcpim->io_profile_start_time = 0;
528 fcpim->profile_comp = NULL;
529 fcpim->profile_start = NULL;
530 return BFA_STATUS_OK;
531}
532
533bfa_status_t
534bfa_fcpim_port_clear_iostats(struct bfa_s *bfa, u8 lp_tag)
535{
536 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
537 struct list_head *qe, *qen;
538 struct bfa_itnim_s *itnim;
539
540 /* clear IO stats from all active itnims */
541 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
542 itnim = (struct bfa_itnim_s *) qe;
543 if (itnim->rport->rport_info.lp_tag != lp_tag)
544 continue;
545 bfa_itnim_clear_stats(itnim);
546 }
547 return BFA_STATUS_OK;
548
549}
550
142bfa_status_t 551bfa_status_t
143bfa_fcpim_clr_modstats(struct bfa_s *bfa) 552bfa_fcpim_clr_modstats(struct bfa_s *bfa)
144{ 553{
145 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 554 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
555 struct list_head *qe, *qen;
556 struct bfa_itnim_s *itnim;
146 557
147 memset(&fcpim->stats, 0, sizeof(struct bfa_fcpim_stats_s)); 558 /* clear IO stats from all active itnims */
559 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
560 itnim = (struct bfa_itnim_s *) qe;
561 bfa_itnim_clear_stats(itnim);
562 }
563 bfa_os_memset(&fcpim->del_itn_stats, 0,
564 sizeof(struct bfa_fcpim_del_itn_stats_s));
148 565
149 return BFA_STATUS_OK; 566 return BFA_STATUS_OK;
150} 567}
@@ -176,14 +593,6 @@ bfa_fcpim_update_ioredirect(struct bfa_s *bfa)
176 * IO redirection is turned off when QoS is enabled and vice versa 593 * IO redirection is turned off when QoS is enabled and vice versa
177 */ 594 */
178 ioredirect = bfa_fcport_is_qos_enabled(bfa) ? BFA_FALSE : BFA_TRUE; 595 ioredirect = bfa_fcport_is_qos_enabled(bfa) ? BFA_FALSE : BFA_TRUE;
179
180 /*
181 * Notify the bfad module of a possible state change in
182 * IO redirection capability, due to a QoS state change. bfad will
183 * check on the support for io redirection and update the
184 * fcpim's ioredirect state accordingly.
185 */
186 bfa_cb_ioredirect_state_change((void *)(bfa->bfad), ioredirect);
187} 596}
188 597
189void 598void
@@ -192,3 +601,3012 @@ bfa_fcpim_set_ioredirect(struct bfa_s *bfa, bfa_boolean_t state)
192 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 601 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
193 fcpim->ioredirect = state; 602 fcpim->ioredirect = state;
194} 603}
604
605
606
607/**
608 * BFA ITNIM module state machine functions
609 */
610
/**
 * Beginning/unallocated state - no events expected.
 */
static void
bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CREATE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_created);
		itnim->is_online = BFA_FALSE;
		/* add the itnim to the fcpim module's active queue */
		bfa_fcpim_additn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
631
/**
 * Beginning state, only online event expected.
 */
static void
bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_ONLINE:
		/*
		 * Send a firmware create request; if the request queue is
		 * full, park in the qfull state until queue space resumes.
		 */
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* never went to firmware - free the itnim immediately */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
662
/**
 * Waiting for itnim create response from firmware.
 */
static void
bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		/* firmware create completed - itnim is now usable for IO */
		bfa_sm_set_state(itnim, bfa_itnim_sm_online);
		itnim->is_online = BFA_TRUE;
		bfa_itnim_iotov_online(itnim);
		bfa_itnim_online_cb(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* must wait for the outstanding create response first */
		bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
699
/**
 * Request queue was full when the firmware create was attempted;
 * waiting for queue space to resume the create.
 */
static void
bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		/* queue space available - retry the firmware create */
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		bfa_itnim_send_fwcreate(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* nothing ever reached firmware - cancel wait and free */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
734
/**
 * Waiting for itnim create response from firmware, a delete is pending.
 */
static void
bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		/* create completed - immediately issue the pending delete */
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		/* IOC down - no response will come; free directly */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
762
/**
 * Online state - normal parking state.
 */
static void
bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		/* start path TOV and clean up active IOs before going down */
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_SLER:
		/* second-level error recovery requested by firmware */
		bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_sler_cb(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
804
/**
 * Second level error recovery need.
 */
static void
bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_cleanup(itnim);
		/* fail back pending IOs - itnim is going away */
		bfa_itnim_iotov_delete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
835
/**
 * Going offline. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		/* IO cleanup done - now delete the firmware itnim */
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_iotov_delete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_SLER:
		/* already cleaning up - SLER is a no-op here */
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
872
/**
 * Deleting itnim. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		/* IO cleanup done - proceed with firmware itnim delete */
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
900
/**
 * Rport offline. Firmware itnim is being deleted - awaiting f/w response.
 */
static void
bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* same fw delete also covers the itnim delete */
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_offline_cb(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
929
/**
 * Request queue was full when the firmware delete was attempted;
 * waiting for queue space to resume the delete.
 */
static void
bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		/* queue space available - retry the firmware delete */
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		bfa_itnim_send_fwdelete(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
957
/**
 * Offline state.
 */
static void
bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		/* fail back any IOs still held for path TOV, then free */
		bfa_itnim_iotov_delete(itnim);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_ONLINE:
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
989
/**
 * IOC h/w failed state.
 */
static void
bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
				enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_itnim_iotov_delete(itnim);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		/* no firmware interaction possible - just notify */
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_ONLINE:
		/* IOC recovered - recreate the firmware itnim */
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		/* already in IOC-disabled state - ignore */
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
1025
/**
 * Itnim is deleted, waiting for firmware response to delete.
 */
static void
bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
	case BFA_ITNIM_SM_HWFAIL:
		/* delete confirmed or IOC down - either way, free the itnim */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
1046
/**
 * Itnim is deleted, but the firmware delete is stalled on a full request
 * queue; waiting for queue space.
 */
static void
bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		bfa_itnim_send_fwdelete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		/* IOC down - cancel queue wait and free directly */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
1070
/**
 * Initiate cleanup of all IOs on an IOC failure.
 *
 * Walks every task-management and IO queue of the itnim and fails each
 * element locally, since the firmware can no longer respond.
 */
static void
bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_tskim_s *tskim;
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_tskim_iocdisable(tskim);
	}

	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}

	/**
	 * For IO request in pending queue, we pretend an early timeout.
	 */
	list_for_each_safe(qe, qen, &itnim->pending_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_tov(ioim);
	}

	list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}
}
1104
/**
 * IO cleanup completion - wait-counter callback fired once all IOs and
 * TM commands queued by bfa_itnim_cleanup() have completed.
 */
static void
bfa_itnim_cleanp_comp(void *itnim_cbarg)
{
	struct bfa_itnim_s *itnim = itnim_cbarg;

	bfa_stats(itnim, cleanup_comps);
	/* advance the itnim state machine past the cleanup phase */
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
}
1116
/**
 * Initiate cleanup of all IOs.
 *
 * Uses a wait counter so that bfa_itnim_cleanp_comp() fires only after
 * every active IO and task-management command has been cleaned up.
 */
static void
bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;
	struct bfa_tskim_s *tskim;
	struct list_head *qe, *qen;

	bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);

	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;

		/**
		 * Move IO to a cleanup queue from active queue so that a later
		 * TM will not pickup this IO.
		 */
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &itnim->io_cleanup_q);

		bfa_wc_up(&itnim->wc);
		bfa_ioim_cleanup(ioim);
	}

	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_wc_up(&itnim->wc);
		bfa_tskim_cleanup(tskim);
	}

	/* drop the initial wc reference; may complete synchronously */
	bfa_wc_wait(&itnim->wc);
}
1151
1152static void
1153__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
1154{
1155 struct bfa_itnim_s *itnim = cbarg;
1156
1157 if (complete)
1158 bfa_cb_itnim_online(itnim->ditn);
1159}
1160
1161static void
1162__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
1163{
1164 struct bfa_itnim_s *itnim = cbarg;
1165
1166 if (complete)
1167 bfa_cb_itnim_offline(itnim->ditn);
1168}
1169
1170static void
1171__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
1172{
1173 struct bfa_itnim_s *itnim = cbarg;
1174
1175 if (complete)
1176 bfa_cb_itnim_sler(itnim->ditn);
1177}
1178
/**
 * Call to resume any I/O requests waiting for room in request queue.
 */
static void
bfa_itnim_qresume(void *cbarg)
{
	struct bfa_itnim_s *itnim = cbarg;

	/* re-drive whichever qfull state the itnim SM is parked in */
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
}
1189
1190
1191
1192
1193/**
1194 * bfa_itnim_public
1195 */
1196
/**
 * An IO owned by this itnim finished cleanup - drop one wait-counter
 * reference taken in bfa_itnim_cleanup().
 */
void
bfa_itnim_iodone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}
1202
/**
 * A TM command owned by this itnim finished cleanup - drop one
 * wait-counter reference taken in bfa_itnim_cleanup().
 */
void
bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}
1208
/**
 * Account the kernel-virtual memory needed by the itnim module:
 * one bfa_itnim_s per configured rport.
 */
void
bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		u32 *dm_len)
{
	/**
	 * ITN memory
	 */
	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
}
1218
/**
 * Carve the itnim array out of the pre-allocated meminfo KVA region and
 * initialize each itnim into the uninit state.
 */
void
bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_s *bfa = fcpim->bfa;
	struct bfa_itnim_s *itnim;
	int i, j;

	INIT_LIST_HEAD(&fcpim->itnim_q);

	itnim = (struct bfa_itnim_s *) bfa_meminfo_kva(minfo);
	fcpim->itnim_arr = itnim;

	for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
		bfa_os_memset(itnim, 0, sizeof(struct bfa_itnim_s));
		itnim->bfa = bfa;
		itnim->fcpim = fcpim;
		itnim->reqq = BFA_REQQ_QOS_LO;
		/* itnim index i doubles as the rport tag */
		itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
		itnim->iotov_active = BFA_FALSE;
		bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);

		INIT_LIST_HEAD(&itnim->io_q);
		INIT_LIST_HEAD(&itnim->io_cleanup_q);
		INIT_LIST_HEAD(&itnim->pending_q);
		INIT_LIST_HEAD(&itnim->tsk_q);
		INIT_LIST_HEAD(&itnim->delay_comp_q);
		/* seed latency minimums to "no sample yet" (all-ones) */
		for (j = 0; j < BFA_IOBUCKET_MAX; j++)
			itnim->ioprofile.io_latency.min[j] = ~0;
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
	}

	/* advance the KVA cursor past the consumed itnim array */
	bfa_meminfo_kva(minfo) = (u8 *) itnim;
}
1252
/**
 * IOC failure notification - drive the itnim state machine into its
 * IOC-disabled handling.
 */
void
bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, ioc_disabled);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
}
1259
/**
 * Send an itnim create request to firmware.
 *
 * Returns BFA_TRUE when the message was queued; BFA_FALSE when the
 * request queue is full, in which case a queue-wait element is armed
 * and the caller's state machine later gets BFA_ITNIM_SM_QRESUME.
 */
static bfa_boolean_t
bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
{
	struct bfi_itnim_create_req_s *m;

	itnim->msg_no++;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_CREATE_REQ,
			bfa_lpuid(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;
	m->class = FC_CLASS_3;
	m->seq_rec = itnim->seq_rec;
	m->msg_no = itnim->msg_no;
	bfa_stats(itnim, fw_create);

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq);
	return BFA_TRUE;
}
1290
/**
 * Send an itnim delete request to firmware.
 *
 * Returns BFA_TRUE when the message was queued; BFA_FALSE when the
 * request queue is full (queue-wait element armed, QRESUME will follow).
 */
static bfa_boolean_t
bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
{
	struct bfi_itnim_delete_req_s *m;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_DELETE_REQ,
			bfa_lpuid(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;
	bfa_stats(itnim, fw_delete);

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq);
	return BFA_TRUE;
}
1316
/**
 * Cleanup all pending failed inflight requests.
 *
 * @param iotov BFA_TRUE to fail the IOs back as path-TOV expirations,
 *		BFA_FALSE to abort them (itnim came back online).
 */
static void
bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
{
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
		ioim = (struct bfa_ioim_s *)qe;
		bfa_ioim_delayed_comp(ioim, iotov);
	}
}
1331
/**
 * Itnim came (back) online within path TOV: stop the timer, abort the
 * delayed completions, and start all pending IO requests.
 */
static void
bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	bfa_itnim_iotov_stop(itnim);

	/**
	 * Abort all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_FALSE);

	/**
	 * Start all pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &itnim->io_q);
		bfa_ioim_start(ioim);
	}
}
1356
/**
 * Fail all pending IO requests - path TOV has expired (or itnim is
 * being deleted) without the itnim coming back online.
 */
static void
bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	/**
	 * Fail all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_TRUE);

	/**
	 * Fail any pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_ioim_tov(ioim);
	}
}
1379
/**
 * IO TOV timer callback. Fail any pending IO requests.
 */
static void
bfa_itnim_iotov(void *itnim_arg)
{
	struct bfa_itnim_s *itnim = itnim_arg;

	/* timer fired; mark it inactive before the cleanup callbacks */
	itnim->iotov_active = BFA_FALSE;

	bfa_cb_itnim_tov_begin(itnim->ditn);
	bfa_itnim_iotov_cleanup(itnim);
	bfa_cb_itnim_tov(itnim->ditn);
}
1394
/**
 * Start IO TOV timer for failing back pending IO requests in offline state.
 * A path_tov of zero disables IO hold-off entirely.
 */
static void
bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
{
	if (itnim->fcpim->path_tov > 0) {

		itnim->iotov_active = BFA_TRUE;
		/* with the timer armed, the itnim must be in a hold state */
		bfa_assert(bfa_itnim_hold_io(itnim));
		bfa_timer_start(itnim->bfa, &itnim->timer,
			bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
	}
}
1409
1410/**
1411 * Stop IO TOV timer.
1412 */
1413static void
1414bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
1415{
1416 if (itnim->iotov_active) {
1417 itnim->iotov_active = BFA_FALSE;
1418 bfa_timer_stop(&itnim->timer);
1419 }
1420}
1421
/**
 * Itnim delete-time path-TOV teardown: stop the timer and fail back all
 * held IOs, issuing the TOV begin/end driver callbacks only if the
 * timer was actually active.
 */
static void
bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
{
	bfa_boolean_t pathtov_active = BFA_FALSE;

	/* remember timer state before iotov_stop clears it */
	if (itnim->iotov_active)
		pathtov_active = BFA_TRUE;

	bfa_itnim_iotov_stop(itnim);
	if (pathtov_active)
		bfa_cb_itnim_tov_begin(itnim->ditn);
	bfa_itnim_iotov_cleanup(itnim);
	if (pathtov_active)
		bfa_cb_itnim_tov(itnim->ditn);
}
1440
/**
 * Fold this itnim's IO statistics into the fcpim module's accumulated
 * deleted-itnim stats, so the numbers survive the itnim's deletion.
 */
static void
bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa);
	fcpim->del_itn_stats.del_itn_iocomp_aborted +=
		itnim->stats.iocomp_aborted;
	fcpim->del_itn_stats.del_itn_iocomp_timedout +=
		itnim->stats.iocomp_timedout;
	fcpim->del_itn_stats.del_itn_iocom_sqer_needed +=
		itnim->stats.iocom_sqer_needed;
	fcpim->del_itn_stats.del_itn_iocom_res_free +=
		itnim->stats.iocom_res_free;
	fcpim->del_itn_stats.del_itn_iocom_hostabrts +=
		itnim->stats.iocom_hostabrts;
	fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios;
	fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns;
	fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
}
1459
1460
1461
1462/**
1463 * bfa_itnim_public
1464 */
1465
/**
 * Itnim interrupt processing.
 *
 * Demultiplexes firmware-to-host itnim messages, resolves the target
 * itnim from the bfa_handle carried in the message, and feeds the
 * corresponding event into its state machine.
 */
void
bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	union bfi_itnim_i2h_msg_u msg;
	struct bfa_itnim_s *itnim;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_ITNIM_I2H_CREATE_RSP:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.create_rsp->bfa_handle);
		bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
		bfa_stats(itnim, create_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITNIM_I2H_DELETE_RSP:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.delete_rsp->bfa_handle);
		bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
		bfa_stats(itnim, delete_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITNIM_I2H_SLER_EVENT:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.sler_event->bfa_handle);
		bfa_stats(itnim, sler_events);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
		break;

	default:
		/* unknown message id from firmware - fatal */
		bfa_trc(bfa, m->mhdr.msg_id);
		bfa_assert(0);
	}
}
1509
1510
1511
1512/**
1513 * bfa_itnim_api
1514 */
1515
/**
 * Claim the pre-allocated itnim that corresponds to the rport's tag and
 * kick its state machine with a create event.
 *
 * @param ditn opaque driver-layer itnim handle used for callbacks
 */
struct bfa_itnim_s *
bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_itnim_s *itnim;

	/* itnim array is indexed by rport tag (see bfa_itnim_attach) */
	itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
	bfa_assert(itnim->rport == rport);

	itnim->ditn = ditn;

	bfa_stats(itnim, creates);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);

	return itnim;
}
1532
/**
 * Request deletion of the itnim; the state machine handles cleanup of
 * any outstanding IO and the firmware delete handshake.
 */
void
bfa_itnim_delete(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, deletes);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
}
1539
/**
 * Bring the itnim online.
 *
 * @param seq_rec sequence-level error recovery capability, passed on to
 *		  firmware in the create request
 */
void
bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
{
	itnim->seq_rec = seq_rec;
	bfa_stats(itnim, onlines);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
}
1547
/**
 * Take the itnim offline; IO cleanup and the firmware delete handshake
 * are driven by the state machine.
 */
void
bfa_itnim_offline(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, offlines);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
}
1554
/**
 * Return true if itnim is considered offline for holding off IO request.
 * IO is not held if itnim is being deleted.
 *
 * Holding requires a nonzero path TOV, an armed TOV timer, and the
 * state machine being in one of the transient/offline states listed.
 */
bfa_boolean_t
bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
{
	return itnim->fcpim->path_tov && itnim->iotov_active &&
		(bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
}
1570
/**
 * Snapshot this itnim's IO latency profile into *ioprofile.
 *
 * Returns BFA_STATUS_IOPROFILE_OFF when IO profiling is not enabled on
 * the fcpim module, BFA_STATUS_OK otherwise.
 */
bfa_status_t
bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
		struct bfa_itnim_ioprofile_s *ioprofile)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa);
	if (!fcpim->io_profile)
		return BFA_STATUS_IOPROFILE_OFF;

	/* refresh the header fields before copying the profile out */
	itnim->ioprofile.index = BFA_IOBUCKET_MAX;
	itnim->ioprofile.io_profile_start_time =
		bfa_io_profile_start_time(itnim->bfa);
	itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
	itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
	*ioprofile = itnim->ioprofile;

	return BFA_STATUS_OK;
}
1588
/**
 * Copy this itnim's IO statistics into the caller-supplied buffer.
 */
void
bfa_itnim_get_stats(struct bfa_itnim_s *itnim,
	struct bfa_itnim_iostats_s *stats)
{
	*stats = itnim->stats;
}
1595
/**
 * Reset this itnim's IO statistics and latency profile.
 */
void
bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
{
	int j;
	bfa_os_memset(&itnim->stats, 0, sizeof(itnim->stats));
	bfa_os_memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
	/* reset latency minimums to "no sample yet" (all-ones) */
	for (j = 0; j < BFA_IOBUCKET_MAX; j++)
		itnim->ioprofile.io_latency.min[j] = ~0;
}
1605
1606/**
1607 * BFA IO module state machine functions
1608 */
1609
/**
 * IO is not started (unallocated).
 */
static void
bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_START:
		if (!bfa_itnim_is_online(ioim->itnim)) {
			if (!bfa_itnim_hold_io(ioim->itnim)) {
				/* target gone and not held: fail with
				 * path TOV status via deferred callback */
				bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->fcpim->ioim_comp_q);
				bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
						__bfa_cb_ioim_pathtov, ioim);
			} else {
				/* hold the IO until itnim returns or
				 * the path TOV timer expires */
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->itnim->pending_q);
			}
			break;
		}

		if (ioim->nsges > BFI_SGE_INLINE) {
			/* needs external SG pages before it can be sent */
			if (!bfa_ioim_sge_setup(ioim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
				return;
			}
		}

		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}

		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_IOTOV:
		/* path TOV expired while the IO sat in the pending queue */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_pathtov, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/**
		 * IO in pending queue can get abort requests. Complete abort
		 * requests immediately.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_assert(bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			__bfa_cb_ioim_abort, ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1674
/**
 * IO is waiting for SG pages.
 */
static void
bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_SGALLOCED:
		/* SG pages granted - now try to send the IO request */
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		/* cancel the outstanding SG-page wait before failing back */
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1721
/**
 * IO is active.
 */
static void
bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
		/* fast-path good completion - no status to decode */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			      __bfa_cb_ioim_good_comp, ioim);
		break;

	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		/* completion that also frees the IO tag after the callback */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/* host-requested abort: report via the abort callback */
		ioim->iosp->abort_explicit = BFA_TRUE;
		ioim->io_cbfn = __bfa_cb_ioim_abort;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					  &ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* implicit abort on itnim cleanup: report as failure */
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					  &ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	case BFA_IOIM_SM_SQRETRY:
		if (bfa_ioim_get_iotag(ioim) != BFA_TRUE) {
			/* max retry completed free IO */
			bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
			bfa_ioim_move_to_comp_q(ioim);
			bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_failed, ioim);
			break;
		}
		/* waiting for IO tag resource free */
		bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1805
/**
 * IO is retried with new tag.
 */
static void
bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_FREE:
		/* abts and rrq done. Now retry the IO with new tag */
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* itnim cleanup overrides the retry: abort implicitly */
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					  &ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			 __bfa_cb_ioim_failed, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/** in this state IO abort is done.
		 * Waiting for IO tag resource free.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1859
1860/**
1861 * IO is being aborted, waiting for completion from firmware.
1862 */
1863static void
1864bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1865{
1866 bfa_trc(ioim->bfa, ioim->iotag);
1867 bfa_trc(ioim->bfa, event);
1868
1869 switch (event) {
1870 case BFA_IOIM_SM_COMP_GOOD:
1871 case BFA_IOIM_SM_COMP:
1872 case BFA_IOIM_SM_DONE:
1873 case BFA_IOIM_SM_FREE:
1874 break;
1875
1876 case BFA_IOIM_SM_ABORT_DONE:
1877 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1878 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1879 ioim);
1880 break;
1881
1882 case BFA_IOIM_SM_ABORT_COMP:
1883 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1884 bfa_ioim_move_to_comp_q(ioim);
1885 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1886 ioim);
1887 break;
1888
1889 case BFA_IOIM_SM_COMP_UTAG:
1890 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1891 bfa_ioim_move_to_comp_q(ioim);
1892 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1893 ioim);
1894 break;
1895
1896 case BFA_IOIM_SM_CLEANUP:
1897 bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
1898 ioim->iosp->abort_explicit = BFA_FALSE;
1899
1900 if (bfa_ioim_send_abort(ioim))
1901 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1902 else {
1903 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1904 bfa_stats(ioim->itnim, qwait);
1905 bfa_reqq_wait(ioim->bfa, ioim->reqq,
1906 &ioim->iosp->reqq_wait);
1907 }
1908 break;
1909
1910 case BFA_IOIM_SM_HWFAIL:
1911 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1912 bfa_ioim_move_to_comp_q(ioim);
1913 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1914 ioim);
1915 break;
1916
1917 default:
1918 bfa_sm_fault(ioim->bfa, event);
1919 }
1920}
1921
1922/**
1923 * IO is being cleaned up (implicit abort), waiting for completion from
1924 * firmware.
1925 */
1926static void
1927bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1928{
1929 bfa_trc(ioim->bfa, ioim->iotag);
1930 bfa_trc(ioim->bfa, event);
1931
1932 switch (event) {
1933 case BFA_IOIM_SM_COMP_GOOD:
1934 case BFA_IOIM_SM_COMP:
1935 case BFA_IOIM_SM_DONE:
1936 case BFA_IOIM_SM_FREE:
1937 break;
1938
1939 case BFA_IOIM_SM_ABORT:
1940 /**
1941 * IO is already being aborted implicitly
1942 */
1943 ioim->io_cbfn = __bfa_cb_ioim_abort;
1944 break;
1945
1946 case BFA_IOIM_SM_ABORT_DONE:
1947 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1948 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1949 bfa_ioim_notify_cleanup(ioim);
1950 break;
1951
1952 case BFA_IOIM_SM_ABORT_COMP:
1953 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1954 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1955 bfa_ioim_notify_cleanup(ioim);
1956 break;
1957
1958 case BFA_IOIM_SM_COMP_UTAG:
1959 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1960 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1961 bfa_ioim_notify_cleanup(ioim);
1962 break;
1963
1964 case BFA_IOIM_SM_HWFAIL:
1965 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1966 bfa_ioim_move_to_comp_q(ioim);
1967 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1968 ioim);
1969 break;
1970
1971 case BFA_IOIM_SM_CLEANUP:
1972 /**
1973 * IO can be in cleanup state already due to TM command.
1974 * 2nd cleanup request comes from ITN offline event.
1975 */
1976 break;
1977
1978 default:
1979 bfa_sm_fault(ioim->bfa, event);
1980 }
1981}
1982
/**
 * IO is waiting for room in request CQ
 */
static void
bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		/* room became available; send the IO request now */
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		bfa_ioim_send_ioreq(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/* cancel the queue wait and complete the IO as aborted */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* ITN cleanup: cancel wait, fail the IO, report cleanup done */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* IOC is down; cancel wait and complete the IO as failed */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2026
/**
 * Active IO is being aborted, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		/* room became available; send the abort request now */
		bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* ITN cleanup: demote the explicit abort to an implicit one */
		bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		/* IO completed before abort was ever sent; finish as aborted */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		/* IO done but tag still held by firmware; wait for free */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* IOC is down; cancel wait and complete the IO as failed */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2077
/**
 * Active IO is being cleaned up, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		/* room became available; send the cleanup abort now */
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/**
		 * IO is already being cleaned up implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		/* IO completed before cleanup was sent; finish it */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_DONE:
		/* IO done but tag still held by firmware; wait for free */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* IOC is down; cancel wait and complete the IO as failed */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2127
/**
 * IO bfa callback is pending.
 */
static void
bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		/* callback delivered; return the IOIM to the free pool */
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* IO already completing; just acknowledge the cleanup */
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* completion already queued; nothing more to do */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2154
/**
 * IO bfa callback is pending. IO resource cannot be freed.
 */
static void
bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		/*
		 * Callback delivered but firmware still holds the IO tag;
		 * park on the resfree queue until the tag is released.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
		break;

	case BFA_IOIM_SM_FREE:
		/* firmware freed the tag first; only the callback remains */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* IO already completing; just acknowledge the cleanup */
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* IOC down: firmware free will never arrive; stop waiting */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2187
/**
 * IO is completed, waiting resource free from firmware.
 */
static void
bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_FREE:
		/* tag released by firmware; return IOIM to the free pool */
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* IO already completed; just acknowledge the cleanup */
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* IOC down; resource bookkeeping becomes moot */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2214
2215
2216
2217/**
2218 * hal_ioim_private
2219 */
2220
/*
 * Deferred callback for IOs that completed with good status. When
 * invoked with !complete, the callback is not to be delivered; only
 * advance the state machine with the HCB event.
 */
static void
__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	/* report good completion to the driver layer */
	bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
}
2233
/*
 * Deferred callback for IOs with a full firmware response: extracts
 * SCSI status, sense data, and residual from the saved response message
 * and reports the completion to the driver layer.
 */
static void
__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;
	struct bfi_ioim_rsp_s *m;
	u8 *snsinfo = NULL;
	u8 sns_len = 0;
	s32 residue = 0;

	if (!complete) {
		/* callback canceled; only drive the state machine */
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	/* response message was saved by bfa_ioim_isr() while active */
	m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
	if (m->io_status == BFI_IOIM_STS_OK) {
		/**
		 * setup sense information, if present
		 */
		if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
		     m->sns_len) {
			sns_len = m->sns_len;
			snsinfo = ioim->iosp->snsinfo;
		}

		/**
		 * setup residue value correctly for normal completions
		 */
		if (m->resid_flags == FCP_RESID_UNDER) {
			/* underrun: positive residual */
			residue = bfa_os_ntohl(m->residue);
			bfa_stats(ioim->itnim, iocomp_underrun);
		}
		if (m->resid_flags == FCP_RESID_OVER) {
			/* overrun: reported as a negative residual */
			residue = bfa_os_ntohl(m->residue);
			residue = -residue;
			bfa_stats(ioim->itnim, iocomp_overrun);
		}
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
			  m->scsi_status, sns_len, snsinfo, residue);
}
2276
/*
 * Deferred callback completing an IO with ABORTED status and no sense
 * data or residual (used for HW failures and implicit cleanups).
 */
static void
__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		/* callback canceled; only drive the state machine */
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
			  0, 0, NULL, 0);
}
2290
/*
 * Deferred callback completing an IO with PATHTOV status after the path
 * timeout expired while the ITN was offline.
 */
static void
__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	bfa_stats(ioim->itnim, path_tov_expired);
	if (!complete) {
		/* callback canceled; only drive the state machine */
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
			  0, 0, NULL, 0);
}
2305
/*
 * Deferred callback reporting a driver-requested abort completion.
 */
static void
__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		/* callback canceled; only drive the state machine */
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
}
2318
/*
 * Called when SG pages that were waited for become available: claim
 * them, set up the SG page chain, and resume the state machine.
 */
static void
bfa_ioim_sgpg_alloced(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
	bfa_ioim_sgpg_setup(ioim);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
}
2329
/**
 * Send I/O request to firmware.
 *
 * Returns BFA_TRUE if the request was queued to firmware, BFA_FALSE if
 * the request queue was full - in that case the IO is parked on the
 * queue-wait list and resumed later through bfa_ioim_qresume().
 */
static bfa_boolean_t
bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
{
	struct bfa_itnim_s *itnim = ioim->itnim;
	struct bfi_ioim_req_s *m;
	static struct fcp_cmnd_s cmnd_z0 = { 0 };
	struct bfi_sge_s *sge;
	u32 pgdlen = 0;
	u32 fcp_dl;
	u64 addr;
	struct scatterlist *sg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m) {
		bfa_stats(ioim->itnim, qwait);
		bfa_reqq_wait(ioim->bfa, ioim->reqq,
				  &ioim->iosp->reqq_wait);
		return BFA_FALSE;
	}

	/**
	 * build i/o request message next
	 */
	m->io_tag = bfa_os_htons(ioim->iotag);
	m->rport_hdl = ioim->itnim->rport->fw_handle;
	m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio);

	/**
	 * build inline IO SG element here
	 */
	sge = &m->sges[0];
	if (ioim->nsges) {
		/* first SG element rides inline in the request message */
		sg = (struct scatterlist *)scsi_sglist(cmnd);
		addr = bfa_os_sgaddr(sg_dma_address(sg));
		sge->sga = *(union bfi_addr_u *) &addr;
		pgdlen = sg_dma_len(sg);
		sge->sg_len = pgdlen;
		sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
					BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
		bfa_sge_to_be(sge);
		sge++;
	}

	if (ioim->nsges > BFI_SGE_INLINE) {
		/* remaining elements live in separately allocated SG pages */
		sge->sga = ioim->sgpg->sgpg_pa;
	} else {
		sge->sga.a32.addr_lo = 0;
		sge->sga.a32.addr_hi = 0;
	}
	sge->sg_len = pgdlen;
	sge->flags = BFI_SGE_PGDLEN;
	bfa_sge_to_be(sge);

	/**
	 * set up I/O command parameters
	 */
	bfa_os_assign(m->cmnd, cmnd_z0);
	m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio);
	m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio);
	bfa_os_assign(m->cmnd.cdb,
			*(scsi_cdb_t *)bfa_cb_ioim_get_cdb(ioim->dio));
	fcp_dl = bfa_cb_ioim_get_size(ioim->dio);
	m->cmnd.fcp_dl = bfa_os_htonl(fcp_dl);

	/**
	 * set up I/O message header
	 */
	switch (m->cmnd.iodir) {
	case FCP_IODIR_READ:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, input_reqs);
		ioim->itnim->stats.rd_throughput += fcp_dl;
		break;
	case FCP_IODIR_WRITE:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, output_reqs);
		ioim->itnim->stats.wr_throughput += fcp_dl;
		break;
	case FCP_IODIR_RW:
		bfa_stats(itnim, input_reqs);
		bfa_stats(itnim, output_reqs);
		/* fallthrough - bidirectional IO uses the generic opcode */
	default:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
	}
	/*
	 * Force the generic opcode when sequence-level error recovery is
	 * enabled or the transfer size is not a multiple of 4 bytes.
	 */
	if (itnim->seq_rec ||
	    (bfa_cb_ioim_get_size(ioim->dio) & (sizeof(u32) - 1)))
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));

#ifdef IOIM_ADVANCED
	m->cmnd.crn = bfa_cb_ioim_get_crn(ioim->dio);
	m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio);
	m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio);

	/**
	 * Handle large CDB (>16 bytes).
	 */
	m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) -
					FCP_CMND_CDB_LEN) / sizeof(u32);
	if (m->cmnd.addl_cdb_len) {
		bfa_os_memcpy(&m->cmnd.cdb + 1, (scsi_cdb_t *)
				bfa_cb_ioim_get_cdb(ioim->dio) + 1,
				m->cmnd.addl_cdb_len * sizeof(u32));
		fcp_cmnd_fcpdl(&m->cmnd) =
				bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));
	}
#endif

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}
2450
/**
 * Setup any additional SG pages needed. Inline SG element is setup
 * at queuing time.
 *
 * Returns BFA_TRUE when all needed SG pages are available (or none are
 * needed); BFA_FALSE when the IO must wait for SG pages to free up.
 */
static bfa_boolean_t
bfa_ioim_sge_setup(struct bfa_ioim_s *ioim)
{
	u16 nsgpgs;

	/* only called for IOs with more SGEs than fit inline */
	bfa_assert(ioim->nsges > BFI_SGE_INLINE);

	/**
	 * allocate SG pages needed
	 */
	nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	if (!nsgpgs)
		return BFA_TRUE;

	if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
	    != BFA_STATUS_OK) {
		/* wait; bfa_ioim_sgpg_alloced() resumes when pages free up */
		bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
		return BFA_FALSE;
	}

	ioim->nsgpgs = nsgpgs;
	bfa_ioim_sgpg_setup(ioim);

	return BFA_TRUE;
}
2480
/*
 * Populate the allocated SG pages with the scatterlist entries beyond
 * the inline element, chaining pages together with link elements.
 */
static void
bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
{
	int sgeid, nsges, i;
	struct bfi_sge_s *sge;
	struct bfa_sgpg_s *sgpg;
	u32 pgcumsz;
	u64 addr;
	struct scatterlist *sg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;

	/* the first BFI_SGE_INLINE entries were consumed by the request */
	sgeid = BFI_SGE_INLINE;
	ioim->sgpg = sgpg = bfa_q_first(&ioim->sgpg_q);

	/* skip the scatterlist entry already placed inline */
	sg = scsi_sglist(cmnd);
	sg = sg_next(sg);

	do {
		sge = sgpg->sgpg->sges;
		nsges = ioim->nsges - sgeid;
		if (nsges > BFI_SGPG_DATA_SGES)
			nsges = BFI_SGPG_DATA_SGES;

		pgcumsz = 0;
		for (i = 0; i < nsges; i++, sge++, sgeid++, sg = sg_next(sg)) {
			addr = bfa_os_sgaddr(sg_dma_address(sg));
			sge->sga = *(union bfi_addr_u *) &addr;
			sge->sg_len = sg_dma_len(sg);
			pgcumsz += sge->sg_len;

			/**
			 * set flags
			 */
			if (i < (nsges - 1))
				sge->flags = BFI_SGE_DATA;
			else if (sgeid < (ioim->nsges - 1))
				sge->flags = BFI_SGE_DATA_CPL;
			else
				sge->flags = BFI_SGE_DATA_LAST;

			bfa_sge_to_le(sge);
		}

		sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);

		/**
		 * set the link element of each page
		 */
		if (sgeid == ioim->nsges) {
			/* last page: terminate with a zeroed PGDLEN element */
			sge->flags = BFI_SGE_PGDLEN;
			sge->sga.a32.addr_lo = 0;
			sge->sga.a32.addr_hi = 0;
		} else {
			/* link to the next SG page in the chain */
			sge->flags = BFI_SGE_LINK;
			sge->sga = sgpg->sgpg_pa;
		}
		sge->sg_len = pgcumsz;

		bfa_sge_to_le(sge);
	} while (sgeid < ioim->nsges);
}
2542
/**
 * Send I/O abort request to firmware.
 *
 * Returns BFA_TRUE if the abort was queued, BFA_FALSE if the request
 * queue was full (caller transitions to a *_qfull state and retries).
 */
static bfa_boolean_t
bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
{
	struct bfi_ioim_abort_req_s *m;
	enum bfi_ioim_h2i msgop;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m)
		return BFA_FALSE;

	/**
	 * build i/o request message next
	 */
	if (ioim->iosp->abort_explicit)
		msgop = BFI_IOIM_H2I_IOABORT_REQ;
	else
		msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;

	bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa));
	m->io_tag    = bfa_os_htons(ioim->iotag);
	/* bump the tag so stale abort responses can be detected in the ISR */
	m->abort_tag = ++ioim->abort_tag;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}
2577
/**
 * Call to resume any I/O requests waiting for room in request queue.
 */
static void
bfa_ioim_qresume(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	bfa_stats(ioim->itnim, qresumes);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
}
2589
2590
/*
 * Notify the initiator of the cleanup (ITN offline or TM command) that
 * this IO is done with its cleanup processing.
 */
static void
bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
{
	/**
	 * Move IO from itnim queue to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);

	if (!ioim->iosp->tskim) {
		/* cleanup was triggered by ITN offline, not by a TM command */
		if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
			/* hold the completion until path TOV / link up */
			bfa_cb_dequeue(&ioim->hcb_qe);
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
		}
		bfa_itnim_iodone(ioim->itnim);
	} else
		bfa_tskim_iodone(ioim->iosp->tskim);
}
2611
2612static bfa_boolean_t
2613bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
2614{
2615 if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) &&
2616 (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim))) ||
2617 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort)) ||
2618 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull)) ||
2619 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb)) ||
2620 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free)) ||
2621 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree)))
2622 return BFA_FALSE;
2623
2624 return BFA_TRUE;
2625}
2626
/**
 * Complete an IO whose completion was held back while its ITN was
 * offline - either because the path timeout (TOV) popped,
 * or after the link comes back.
 */
void
bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
{
	/**
	 * If path tov timer expired, failback with PATHTOV status - these
	 * IO requests are not normally retried by IO stack.
	 *
	 * Otherwise device cameback online and fail it with normal failed
	 * status so that IO stack retries these failed IO requests.
	 */
	if (iotov)
		ioim->io_cbfn = __bfa_cb_ioim_pathtov;
	else {
		ioim->io_cbfn = __bfa_cb_ioim_failed;
		bfa_stats(ioim->itnim, iocom_nexus_abort);
	}
	bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);

	/**
	 * Move IO to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
}
2655
2656
2657
2658/**
2659 * hal_ioim_friend
2660 */
2661
/**
 * Memory allocation and initialization.
 *
 * Claims kva for the IOIM and IOIM-sp arrays and DMA memory for per-IO
 * sense buffers from @minfo, then initializes every IOIM and places it
 * on the free queue. IO tag == array index.
 */
void
bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_ioim_s		*ioim;
	struct bfa_ioim_sp_s	*iosp;
	u16		i;
	u8			*snsinfo;
	u32		snsbufsz;

	/**
	 * claim memory first
	 */
	ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_arr = ioim;
	bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->num_ioim_reqs);

	iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_sp_arr = iosp;
	bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs);

	/**
	 * Claim DMA memory for per IO sense data.
	 */
	snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
	fcpim->snsbase.pa  = bfa_meminfo_dma_phys(minfo);
	bfa_meminfo_dma_phys(minfo) += snsbufsz;

	fcpim->snsbase.kva = bfa_meminfo_dma_virt(minfo);
	bfa_meminfo_dma_virt(minfo) += snsbufsz;
	snsinfo = fcpim->snsbase.kva;
	/* tell firmware where the sense-buffer region lives */
	bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);

	/**
	 * Initialize ioim free queues
	 */
	INIT_LIST_HEAD(&fcpim->ioim_free_q);
	INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
	INIT_LIST_HEAD(&fcpim->ioim_comp_q);

	for (i = 0; i < fcpim->num_ioim_reqs;
	     i++, ioim++, iosp++, snsinfo += BFI_IOIM_SNSLEN) {
		/*
		 * initialize IOIM
		 */
		bfa_os_memset(ioim, 0, sizeof(struct bfa_ioim_s));
		ioim->iotag   = i;
		ioim->bfa     = fcpim->bfa;
		ioim->fcpim   = fcpim;
		ioim->iosp    = iosp;
		iosp->snsinfo = snsinfo;
		INIT_LIST_HEAD(&ioim->sgpg_q);
		bfa_reqq_winit(&ioim->iosp->reqq_wait,
				   bfa_ioim_qresume, ioim);
		bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
				   bfa_ioim_sgpg_alloced, ioim);
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);

		list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
	}
}
2725
/**
 * Driver detach time call.
 */
void
bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim)
{
	/*
	 * Intentionally empty: IOIM memory was claimed from the shared
	 * bfa_meminfo_s pools in bfa_ioim_attach() - presumably released
	 * by the meminfo owner; verify against the bfa detach path.
	 */
}
2733
/*
 * IO completion interrupt handler: maps the firmware io_status to a
 * state machine event and delivers it to the owning IOIM.
 */
void
bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16	iotag;
	enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;

	iotag = bfa_os_ntohs(rsp->io_tag);

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	bfa_assert(ioim->iotag == iotag);

	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, rsp->io_status);
	bfa_trc(ioim->bfa, rsp->reuse_io_tag);

	/* save the response for __bfa_cb_ioim_comp() to decode later */
	if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
		bfa_os_assign(ioim->iosp->comp_rspmsg, *m);

	switch (rsp->io_status) {
	case BFI_IOIM_STS_OK:
		bfa_stats(ioim->itnim, iocomp_ok);
		/* reuse_io_tag == 0 means firmware has freed the tag */
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_TIMEDOUT:
		bfa_stats(ioim->itnim, iocomp_timedout);
		/* fallthrough - a timed-out IO is reported as aborted */
	case BFI_IOIM_STS_ABORTED:
		rsp->io_status = BFI_IOIM_STS_ABORTED;
		bfa_stats(ioim->itnim, iocomp_aborted);
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_PROTO_ERR:
		bfa_stats(ioim->itnim, iocom_proto_err);
		bfa_assert(rsp->reuse_io_tag);
		evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_SQER_NEEDED:
		/* firmware requests a retry with a new tag */
		bfa_stats(ioim->itnim, iocom_sqer_needed);
		bfa_assert(rsp->reuse_io_tag == 0);
		evt = BFA_IOIM_SM_SQRETRY;
		break;

	case BFI_IOIM_STS_RES_FREE:
		bfa_stats(ioim->itnim, iocom_res_free);
		evt = BFA_IOIM_SM_FREE;
		break;

	case BFI_IOIM_STS_HOST_ABORTED:
		bfa_stats(ioim->itnim, iocom_hostabrts);
		if (rsp->abort_tag != ioim->abort_tag) {
			/* stale response for a superseded abort; drop it */
			bfa_trc(ioim->bfa, rsp->abort_tag);
			bfa_trc(ioim->bfa, ioim->abort_tag);
			return;
		}

		if (rsp->reuse_io_tag)
			evt = BFA_IOIM_SM_ABORT_COMP;
		else
			evt = BFA_IOIM_SM_ABORT_DONE;
		break;

	case BFI_IOIM_STS_UTAG:
		bfa_stats(ioim->itnim, iocom_utags);
		evt = BFA_IOIM_SM_COMP_UTAG;
		break;

	default:
		bfa_assert(0);
	}

	bfa_sm_send_event(ioim, evt);
}
2817
/*
 * Fast-path interrupt handler for IOs that completed with good status;
 * skips response decoding and sends COMP_GOOD directly.
 */
void
bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16	iotag;

	iotag = bfa_os_ntohs(rsp->io_tag);

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	bfa_assert(ioim->iotag == iotag);

	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_ioim_cb_profile_comp(fcpim, ioim);

	bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
}
2836
/*
 * Record the IO start timestamp for latency profiling.
 */
void
bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
{
	ioim->start_time = bfa_os_get_clock();
}
2842
/*
 * Update per-itnim IO latency statistics (count/min/max/avg, bucketed
 * by transfer size) at IO completion time.
 */
void
bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
{
	u32 fcp_dl = bfa_cb_ioim_get_size(ioim->dio);
	u32 index = bfa_ioim_get_index(fcp_dl);
	u64 end_time = bfa_os_get_clock();
	struct bfa_itnim_latency_s *io_lat =
			&(ioim->itnim->ioprofile.io_latency);
	u32 val = (u32)(end_time - ioim->start_time);

	bfa_itnim_ioprofile_update(ioim->itnim, index);

	io_lat->count[index]++;
	/*
	 * NOTE(review): min[] only decreases - if it starts at 0 it can
	 * never track a real minimum; verify its initialization elsewhere.
	 */
	io_lat->min[index] = (io_lat->min[index] < val) ?
			io_lat->min[index] : val;
	io_lat->max[index] = (io_lat->max[index] > val) ?
			io_lat->max[index] : val;
	io_lat->avg[index] += val;	/* running sum; averaged at read time? */
}
/**
 * Called by itnim to clean up IO while going offline.
 */
void
bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_cleanups);

	/* no TM command owns this cleanup (cf. bfa_ioim_cleanup_tm) */
	ioim->iosp->tskim = NULL;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}
2874
/*
 * Clean up an IO on behalf of a task management command; @tskim is
 * notified when the IO finishes its cleanup.
 */
void
bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_tmaborts);

	ioim->iosp->tskim = tskim;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}
2884
/**
 * IOC failure handling.
 */
void
bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_iocdowns);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
}
2895
/**
 * IO offline TOV popped. Fail the pending IO.
 */
void
bfa_ioim_tov(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
}
2905
2906
2907
2908/**
2909 * hal_ioim_api
2910 */
2911
/**
 * Allocate IOIM resource for initiator mode I/O request.
 *
 * Returns NULL when no IO tags are available; otherwise the IOIM is
 * initialized for @dio/@itnim and queued on the itnim IO list.
 */
struct bfa_ioim_s *
bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
		struct bfa_itnim_s *itnim, u16 nsges)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_ioim_s *ioim;

	/**
	 * allocate IOIM resource
	 */
	bfa_q_deq(&fcpim->ioim_free_q, &ioim);
	if (!ioim) {
		bfa_stats(itnim, no_iotags);
		return NULL;
	}

	ioim->dio = dio;
	ioim->itnim = itnim;
	ioim->nsges = nsges;
	ioim->nsgpgs = 0;

	bfa_stats(itnim, total_ios);
	fcpim->ios_active++;

	list_add_tail(&ioim->qe, &itnim->io_q);
	bfa_trc_fp(ioim->bfa, ioim->iotag);

	return ioim;
}
2944
/*
 * Return an IOIM (and any SG pages it holds) to the free pool.
 * Must only be called once the state machine reached uninit.
 */
void
bfa_ioim_free(struct bfa_ioim_s *ioim)
{
	struct bfa_fcpim_mod_s *fcpim = ioim->fcpim;

	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_assert_fp(bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit));

	bfa_assert_fp(list_empty(&ioim->sgpg_q) ||
			(ioim->nsges > BFI_SGE_INLINE));

	/* release any SG pages allocated for this IO */
	if (ioim->nsgpgs > 0)
		bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);

	bfa_stats(ioim->itnim, io_comps);
	fcpim->ios_active--;

	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
}
2965
/*
 * Start an allocated IO: pick the request queue and kick the state
 * machine with the START event.
 */
void
bfa_ioim_start(struct bfa_ioim_s *ioim)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);

	bfa_ioim_cb_profile_start(ioim->fcpim, ioim);

	/**
	 * Obtain the queue over which this request has to be issued
	 */
	ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
			bfa_cb_ioim_get_reqq(ioim->dio) :
			bfa_itnim_get_reqq(ioim);

	bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
}
2982
/**
 * Driver I/O abort request.
 *
 * Returns BFA_STATUS_FAILED if the IO is in a state where an abort is
 * not meaningful (see bfa_ioim_is_abortable()), BFA_STATUS_OK otherwise.
 */
bfa_status_t
bfa_ioim_abort(struct bfa_ioim_s *ioim)
{

	bfa_trc(ioim->bfa, ioim->iotag);

	if (!bfa_ioim_is_abortable(ioim))
		return BFA_STATUS_FAILED;

	bfa_stats(ioim->itnim, io_aborts);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);

	return BFA_STATUS_OK;
}
3000
3001
3002/**
3003 * BFA TSKIM state machine functions
3004 */
3005
/**
 * Task management command beginning state.
 */
static void
bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_START:
		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
		/* collect the IO requests that fall within this TM's scope */
		bfa_tskim_gather_ios(tskim);

		/**
		 * If device is offline, do not send TM on wire. Just cleanup
		 * any pending IO requests and complete TM request.
		 */
		if (!bfa_itnim_is_online(tskim->itnim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
			tskim->tsk_status = BFI_TSKIM_STS_OK;
			bfa_tskim_cleanup_ios(tskim);
			return;
		}

		if (!bfa_tskim_send(tskim)) {
			/* request queue full; wait for room, then resend */
			bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
			bfa_stats(tskim->itnim, tm_qwait);
			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
					  &tskim->reqq_wait);
		}
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3042
/**
 * TM command is active, awaiting completion from firmware to
 * cleanup IO requests in TM scope.
 */
static void
bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		/* TM done on wire; now clean up the IOs it covers */
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/* ITN going offline; abort the in-flight TM command */
		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
		if (!bfa_tskim_send_abort(tskim)) {
			/* request queue full; wait, then resend the abort */
			bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
			bfa_stats(tskim->itnim, tm_qwait);
			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
				&tskim->reqq_wait);
		}
		break;

	case BFA_TSKIM_SM_HWFAIL:
		/* IOC down: fail scoped IOs locally and complete the TM */
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3079
/**
 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
 * completion event from firmware.
 */
static void
bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		/**
		 * Ignore and wait for ABORT completion from firmware.
		 */
		break;

	case BFA_TSKIM_SM_CLEANUP_DONE:
		/* Abort acknowledged by firmware; clean up affected IOs. */
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		/* IOC went down: flush IOs and complete with failure. */
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3111
/**
 * IO requests in TM scope are being cleaned up; awaiting
 * BFA_TSKIM_SM_IOS_DONE from the IO-cleanup completion callback.
 */
static void
bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_IOS_DONE:
		/* All IOs done; queue the TM completion callback. */
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/**
		 * Ignore, TM command completed on wire.
		 * Notify TM completion on IO cleanup completion.
		 */
		break;

	case BFA_TSKIM_SM_HWFAIL:
		/* IOC went down: flush IOs and complete with failure. */
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3140
/**
 * Task management command is waiting for room in request CQ
 */
static void
bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_QRESUME:
		/* Queue space available again: send the TM now. */
		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
		bfa_tskim_send(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/**
		 * No need to send TM on wire since ITN is offline.
		 */
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		/* Cancel the queue wait before flushing IOs. */
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3175
/**
 * Task management command is active, awaiting for room in request CQ
 * to send clean up request.
 */
static void
bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		bfa_reqq_wcancel(&tskim->reqq_wait);
		/**
		 * TM completed on wire while waiting for queue space; the
		 * abort still has to be sent.
		 *
		 * Fall through !!!
		 */

	case BFA_TSKIM_SM_QRESUME:
		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
		bfa_tskim_send_abort(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		/* Cancel the queue wait before flushing IOs. */
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3210
/**
 * BFA callback is pending
 */
static void
bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_HCB:
		/* Callback delivered; return TSKIM to the free pool. */
		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
		bfa_tskim_free(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/* Already completing; just deliver the completion notice. */
		bfa_tskim_notify_comp(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		/* Completion callback is already queued; nothing to do. */
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3236
3237
3238
3239/**
3240 * hal_tskim_private
3241 */
3242
3243static void
3244__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
3245{
3246 struct bfa_tskim_s *tskim = cbarg;
3247
3248 if (!complete) {
3249 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
3250 return;
3251 }
3252
3253 bfa_stats(tskim->itnim, tm_success);
3254 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
3255}
3256
3257static void
3258__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
3259{
3260 struct bfa_tskim_s *tskim = cbarg;
3261
3262 if (!complete) {
3263 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
3264 return;
3265 }
3266
3267 bfa_stats(tskim->itnim, tm_failures);
3268 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
3269 BFI_TSKIM_STS_FAILED);
3270}
3271
3272static bfa_boolean_t
3273bfa_tskim_match_scope(struct bfa_tskim_s *tskim, lun_t lun)
3274{
3275 switch (tskim->tm_cmnd) {
3276 case FCP_TM_TARGET_RESET:
3277 return BFA_TRUE;
3278
3279 case FCP_TM_ABORT_TASK_SET:
3280 case FCP_TM_CLEAR_TASK_SET:
3281 case FCP_TM_LUN_RESET:
3282 case FCP_TM_CLEAR_ACA:
3283 return (tskim->lun == lun);
3284
3285 default:
3286 bfa_assert(0);
3287 }
3288
3289 return BFA_FALSE;
3290}
3291
3292/**
3293 * Gather affected IO requests and task management commands.
3294 */
3295static void
3296bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
3297{
3298 struct bfa_itnim_s *itnim = tskim->itnim;
3299 struct bfa_ioim_s *ioim;
3300 struct list_head *qe, *qen;
3301
3302 INIT_LIST_HEAD(&tskim->io_q);
3303
3304 /**
3305 * Gather any active IO requests first.
3306 */
3307 list_for_each_safe(qe, qen, &itnim->io_q) {
3308 ioim = (struct bfa_ioim_s *) qe;
3309 if (bfa_tskim_match_scope
3310 (tskim, bfa_cb_ioim_get_lun(ioim->dio))) {
3311 list_del(&ioim->qe);
3312 list_add_tail(&ioim->qe, &tskim->io_q);
3313 }
3314 }
3315
3316 /**
3317 * Failback any pending IO requests immediately.
3318 */
3319 list_for_each_safe(qe, qen, &itnim->pending_q) {
3320 ioim = (struct bfa_ioim_s *) qe;
3321 if (bfa_tskim_match_scope
3322 (tskim, bfa_cb_ioim_get_lun(ioim->dio))) {
3323 list_del(&ioim->qe);
3324 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
3325 bfa_ioim_tov(ioim);
3326 }
3327 }
3328}
3329
3330/**
3331 * IO cleanup completion
3332 */
3333static void
3334bfa_tskim_cleanp_comp(void *tskim_cbarg)
3335{
3336 struct bfa_tskim_s *tskim = tskim_cbarg;
3337
3338 bfa_stats(tskim->itnim, tm_io_comps);
3339 bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
3340}
3341
3342/**
3343 * Gather affected IO requests and task management commands.
3344 */
3345static void
3346bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
3347{
3348 struct bfa_ioim_s *ioim;
3349 struct list_head *qe, *qen;
3350
3351 bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);
3352
3353 list_for_each_safe(qe, qen, &tskim->io_q) {
3354 ioim = (struct bfa_ioim_s *) qe;
3355 bfa_wc_up(&tskim->wc);
3356 bfa_ioim_cleanup_tm(ioim, tskim);
3357 }
3358
3359 bfa_wc_wait(&tskim->wc);
3360}
3361
3362/**
3363 * Send task management request to firmware.
3364 */
3365static bfa_boolean_t
3366bfa_tskim_send(struct bfa_tskim_s *tskim)
3367{
3368 struct bfa_itnim_s *itnim = tskim->itnim;
3369 struct bfi_tskim_req_s *m;
3370
3371 /**
3372 * check for room in queue to send request now
3373 */
3374 m = bfa_reqq_next(tskim->bfa, itnim->reqq);
3375 if (!m)
3376 return BFA_FALSE;
3377
3378 /**
3379 * build i/o request message next
3380 */
3381 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
3382 bfa_lpuid(tskim->bfa));
3383
3384 m->tsk_tag = bfa_os_htons(tskim->tsk_tag);
3385 m->itn_fhdl = tskim->itnim->rport->fw_handle;
3386 m->t_secs = tskim->tsecs;
3387 m->lun = tskim->lun;
3388 m->tm_flags = tskim->tm_cmnd;
3389
3390 /**
3391 * queue I/O message to firmware
3392 */
3393 bfa_reqq_produce(tskim->bfa, itnim->reqq);
3394 return BFA_TRUE;
3395}
3396
3397/**
3398 * Send abort request to cleanup an active TM to firmware.
3399 */
3400static bfa_boolean_t
3401bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
3402{
3403 struct bfa_itnim_s *itnim = tskim->itnim;
3404 struct bfi_tskim_abortreq_s *m;
3405
3406 /**
3407 * check for room in queue to send request now
3408 */
3409 m = bfa_reqq_next(tskim->bfa, itnim->reqq);
3410 if (!m)
3411 return BFA_FALSE;
3412
3413 /**
3414 * build i/o request message next
3415 */
3416 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
3417 bfa_lpuid(tskim->bfa));
3418
3419 m->tsk_tag = bfa_os_htons(tskim->tsk_tag);
3420
3421 /**
3422 * queue I/O message to firmware
3423 */
3424 bfa_reqq_produce(tskim->bfa, itnim->reqq);
3425 return BFA_TRUE;
3426}
3427
/**
 * Call to resume task management cmnd waiting for room in request queue.
 *
 * Registered via bfa_reqq_winit() in bfa_tskim_attach(); fires when a
 * request queue slot frees up.
 */
static void
bfa_tskim_qresume(void *cbarg)
{
	struct bfa_tskim_s *tskim = cbarg;

	bfa_stats(tskim->itnim, tm_qresumes);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
}
3439
3440/**
3441 * Cleanup IOs associated with a task mangement command on IOC failures.
3442 */
3443static void
3444bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
3445{
3446 struct bfa_ioim_s *ioim;
3447 struct list_head *qe, *qen;
3448
3449 list_for_each_safe(qe, qen, &tskim->io_q) {
3450 ioim = (struct bfa_ioim_s *) qe;
3451 bfa_ioim_iocdisable(ioim);
3452 }
3453}
3454
3455
3456
3457/**
3458 * hal_tskim_friend
3459 */
3460
/**
 * Notification on completions from related ioim.
 *
 * Decrements the waiting counter armed in bfa_tskim_cleanup_ios(); when it
 * reaches zero, bfa_tskim_cleanp_comp() runs.
 */
void
bfa_tskim_iodone(struct bfa_tskim_s *tskim)
{
	bfa_wc_down(&tskim->wc);
}
3469
/**
 * Handle IOC h/w failure notification from itnim.
 */
void
bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
{
	/* notify must be set before the event so the SM sees the final value */
	tskim->notify = BFA_FALSE;
	bfa_stats(tskim->itnim, tm_iocdowns);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
}
3480
/**
 * Cleanup TM command and associated IOs as part of ITNIM offline.
 */
void
bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
{
	/* notify must be set before the event so the SM sees the final value */
	tskim->notify = BFA_TRUE;
	bfa_stats(tskim->itnim, tm_cleanups);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
}
3491
3492/**
3493 * Memory allocation and initialization.
3494 */
3495void
3496bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
3497{
3498 struct bfa_tskim_s *tskim;
3499 u16 i;
3500
3501 INIT_LIST_HEAD(&fcpim->tskim_free_q);
3502
3503 tskim = (struct bfa_tskim_s *) bfa_meminfo_kva(minfo);
3504 fcpim->tskim_arr = tskim;
3505
3506 for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
3507 /*
3508 * initialize TSKIM
3509 */
3510 bfa_os_memset(tskim, 0, sizeof(struct bfa_tskim_s));
3511 tskim->tsk_tag = i;
3512 tskim->bfa = fcpim->bfa;
3513 tskim->fcpim = fcpim;
3514 tskim->notify = BFA_FALSE;
3515 bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
3516 tskim);
3517 bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
3518
3519 list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
3520 }
3521
3522 bfa_meminfo_kva(minfo) = (u8 *) tskim;
3523}
3524
void
bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim)
{
	/**
	 * @todo intentionally empty: TSKIMs are carved from the meminfo KVA
	 * area in bfa_tskim_attach(); presumably that area is reclaimed by
	 * the meminfo owner — confirm before adding teardown here.
	 */
}
3532
3533void
3534bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3535{
3536 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
3537 struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
3538 struct bfa_tskim_s *tskim;
3539 u16 tsk_tag = bfa_os_ntohs(rsp->tsk_tag);
3540
3541 tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
3542 bfa_assert(tskim->tsk_tag == tsk_tag);
3543
3544 tskim->tsk_status = rsp->tsk_status;
3545
3546 /**
3547 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
3548 * requests. All other statuses are for normal completions.
3549 */
3550 if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
3551 bfa_stats(tskim->itnim, tm_cleanup_comps);
3552 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
3553 } else {
3554 bfa_stats(tskim->itnim, tm_fw_rsps);
3555 bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
3556 }
3557}
3558
3559
3560
3561/**
3562 * hal_tskim_api
3563 */
3564
3565
3566struct bfa_tskim_s *
3567bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
3568{
3569 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
3570 struct bfa_tskim_s *tskim;
3571
3572 bfa_q_deq(&fcpim->tskim_free_q, &tskim);
3573
3574 if (tskim)
3575 tskim->dtsk = dtsk;
3576
3577 return tskim;
3578}
3579
/**
 * Return a completed TSKIM to the free pool.
 */
void
bfa_tskim_free(struct bfa_tskim_s *tskim)
{
	/* The TM must still be on its itnim's active TM queue. */
	bfa_assert(bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
	list_del(&tskim->qe);
	list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
}
3587
/**
 * Start a task management command.
 *
 * @param[in]	tskim	BFA task management command instance
 * @param[in]	itnim	i-t nexus for the task management command
 * @param[in]	lun	lun, if applicable
 * @param[in]	tm_cmnd	Task management command code.
 * @param[in]	tsecs	Timeout in seconds
 *
 * @return None.
 */
void
bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim, lun_t lun,
		enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
{
	/* All fields must be set before the START event is dispatched. */
	tskim->itnim = itnim;
	tskim->lun = lun;
	tskim->tm_cmnd = tm_cmnd;
	tskim->tsecs = tsecs;
	tskim->notify = BFA_FALSE;
	bfa_stats(itnim, tm_cmnds);

	list_add_tail(&tskim->qe, &itnim->tsk_q);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
}
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
new file mode 100644
index 000000000000..3bf343160aac
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -0,0 +1,401 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_FCPIM_H__
19#define __BFA_FCPIM_H__
20
21#include "bfa.h"
22#include "bfa_svc.h"
23#include "bfi_ms.h"
24#include "bfa_defs_svc.h"
25#include "bfa_cs.h"
26
27
28#define BFA_ITNIM_MIN 32
29#define BFA_ITNIM_MAX 1024
30
31#define BFA_IOIM_MIN 8
32#define BFA_IOIM_MAX 2000
33
34#define BFA_TSKIM_MIN 4
35#define BFA_TSKIM_MAX 512
36#define BFA_FCPIM_PATHTOV_DEF (30 * 1000) /* in millisecs */
37#define BFA_FCPIM_PATHTOV_MAX (90 * 1000) /* in millisecs */
38
39
40#define bfa_itnim_ioprofile_update(__itnim, __index) \
41 (__itnim->ioprofile.iocomps[__index]++)
42
43#define BFA_IOIM_RETRY_TAG_OFFSET 11
44#define BFA_IOIM_RETRY_TAG_MASK 0x07ff /* 2K IOs */
45#define BFA_IOIM_RETRY_MAX 7
46
47/* Buckets are are 512 bytes to 2MB */
48static inline u32
49bfa_ioim_get_index(u32 n) {
50 int pos = 0;
51 if (n >= (1UL)<<22)
52 return BFA_IOBUCKET_MAX - 1;
53 n >>= 8;
54 if (n >= (1UL)<<16)
55 n >>= 16; pos += 16;
56 if (n >= 1 << 8)
57 n >>= 8; pos += 8;
58 if (n >= 1 << 4)
59 n >>= 4; pos += 4;
60 if (n >= 1 << 2)
61 n >>= 2; pos += 2;
62 if (n >= 1 << 1)
63 pos += 1;
64
65 return (n == 0) ? (0) : pos;
66}
67
68/*
69 * forward declarations
70 */
71struct bfa_ioim_s;
72struct bfa_tskim_s;
73struct bfad_ioim_s;
74struct bfad_tskim_s;
75
76typedef void (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim);
77
78struct bfa_fcpim_mod_s {
79 struct bfa_s *bfa;
80 struct bfa_itnim_s *itnim_arr;
81 struct bfa_ioim_s *ioim_arr;
82 struct bfa_ioim_sp_s *ioim_sp_arr;
83 struct bfa_tskim_s *tskim_arr;
84 struct bfa_dma_s snsbase;
85 int num_itnims;
86 int num_ioim_reqs;
87 int num_tskim_reqs;
88 u32 path_tov;
89 u16 q_depth;
90 u8 reqq; /* Request queue to be used */
91 u8 rsvd;
92 struct list_head itnim_q; /* queue of active itnim */
93 struct list_head ioim_free_q; /* free IO resources */
94 struct list_head ioim_resfree_q; /* IOs waiting for f/w */
95 struct list_head ioim_comp_q; /* IO global comp Q */
96 struct list_head tskim_free_q;
97 u32 ios_active; /* current active IOs */
98 u32 delay_comp;
99 struct bfa_fcpim_del_itn_stats_s del_itn_stats;
100 bfa_boolean_t ioredirect;
101 bfa_boolean_t io_profile;
102 u32 io_profile_start_time;
103 bfa_fcpim_profile_t profile_comp;
104 bfa_fcpim_profile_t profile_start;
105};
106
/**
 * BFA IO (initiator mode)
 */
struct bfa_ioim_s {
	struct list_head	qe;		/* queue element	*/
	bfa_sm_t		sm;		/* BFA ioim state machine */
	struct bfa_s		*bfa;		/* BFA module	*/
	struct bfa_fcpim_mod_s	*fcpim;		/* parent fcpim module */
	struct bfa_itnim_s	*itnim;		/* i-t-n nexus for this IO  */
	struct bfad_ioim_s	*dio;		/* driver IO handle	*/
	u16			iotag;		/* FWI IO tag	*/
	u16			abort_tag;	/* unique abort request tag */
	u16			nsges;		/* number of SG elements */
	u16			nsgpgs;		/* number of SG pages	*/
	struct bfa_sgpg_s	*sgpg;		/* first SG page	*/
	struct list_head	sgpg_q;		/* allocated SG pages	*/
	struct bfa_cb_qe_s	hcb_qe;		/* bfa callback qelem	*/
	bfa_cb_cbfn_t		io_cbfn;	/* IO completion handler */
	struct bfa_ioim_sp_s	*iosp;		/* slow-path IO handling */
	u8			reqq;		/* Request queue for I/O */
	u64			start_time;	/* IO's Profile start val */
};
129
130
131struct bfa_ioim_sp_s {
132 struct bfi_msg_s comp_rspmsg; /* IO comp f/w response */
133 u8 *snsinfo; /* sense info for this IO */
134 struct bfa_sgpg_wqe_s sgpg_wqe; /* waitq elem for sgpg */
135 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
136 bfa_boolean_t abort_explicit; /* aborted by OS */
137 struct bfa_tskim_s *tskim; /* Relevant TM cmd */
138};
139
140/**
141 * BFA Task management command (initiator mode)
142 */
143struct bfa_tskim_s {
144 struct list_head qe;
145 bfa_sm_t sm;
146 struct bfa_s *bfa; /* BFA module */
147 struct bfa_fcpim_mod_s *fcpim; /* parent fcpim module */
148 struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */
149 struct bfad_tskim_s *dtsk; /* driver task mgmt cmnd */
150 bfa_boolean_t notify; /* notify itnim on TM comp */
151 lun_t lun; /* lun if applicable */
152 enum fcp_tm_cmnd tm_cmnd; /* task management command */
153 u16 tsk_tag; /* FWI IO tag */
154 u8 tsecs; /* timeout in seconds */
155 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
156 struct list_head io_q; /* queue of affected IOs */
157 struct bfa_wc_s wc; /* waiting counter */
158 struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */
159 enum bfi_tskim_status tsk_status; /* TM status */
160};
161
162
163/**
164 * BFA i-t-n (initiator mode)
165 */
166struct bfa_itnim_s {
167 struct list_head qe; /* queue element */
168 bfa_sm_t sm; /* i-t-n im BFA state machine */
169 struct bfa_s *bfa; /* bfa instance */
170 struct bfa_rport_s *rport; /* bfa rport */
171 void *ditn; /* driver i-t-n structure */
172 struct bfi_mhdr_s mhdr; /* pre-built mhdr */
173 u8 msg_no; /* itnim/rport firmware handle */
174 u8 reqq; /* CQ for requests */
175 struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */
176 struct list_head pending_q; /* queue of pending IO requests */
177 struct list_head io_q; /* queue of active IO requests */
178 struct list_head io_cleanup_q; /* IO being cleaned up */
179 struct list_head tsk_q; /* queue of active TM commands */
180 struct list_head delay_comp_q; /* queue of failed inflight cmds */
181 bfa_boolean_t seq_rec; /* SQER supported */
182 bfa_boolean_t is_online; /* itnim is ONLINE for IO */
183 bfa_boolean_t iotov_active; /* IO TOV timer is active */
184 struct bfa_wc_s wc; /* waiting counter */
185 struct bfa_timer_s timer; /* pending IO TOV */
186 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
187 struct bfa_fcpim_mod_s *fcpim; /* fcpim module */
188 struct bfa_itnim_iostats_s stats;
189 struct bfa_itnim_ioprofile_s ioprofile;
190};
191
192
193#define bfa_itnim_is_online(_itnim) ((_itnim)->is_online)
194#define BFA_FCPIM_MOD(_hal) (&(_hal)->modules.fcpim_mod)
195#define BFA_IOIM_FROM_TAG(_fcpim, _iotag) \
196 (&fcpim->ioim_arr[(_iotag & BFA_IOIM_RETRY_TAG_MASK)])
197#define BFA_TSKIM_FROM_TAG(_fcpim, _tmtag) \
198 (&fcpim->tskim_arr[_tmtag & (fcpim->num_tskim_reqs - 1)])
199
200#define bfa_io_profile_start_time(_bfa) \
201 (_bfa->modules.fcpim_mod.io_profile_start_time)
202#define bfa_fcpim_get_io_profile(_bfa) \
203 (_bfa->modules.fcpim_mod.io_profile)
204
205static inline bfa_boolean_t
206bfa_ioim_get_iotag(struct bfa_ioim_s *ioim)
207{
208 u16 k = ioim->iotag;
209
210 k >>= BFA_IOIM_RETRY_TAG_OFFSET; k++;
211
212 if (k > BFA_IOIM_RETRY_MAX)
213 return BFA_FALSE;
214 ioim->iotag &= BFA_IOIM_RETRY_TAG_MASK;
215 ioim->iotag |= k<<BFA_IOIM_RETRY_TAG_OFFSET;
216 return BFA_TRUE;
217}
218/*
219 * function prototypes
220 */
221void bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim,
222 struct bfa_meminfo_s *minfo);
223void bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim);
224void bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
225void bfa_ioim_good_comp_isr(struct bfa_s *bfa,
226 struct bfi_msg_s *msg);
227void bfa_ioim_cleanup(struct bfa_ioim_s *ioim);
228void bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim,
229 struct bfa_tskim_s *tskim);
230void bfa_ioim_iocdisable(struct bfa_ioim_s *ioim);
231void bfa_ioim_tov(struct bfa_ioim_s *ioim);
232
233void bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim,
234 struct bfa_meminfo_s *minfo);
235void bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim);
236void bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
237void bfa_tskim_iodone(struct bfa_tskim_s *tskim);
238void bfa_tskim_iocdisable(struct bfa_tskim_s *tskim);
239void bfa_tskim_cleanup(struct bfa_tskim_s *tskim);
240
241void bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
242 u32 *dm_len);
243void bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim,
244 struct bfa_meminfo_s *minfo);
245void bfa_itnim_detach(struct bfa_fcpim_mod_s *fcpim);
246void bfa_itnim_iocdisable(struct bfa_itnim_s *itnim);
247void bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
248void bfa_itnim_iodone(struct bfa_itnim_s *itnim);
249void bfa_itnim_tskdone(struct bfa_itnim_s *itnim);
250bfa_boolean_t bfa_itnim_hold_io(struct bfa_itnim_s *itnim);
251void bfa_ioim_profile_comp(struct bfa_ioim_s *ioim);
252void bfa_ioim_profile_start(struct bfa_ioim_s *ioim);
253
254
255/*
256 * bfa fcpim module API functions
257 */
258void bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov);
259u16 bfa_fcpim_path_tov_get(struct bfa_s *bfa);
260void bfa_fcpim_qdepth_set(struct bfa_s *bfa, u16 q_depth);
261u16 bfa_fcpim_qdepth_get(struct bfa_s *bfa);
262bfa_status_t bfa_fcpim_get_modstats(struct bfa_s *bfa,
263 struct bfa_itnim_iostats_s *modstats);
264bfa_status_t bfa_fcpim_port_iostats(struct bfa_s *bfa,
265 struct bfa_itnim_iostats_s *stats, u8 lp_tag);
266bfa_status_t bfa_fcpim_get_del_itn_stats(struct bfa_s *bfa,
267 struct bfa_fcpim_del_itn_stats_s *modstats);
268bfa_status_t bfa_fcpim_port_clear_iostats(struct bfa_s *bfa, u8 lp_tag);
269void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats,
270 struct bfa_itnim_iostats_s *itnim_stats);
271bfa_status_t bfa_fcpim_clr_modstats(struct bfa_s *bfa);
272void bfa_fcpim_set_ioredirect(struct bfa_s *bfa,
273 bfa_boolean_t state);
274void bfa_fcpim_update_ioredirect(struct bfa_s *bfa);
275bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time);
276bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa);
277#define bfa_fcpim_ioredirect_enabled(__bfa) \
278 (((struct bfa_fcpim_mod_s *)(BFA_FCPIM_MOD(__bfa)))->ioredirect)
279
280#define bfa_fcpim_get_next_reqq(__bfa, __qid) \
281{ \
282 struct bfa_fcpim_mod_s *__fcpim = BFA_FCPIM_MOD(__bfa); \
283 __fcpim->reqq++; \
284 __fcpim->reqq &= (BFI_IOC_MAX_CQS - 1); \
285 *(__qid) = __fcpim->reqq; \
286}
287
288#define bfa_iocfc_map_msg_to_qid(__msg, __qid) \
289 *(__qid) = (u8)((__msg) & (BFI_IOC_MAX_CQS - 1));
290/*
291 * bfa itnim API functions
292 */
293struct bfa_itnim_s *bfa_itnim_create(struct bfa_s *bfa,
294 struct bfa_rport_s *rport, void *itnim);
295void bfa_itnim_delete(struct bfa_itnim_s *itnim);
296void bfa_itnim_online(struct bfa_itnim_s *itnim,
297 bfa_boolean_t seq_rec);
298void bfa_itnim_offline(struct bfa_itnim_s *itnim);
299void bfa_itnim_get_stats(struct bfa_itnim_s *itnim,
300 struct bfa_itnim_iostats_s *stats);
301void bfa_itnim_clear_stats(struct bfa_itnim_s *itnim);
302bfa_status_t bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
303 struct bfa_itnim_ioprofile_s *ioprofile);
304#define bfa_itnim_get_reqq(__ioim) (((struct bfa_ioim_s *)__ioim)->itnim->reqq)
305
306/**
307 * BFA completion callback for bfa_itnim_online().
308 *
309 * @param[in] itnim FCS or driver itnim instance
310 *
311 * return None
312 */
313void bfa_cb_itnim_online(void *itnim);
314
315/**
316 * BFA completion callback for bfa_itnim_offline().
317 *
318 * @param[in] itnim FCS or driver itnim instance
319 *
320 * return None
321 */
322void bfa_cb_itnim_offline(void *itnim);
323void bfa_cb_itnim_tov_begin(void *itnim);
324void bfa_cb_itnim_tov(void *itnim);
325
326/**
327 * BFA notification to FCS/driver for second level error recovery.
328 *
329 * Atleast one I/O request has timedout and target is unresponsive to
330 * repeated abort requests. Second level error recovery should be initiated
331 * by starting implicit logout and recovery procedures.
332 *
333 * @param[in] itnim FCS or driver itnim instance
334 *
335 * return None
336 */
337void bfa_cb_itnim_sler(void *itnim);
338
339/*
340 * bfa ioim API functions
341 */
342struct bfa_ioim_s *bfa_ioim_alloc(struct bfa_s *bfa,
343 struct bfad_ioim_s *dio,
344 struct bfa_itnim_s *itnim,
345 u16 nsgles);
346
347void bfa_ioim_free(struct bfa_ioim_s *ioim);
348void bfa_ioim_start(struct bfa_ioim_s *ioim);
349bfa_status_t bfa_ioim_abort(struct bfa_ioim_s *ioim);
350void bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim,
351 bfa_boolean_t iotov);
352
353
354/**
355 * I/O completion notification.
356 *
357 * @param[in] dio driver IO structure
358 * @param[in] io_status IO completion status
359 * @param[in] scsi_status SCSI status returned by target
360 * @param[in] sns_len SCSI sense length, 0 if none
361 * @param[in] sns_info SCSI sense data, if any
362 * @param[in] residue Residual length
363 *
364 * @return None
365 */
366void bfa_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio,
367 enum bfi_ioim_status io_status,
368 u8 scsi_status, int sns_len,
369 u8 *sns_info, s32 residue);
370
371/**
372 * I/O good completion notification.
373 *
374 * @param[in] dio driver IO structure
375 *
376 * @return None
377 */
378void bfa_cb_ioim_good_comp(void *bfad, struct bfad_ioim_s *dio);
379
380/**
381 * I/O abort completion notification
382 *
383 * @param[in] dio driver IO that was aborted
384 *
385 * @return None
386 */
387void bfa_cb_ioim_abort(void *bfad, struct bfad_ioim_s *dio);
388
389/*
390 * bfa tskim API functions
391 */
392struct bfa_tskim_s *bfa_tskim_alloc(struct bfa_s *bfa,
393 struct bfad_tskim_s *dtsk);
394void bfa_tskim_free(struct bfa_tskim_s *tskim);
395void bfa_tskim_start(struct bfa_tskim_s *tskim,
396 struct bfa_itnim_s *itnim, lun_t lun,
397 enum fcp_tm_cmnd tm, u8 t_secs);
398void bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
399 enum bfi_tskim_status tsk_status);
400
401#endif /* __BFA_FCPIM_H__ */
diff --git a/drivers/scsi/bfa/bfa_fcpim_priv.h b/drivers/scsi/bfa/bfa_fcpim_priv.h
deleted file mode 100644
index 762516cb5cb2..000000000000
--- a/drivers/scsi/bfa/bfa_fcpim_priv.h
+++ /dev/null
@@ -1,192 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_FCPIM_PRIV_H__
19#define __BFA_FCPIM_PRIV_H__
20
21#include <bfa_fcpim.h>
22#include <defs/bfa_defs_fcpim.h>
23#include <cs/bfa_wc.h>
24#include "bfa_sgpg_priv.h"
25
26#define BFA_ITNIM_MIN 32
27#define BFA_ITNIM_MAX 1024
28
29#define BFA_IOIM_MIN 8
30#define BFA_IOIM_MAX 2000
31
32#define BFA_TSKIM_MIN 4
33#define BFA_TSKIM_MAX 512
34#define BFA_FCPIM_PATHTOV_DEF (30 * 1000) /* in millisecs */
35#define BFA_FCPIM_PATHTOV_MAX (90 * 1000) /* in millisecs */
36
37#define bfa_fcpim_stats(__fcpim, __stats) \
38 ((__fcpim)->stats.__stats++)
39
40struct bfa_fcpim_mod_s {
41 struct bfa_s *bfa;
42 struct bfa_itnim_s *itnim_arr;
43 struct bfa_ioim_s *ioim_arr;
44 struct bfa_ioim_sp_s *ioim_sp_arr;
45 struct bfa_tskim_s *tskim_arr;
46 struct bfa_dma_s snsbase;
47 int num_itnims;
48 int num_ioim_reqs;
49 int num_tskim_reqs;
50 u32 path_tov;
51 u16 q_depth;
52 u8 reqq; /* Request queue to be used */
53 u8 rsvd;
54 struct list_head itnim_q; /* queue of active itnim */
55 struct list_head ioim_free_q; /* free IO resources */
56 struct list_head ioim_resfree_q; /* IOs waiting for f/w */
57 struct list_head ioim_comp_q; /* IO global comp Q */
58 struct list_head tskim_free_q;
59 u32 ios_active; /* current active IOs */
60 u32 delay_comp;
61 struct bfa_fcpim_stats_s stats;
62 bfa_boolean_t ioredirect;
63};
64
65struct bfa_ioim_s;
66struct bfa_tskim_s;
67
68/**
69 * BFA IO (initiator mode)
70 */
71struct bfa_ioim_s {
72 struct list_head qe; /* queue elememt */
73 bfa_sm_t sm; /* BFA ioim state machine */
74 struct bfa_s *bfa; /* BFA module */
75 struct bfa_fcpim_mod_s *fcpim; /* parent fcpim module */
76 struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */
77 struct bfad_ioim_s *dio; /* driver IO handle */
78 u16 iotag; /* FWI IO tag */
79 u16 abort_tag; /* unqiue abort request tag */
80 u16 nsges; /* number of SG elements */
81 u16 nsgpgs; /* number of SG pages */
82 struct bfa_sgpg_s *sgpg; /* first SG page */
83 struct list_head sgpg_q; /* allocated SG pages */
84 struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */
85 bfa_cb_cbfn_t io_cbfn; /* IO completion handler */
86 struct bfa_ioim_sp_s *iosp; /* slow-path IO handling */
87 u8 reqq; /* Request queue for I/O */
88};
89
90struct bfa_ioim_sp_s {
91 struct bfi_msg_s comp_rspmsg; /* IO comp f/w response */
92 u8 *snsinfo; /* sense info for this IO */
93 struct bfa_sgpg_wqe_s sgpg_wqe; /* waitq elem for sgpg */
94 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
95 bfa_boolean_t abort_explicit; /* aborted by OS */
96 struct bfa_tskim_s *tskim; /* Relevant TM cmd */
97};
98
99/**
100 * BFA Task management command (initiator mode)
101 */
102struct bfa_tskim_s {
103 struct list_head qe;
104 bfa_sm_t sm;
105 struct bfa_s *bfa; /* BFA module */
106 struct bfa_fcpim_mod_s *fcpim; /* parent fcpim module */
107 struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */
108 struct bfad_tskim_s *dtsk; /* driver task mgmt cmnd */
109 bfa_boolean_t notify; /* notify itnim on TM comp */
110 lun_t lun; /* lun if applicable */
111 enum fcp_tm_cmnd tm_cmnd; /* task management command */
112 u16 tsk_tag; /* FWI IO tag */
113 u8 tsecs; /* timeout in seconds */
114 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
115 struct list_head io_q; /* queue of affected IOs */
116 struct bfa_wc_s wc; /* waiting counter */
117 struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */
118 enum bfi_tskim_status tsk_status; /* TM status */
119};
120
121/**
122 * BFA i-t-n (initiator mode)
123 */
124struct bfa_itnim_s {
125 struct list_head qe; /* queue element */
126 bfa_sm_t sm; /* i-t-n im BFA state machine */
127 struct bfa_s *bfa; /* bfa instance */
128 struct bfa_rport_s *rport; /* bfa rport */
129 void *ditn; /* driver i-t-n structure */
130 struct bfi_mhdr_s mhdr; /* pre-built mhdr */
131 u8 msg_no; /* itnim/rport firmware handle */
132 u8 reqq; /* CQ for requests */
133 struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */
134 struct list_head pending_q; /* queue of pending IO requests*/
135 struct list_head io_q; /* queue of active IO requests */
136 struct list_head io_cleanup_q; /* IO being cleaned up */
137 struct list_head tsk_q; /* queue of active TM commands */
138 struct list_head delay_comp_q;/* queue of failed inflight cmds */
139 bfa_boolean_t seq_rec; /* SQER supported */
140 bfa_boolean_t is_online; /* itnim is ONLINE for IO */
141 bfa_boolean_t iotov_active; /* IO TOV timer is active */
142 struct bfa_wc_s wc; /* waiting counter */
143 struct bfa_timer_s timer; /* pending IO TOV */
144 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
145 struct bfa_fcpim_mod_s *fcpim; /* fcpim module */
146 struct bfa_itnim_hal_stats_s stats;
147 struct bfa_itnim_latency_s io_latency;
148};
149
150#define bfa_itnim_is_online(_itnim) ((_itnim)->is_online)
151#define BFA_FCPIM_MOD(_hal) (&(_hal)->modules.fcpim_mod)
152#define BFA_IOIM_FROM_TAG(_fcpim, _iotag) \
153 (&fcpim->ioim_arr[_iotag])
154#define BFA_TSKIM_FROM_TAG(_fcpim, _tmtag) \
155 (&fcpim->tskim_arr[_tmtag & (fcpim->num_tskim_reqs - 1)])
156
157/*
158 * function prototypes
159 */
160void bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim,
161 struct bfa_meminfo_s *minfo);
162void bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim);
163void bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
164void bfa_ioim_good_comp_isr(struct bfa_s *bfa,
165 struct bfi_msg_s *msg);
166void bfa_ioim_cleanup(struct bfa_ioim_s *ioim);
167void bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim,
168 struct bfa_tskim_s *tskim);
169void bfa_ioim_iocdisable(struct bfa_ioim_s *ioim);
170void bfa_ioim_tov(struct bfa_ioim_s *ioim);
171
172void bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim,
173 struct bfa_meminfo_s *minfo);
174void bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim);
175void bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
176void bfa_tskim_iodone(struct bfa_tskim_s *tskim);
177void bfa_tskim_iocdisable(struct bfa_tskim_s *tskim);
178void bfa_tskim_cleanup(struct bfa_tskim_s *tskim);
179
180void bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
181 u32 *dm_len);
182void bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim,
183 struct bfa_meminfo_s *minfo);
184void bfa_itnim_detach(struct bfa_fcpim_mod_s *fcpim);
185void bfa_itnim_iocdisable(struct bfa_itnim_s *itnim);
186void bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
187void bfa_itnim_iodone(struct bfa_itnim_s *itnim);
188void bfa_itnim_tskdone(struct bfa_itnim_s *itnim);
189bfa_boolean_t bfa_itnim_hold_io(struct bfa_itnim_s *itnim);
190
191#endif /* __BFA_FCPIM_PRIV_H__ */
192
diff --git a/drivers/scsi/bfa/bfa_fcport.c b/drivers/scsi/bfa/bfa_fcport.c
deleted file mode 100644
index 76867b5577fa..000000000000
--- a/drivers/scsi/bfa/bfa_fcport.c
+++ /dev/null
@@ -1,1962 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <bfa_svc.h>
20#include <bfi/bfi_pport.h>
21#include <bfi/bfi_pbc.h>
22#include <cs/bfa_debug.h>
23#include <aen/bfa_aen.h>
24#include <cs/bfa_plog.h>
25#include <aen/bfa_aen_port.h>
26
27BFA_TRC_FILE(HAL, FCPORT);
28BFA_MODULE(fcport);
29
/*
 * The port is considered disabled if the corresponding physical port or
 * IOC is disabled explicitly.  The (bfa) argument is now parenthesized
 * in the member access so expression arguments expand correctly.
 */
#define BFA_PORT_IS_DISABLED(bfa) \
	((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
	(bfa_ioc_is_disabled(&(bfa)->ioc) == BFA_TRUE))
37
38/*
39 * forward declarations
40 */
41static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
42static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
43static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
44static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
45static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
46static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
47static void bfa_fcport_callback(struct bfa_fcport_s *fcport,
48 enum bfa_pport_linkstate event);
49static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
50 enum bfa_pport_linkstate event);
51static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
52static void bfa_fcport_stats_get_timeout(void *cbarg);
53static void bfa_fcport_stats_clr_timeout(void *cbarg);
54
55/**
56 * bfa_pport_private
57 */
58
/**
 * BFA port state machine events
 */
enum bfa_fcport_sm_event {
	BFA_FCPORT_SM_START = 1,	/* start port state machine */
	BFA_FCPORT_SM_STOP = 2,	/* stop port state machine */
	BFA_FCPORT_SM_ENABLE = 3,	/* enable port */
	BFA_FCPORT_SM_DISABLE = 4,	/* disable port state machine */
	BFA_FCPORT_SM_FWRSP = 5,	/* firmware enable/disable rsp */
	BFA_FCPORT_SM_LINKUP = 6,	/* firmware linkup event */
	BFA_FCPORT_SM_LINKDOWN = 7,	/* firmware linkdown event */
	BFA_FCPORT_SM_QRESUME = 8,	/* CQ space available */
	BFA_FCPORT_SM_HWFAIL = 9,	/* IOC h/w failure */
};
73
/**
 * BFA port link notification state machine events.  Notification state
 * tracks callbacks that are queued to the consumer but not yet delivered.
 */

enum bfa_fcport_ln_sm_event {
	BFA_FCPORT_LN_SM_LINKUP = 1,	/* linkup event */
	BFA_FCPORT_LN_SM_LINKDOWN = 2,	/* linkdown event */
	BFA_FCPORT_LN_SM_NOTIFICATION = 3	/* done notification */
};
83
84static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
85 enum bfa_fcport_sm_event event);
86static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
87 enum bfa_fcport_sm_event event);
88static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
89 enum bfa_fcport_sm_event event);
90static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
91 enum bfa_fcport_sm_event event);
92static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
93 enum bfa_fcport_sm_event event);
94static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
95 enum bfa_fcport_sm_event event);
96static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
97 enum bfa_fcport_sm_event event);
98static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
99 enum bfa_fcport_sm_event event);
100static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
101 enum bfa_fcport_sm_event event);
102static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
103 enum bfa_fcport_sm_event event);
104static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
105 enum bfa_fcport_sm_event event);
106
107static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
108 enum bfa_fcport_ln_sm_event event);
109static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
110 enum bfa_fcport_ln_sm_event event);
111static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
112 enum bfa_fcport_ln_sm_event event);
113static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
114 enum bfa_fcport_ln_sm_event event);
115static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
116 enum bfa_fcport_ln_sm_event event);
117static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
118 enum bfa_fcport_ln_sm_event event);
119static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
120 enum bfa_fcport_ln_sm_event event);
121
/* Map each SM handler to the externally reported BFA_PPORT_ST_* state. */
static struct bfa_sm_table_s hal_pport_sm_table[] = {
	{BFA_SM(bfa_fcport_sm_uninit), BFA_PPORT_ST_UNINIT},
	{BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PPORT_ST_ENABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_enabling), BFA_PPORT_ST_ENABLING},
	{BFA_SM(bfa_fcport_sm_linkdown), BFA_PPORT_ST_LINKDOWN},
	{BFA_SM(bfa_fcport_sm_linkup), BFA_PPORT_ST_LINKUP},
	{BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PPORT_ST_DISABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_disabling), BFA_PPORT_ST_DISABLING},
	{BFA_SM(bfa_fcport_sm_disabled), BFA_PPORT_ST_DISABLED},
	{BFA_SM(bfa_fcport_sm_stopped), BFA_PPORT_ST_STOPPED},
	{BFA_SM(bfa_fcport_sm_iocdown), BFA_PPORT_ST_IOCDOWN},
	/* both iocdown and iocfail are reported as IOCDOWN externally */
	{BFA_SM(bfa_fcport_sm_iocfail), BFA_PPORT_ST_IOCDOWN},
};
135
/* Log an asynchronous port event (enable/disable/online/...) tagged with
 * the port WWN. */
static void
bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
{
	union bfa_aen_data_u aen_data;
	struct bfa_log_mod_s *logmod = fcport->bfa->logm;
	wwn_t pwwn = fcport->pwwn;
	char pwwn_ptr[BFA_STRING_32];

	memset(&aen_data, 0, sizeof(aen_data));
	wwn2str(pwwn_ptr, pwwn);
	bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, event), pwwn_ptr);

	/* NOTE(review): aen_data is populated below but never passed to any
	 * call visible here -- presumably it should be handed to an AEN post
	 * routine; confirm against the AEN framework users. */
	aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
	aen_data.port.pwwn = pwwn;
}
151
/* Port SM: initial state, before the port has been started. */
static void
bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
		 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/**
		 * Start event after IOC is configured and BFA is started.
		 */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			/* no request-queue space; sent again on QRESUME */
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/**
		 * Port is persistently configured to be in enabled state. Do
		 * not change state. Port enabling is done when START event is
		 * received.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/**
		 * If a port is persistently configured to be disabled, the
		 * first event will be a port disable request.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
193
/* Port SM: enable requested but waiting for request-queue space. */
static void
bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* queue space is now available; actually send the enable */
		bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		bfa_fcport_send_enable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/**
		 * Already enable is in progress.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/**
		 * The enable request never reached the firmware (still
		 * waiting for queue space), so cancel the wait and go
		 * straight to disabled -- no disable request is needed.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/**
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
246
/* Port SM: enable request sent to firmware, awaiting response. */
static void
bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
	case BFA_FCPORT_SM_LINKDOWN:
		/* enable acknowledged; link not up yet */
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		break;

	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);

		bfa_assert(fcport->event_cbfn);
		bfa_fcport_callback(fcport, BFA_PPORT_LINKUP);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/**
		 * Already being enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
296
/* Port SM: port enabled, physical link down. */
static void
bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	/* pevent is only meaningful for the LINKUP case below, where the
	 * firmware event message carries the new link state */
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
		bfa_assert(fcport->event_cbfn);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");

		/* FCoE mode (non-FC): record FIP FCF discovery outcome */
		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {

			bfa_trc(fcport->bfa,
				pevent->link_state.vc_fcf.fcf.fipenabled);
			bfa_trc(fcport->bfa,
				pevent->link_state.vc_fcf.fcf.fipfailed);

			if (pevent->link_state.vc_fcf.fcf.fipfailed)
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovery Failed");
			else
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovered");
		}

		bfa_fcport_callback(fcport, BFA_PPORT_LINKUP);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);
		/**
		 * If QoS is enabled and it is not online,
		 * Send a separate event.
		 */
		if ((fcport->cfg.qos_enabled)
		    && (bfa_os_ntohl(fcport->qos_attr.state) != BFA_QOS_ONLINE))
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);

		break;

	case BFA_FCPORT_SM_LINKDOWN:
		/**
		 * Possible to get link down event.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/**
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
376
/* Port SM: port enabled and link up (operational state). */
static void
bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_ENABLE:
		/**
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);

		/* link is effectively lost; notify consumers now */
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
		/* OFFLINE if this linkdown was administrative, else it is
		 * an unexpected disconnect */
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		else
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_fcport_reset_linkinfo(fcport);
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		else
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN);
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		else
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
439
/* Port SM: disable requested but waiting for request-queue space. */
static void
bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* queue space available; actually send the disable */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/**
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/**
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		/* iocfail (not iocdown): the port was being disabled, so it
		 * must come back disabled after IOC recovery */
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
480
/* Port SM: disable request sent to firmware, awaiting response. */
static void
bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/**
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* re-enable while the disable is still in flight */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/**
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
529
/* Port SM: port administratively disabled. */
static void
bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/**
		 * Ignore start event for a port that is disabled.
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/**
		 * Already disabled.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
572
/* Port SM: module stopped; only a new START re-enables the port. */
static void
bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
		break;

	default:
		/**
		 * Ignore all other events.
		 */
		;
	}
}
594
/**
 * Port is enabled. IOC is down/failed.  A START after IOC recovery
 * re-issues the port enable.
 */
static void
bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
		 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
		break;

	default:
		/**
		 * Ignore all events.
		 */
		;
	}
}
619
/**
 * Port is disabled. IOC is down/failed.  A START after IOC recovery
 * returns the port to disabled; an ENABLE is remembered by moving to
 * iocdown so the port comes up enabled once the IOC is back.
 */
static void
bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
		 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_ENABLE:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		/**
		 * Ignore all events.
		 */
		;
	}
}
645
646/**
647 * Link state is down
648 */
649static void
650bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
651 enum bfa_fcport_ln_sm_event event)
652{
653 bfa_trc(ln->fcport->bfa, event);
654
655 switch (event) {
656 case BFA_FCPORT_LN_SM_LINKUP:
657 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
658 bfa_fcport_queue_cb(ln, BFA_PPORT_LINKUP);
659 break;
660
661 default:
662 bfa_sm_fault(ln->fcport->bfa, event);
663 }
664}
665
/**
 * Link notification SM: link down, down-notification still pending
 * delivery to the consumer.
 */
static void
bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		/* remember the up event until the down notification lands */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
688
/**
 * Link notification SM: waiting for the down notification to complete,
 * with a linkup event pending behind it.
 */
static void
bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* pending up was cancelled by another down */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* down notified; now deliver the queued linkup */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PPORT_LINKUP);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
712
713/**
714 * Link state is up
715 */
716static void
717bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
718 enum bfa_fcport_ln_sm_event event)
719{
720 bfa_trc(ln->fcport->bfa, event);
721
722 switch (event) {
723 case BFA_FCPORT_LN_SM_LINKDOWN:
724 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
725 bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN);
726 break;
727
728 default:
729 bfa_sm_fault(ln->fcport->bfa, event);
730 }
731}
732
/**
 * Link notification SM: link up, up-notification still pending delivery
 * to the consumer.
 */
static void
bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* remember the down event until the up notification lands */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
755
/**
 * Link notification SM: waiting for the up notification to complete,
 * with a linkdown event pending behind it.
 */
static void
bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		/* queue the up behind the already-pending down */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* up notified; now deliver the queued linkdown */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
779
/**
 * Link notification SM: waiting for the up notification to complete,
 * with a linkdown and a further linkup both pending behind it.
 */
static void
bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* trailing up was cancelled by another down */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* up notified; deliver the queued down, keep the queued up */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
803
804/**
805 * bfa_pport_private
806 */
807
808static void
809__bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
810{
811 struct bfa_fcport_ln_s *ln = cbarg;
812
813 if (complete)
814 ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
815 else
816 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
817}
818
/*
 * Deliver a link-state change to the consumer: synchronously when the
 * FCS is attached, otherwise through the deferred notification SM.
 */
static void
bfa_fcport_callback(struct bfa_fcport_s *fcport, enum bfa_pport_linkstate event)
{
	if (fcport->bfa->fcs) {
		fcport->event_cbfn(fcport->event_cbarg, event);
		return;
	}

	switch (event) {
	case BFA_PPORT_LINKUP:
		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
		break;
	case BFA_PPORT_LINKDOWN:
		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
		break;
	default:
		bfa_assert(0);
	}
}
838
/* Queue a link-state callback for deferred delivery via the hcb queue. */
static void
bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_pport_linkstate event)
{
	ln->ln_event = event;
	bfa_cb_queue(ln->fcport->bfa, &ln->ln_qe, __bfa_cb_fcport_event, ln);
}
845
/* Cacheline-rounded size of the DMA-able port statistics block. */
#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
			BFA_CACHELINE_SZ))

/* Report this module's DMA memory requirement (stats block only). */
static void
bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
		u32 *dm_len)
{
	*dm_len += FCPORT_STATS_DMA_SZ;
}
855
856static void
857bfa_fcport_qresume(void *cbarg)
858{
859 struct bfa_fcport_s *fcport = cbarg;
860
861 bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
862}
863
/*
 * Carve the stats block out of the module's DMA memory region and
 * advance the meminfo cursors past it.
 */
static void
bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
{
	u8 *dm_kva;
	u64 dm_pa;

	dm_kva = bfa_meminfo_dma_virt(meminfo);
	dm_pa = bfa_meminfo_dma_phys(meminfo);

	fcport->stats_kva = dm_kva;
	fcport->stats_pa = dm_pa;
	fcport->stats = (union bfa_fcport_stats_u *)dm_kva;

	dm_kva += FCPORT_STATS_DMA_SZ;
	dm_pa += FCPORT_STATS_DMA_SZ;

	/* hand back the cursors so the next module claims after us */
	bfa_meminfo_dma_virt(meminfo) = dm_kva;
	bfa_meminfo_dma_phys(meminfo) = dm_pa;
}
883
/**
 * Memory initialization: claim DMA memory, reset both state machines and
 * install the default port configuration.
 */
static void
bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	struct bfa_pport_cfg_s *port_cfg = &fcport->cfg;
	struct bfa_fcport_ln_s *ln = &fcport->ln;
	struct bfa_timeval_s tv;

	bfa_os_memset(fcport, 0, sizeof(struct bfa_fcport_s));
	fcport->bfa = bfa;
	ln->fcport = fcport;

	bfa_fcport_mem_claim(fcport, meminfo);

	bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
	bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);

	/**
	 * initialize time stamp for stats reset
	 */
	bfa_os_gettimeofday(&tv);
	fcport->stats_reset_time = tv.tv_sec;

	/**
	 * initialize and set default configuration
	 */
	port_cfg->topology = BFA_PPORT_TOPOLOGY_P2P;
	port_cfg->speed = BFA_PPORT_SPEED_AUTO;
	port_cfg->trunked = BFA_FALSE;
	port_cfg->maxfrsize = 0;	/* 0 = use firmware default */

	port_cfg->trl_def_speed = BFA_PPORT_SPEED_1GBPS;

	/* arm the queue-full wait element for enable/disable requests */
	bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
}
923
/* Module detach hook: nothing to release (DMA memory is owned by iocfc). */
static void
bfa_fcport_detach(struct bfa_s *bfa)
{
}
928
/**
 * Called when IOC is ready.  Kicks the port SM out of uninit/stopped.
 */
static void
bfa_fcport_start(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
}
937
/**
 * Called before IOC is stopped.
 */
static void
bfa_fcport_stop(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
}
946
/**
 * Called when IOC failure is detected.
 */
static void
bfa_fcport_iocdisable(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_HWFAIL);
}
955
/*
 * Copy negotiated link parameters out of the firmware linkup event
 * message into the fcport.
 */
static void
bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;

	fcport->speed = pevent->link_state.speed;
	fcport->topology = pevent->link_state.topology;

	/* NOTE(review): loop ALPA is forced to 0 here; presumably the real
	 * ALPA is fetched elsewhere -- confirm against loop support code. */
	if (fcport->topology == BFA_PPORT_TOPOLOGY_LOOP)
		fcport->myalpa = 0;

	/*
	 * QoS Details
	 */
	bfa_os_assign(fcport->qos_attr, pevent->link_state.qos_attr);
	bfa_os_assign(fcport->qos_vc_attr,
		pevent->link_state.vc_fcf.qos_vc_attr);


	bfa_trc(fcport->bfa, fcport->speed);
	bfa_trc(fcport->bfa, fcport->topology);
}
978
/* Invalidate cached link parameters when the link goes away. */
static void
bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
{
	fcport->speed = BFA_PPORT_SPEED_UNKNOWN;
	fcport->topology = BFA_PPORT_TOPOLOGY_NONE;
}
985
/**
 * Send port enable message to firmware.
 *
 * Returns BFA_TRUE if the request was queued, BFA_FALSE if the request
 * queue was full (a wait element is armed and the caller should move to
 * the qwait state).
 */
static bfa_boolean_t
bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_enable_req_s *m;

	/**
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
			&fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
			bfa_lpuid(fcport->bfa));
	m->nwwn = fcport->nwwn;
	m->pwwn = fcport->pwwn;
	m->port_cfg = fcport->cfg;
	m->msgtag = fcport->msgtag;
	/* maxfrsize goes over the wire big-endian */
	m->port_cfg.maxfrsize = bfa_os_htons(fcport->cfg.maxfrsize);
	bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
	return BFA_TRUE;
}
1027
/**
 * Send port disable message to firmware.
 *
 * Returns BFA_TRUE if the request was queued, BFA_FALSE if the request
 * queue was full (a wait element is armed; resumed via QRESUME).
 */
static bfa_boolean_t
bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_req_s *m;

	/**
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
			&fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
			bfa_lpuid(fcport->bfa));
	m->msgtag = fcport->msgtag;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);

	return BFA_TRUE;
}
1063
/* Cache the port/node WWNs published by the IOC. */
static void
bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
{
	fcport->pwwn = bfa_ioc_get_pwwn(&fcport->bfa->ioc);
	fcport->nwwn = bfa_ioc_get_nwwn(&fcport->bfa->ioc);

	bfa_trc(fcport->bfa, fcport->pwwn);
	bfa_trc(fcport->bfa, fcport->nwwn);
}
1073
/*
 * Push the configured TX BB-credit to firmware.  Best-effort: if the
 * request queue is full the update is dropped (only traced) -- there is
 * no retry wait element for this message.
 */
static void
bfa_fcport_send_txcredit(void *port_cbarg)
{

	struct bfa_fcport_s *fcport = port_cbarg;
	struct bfi_fcport_set_svc_params_req_s *m;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
		return;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
			bfa_lpuid(fcport->bfa));
	m->tx_bbcredit = bfa_os_htons((u16) fcport->cfg.tx_bbcredit);

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
1099
1100static void
1101bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
1102 struct bfa_qos_stats_s *s)
1103{
1104 u32 *dip = (u32 *) d;
1105 u32 *sip = (u32 *) s;
1106 int i;
1107
1108 /* Now swap the 32 bit fields */
1109 for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
1110 dip[i] = bfa_os_ntohl(sip[i]);
1111}
1112
/*
 * Byte-swap the FCoE statistics block from wire order to host order.
 * Counters are 64-bit, so words are handled in pairs: on little-endian
 * hosts the two 32-bit halves must also be exchanged.  Assumes the
 * struct is an even number of u32s.
 */
static void
bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
	struct bfa_fcoe_stats_s *s)
{
	u32 *dip = (u32 *) d;
	u32 *sip = (u32 *) s;
	int i;

	for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
	     i = i + 2) {
#ifdef __BIGENDIAN
		dip[i] = bfa_os_ntohl(sip[i]);
		dip[i + 1] = bfa_os_ntohl(sip[i + 1]);
#else
		/* swap halves of each 64-bit counter as well */
		dip[i] = bfa_os_ntohl(sip[i + 1]);
		dip[i + 1] = bfa_os_ntohl(sip[i]);
#endif
	}
}
1132
/*
 * Deferred callback for a stats-get request.  On completion, convert the
 * DMA'd stats to host order into the caller's buffer and invoke the
 * caller's callback; on flush, just release the busy flag.
 */
static void
__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = cbarg;

	if (complete) {
		if (fcport->stats_status == BFA_STATUS_OK) {
			struct bfa_timeval_s tv;

			/* Swap FC QoS or FCoE stats */
			if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
				bfa_fcport_qos_stats_swap(
					&fcport->stats_ret->fcqos,
					&fcport->stats->fcqos);
			} else {
				bfa_fcport_fcoe_stats_swap(
					&fcport->stats_ret->fcoe,
					&fcport->stats->fcoe);

				/* seconds since the last stats reset */
				bfa_os_gettimeofday(&tv);
				fcport->stats_ret->fcoe.secs_reset =
					tv.tv_sec - fcport->stats_reset_time;
			}
		}
		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
	} else {
		fcport->stats_busy = BFA_FALSE;
		fcport->stats_status = BFA_STATUS_OK;
	}
}
1163
/*
 * Stats-get timer expired before the firmware responded: cancel any
 * queue wait and fail the request back to the caller with ETIMER.
 */
static void
bfa_fcport_stats_get_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	if (fcport->stats_qfull) {
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	fcport->stats_status = BFA_STATUS_ETIMER;
	bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get,
		fcport);
}
1180
/*
 * Issue a stats-get request to firmware.  If the request queue is full,
 * arm a wait element that re-enters this function when space frees up.
 */
static void
bfa_fcport_send_stats_get(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_get, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
		bfa_lpuid(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
1204
1205static void
1206__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
1207{
1208 struct bfa_fcport_s *fcport = cbarg;
1209
1210 if (complete) {
1211 struct bfa_timeval_s tv;
1212
1213 /**
1214 * re-initialize time stamp for stats reset
1215 */
1216 bfa_os_gettimeofday(&tv);
1217 fcport->stats_reset_time = tv.tv_sec;
1218
1219 fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
1220 } else {
1221 fcport->stats_busy = BFA_FALSE;
1222 fcport->stats_status = BFA_STATUS_OK;
1223 }
1224}
1225
1226static void
1227bfa_fcport_stats_clr_timeout(void *cbarg)
1228{
1229 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
1230
1231 bfa_trc(fcport->bfa, fcport->stats_qfull);
1232
1233 if (fcport->stats_qfull) {
1234 bfa_reqq_wcancel(&fcport->stats_reqq_wait);
1235 fcport->stats_qfull = BFA_FALSE;
1236 }
1237
1238 fcport->stats_status = BFA_STATUS_ETIMER;
1239 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
1240 __bfa_cb_fcport_stats_clr, fcport);
1241}
1242
1243static void
1244bfa_fcport_send_stats_clear(void *cbarg)
1245{
1246 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
1247 struct bfi_fcport_req_s *msg;
1248
1249 msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
1250
1251 if (!msg) {
1252 fcport->stats_qfull = BFA_TRUE;
1253 bfa_reqq_winit(&fcport->stats_reqq_wait,
1254 bfa_fcport_send_stats_clear, fcport);
1255 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
1256 &fcport->stats_reqq_wait);
1257 return;
1258 }
1259 fcport->stats_qfull = BFA_FALSE;
1260
1261 bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
1262 bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
1263 bfa_lpuid(fcport->bfa));
1264 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
1265}
1266
1267/**
1268 * bfa_pport_public
1269 */
1270
1271/**
1272 * Called to initialize port attributes
1273 */
1274void
1275bfa_fcport_init(struct bfa_s *bfa)
1276{
1277 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1278
1279 /**
1280 * Initialize port attributes from IOC hardware data.
1281 */
1282 bfa_fcport_set_wwns(fcport);
1283 if (fcport->cfg.maxfrsize == 0)
1284 fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
1285 fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
1286 fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
1287
1288 bfa_assert(fcport->cfg.maxfrsize);
1289 bfa_assert(fcport->cfg.rx_bbcredit);
1290 bfa_assert(fcport->speed_sup);
1291}
1292
1293
/**
 * Firmware message handler.
 *
 * Dispatches incoming BFI fcport messages: enable/disable responses are
 * forwarded to the port state machine (only when the message tag matches
 * the outstanding request), link events drive LINKUP/LINKDOWN transitions,
 * and stats get/clear responses complete the pending stats operation
 * unless its timeout has already fired.
 */
void
bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	union bfi_fcport_i2h_msg_u i2hmsg;

	i2hmsg.msg = msg;
	/* keep the last message around for debugging */
	fcport->event_arg.i2hmsg = i2hmsg;

	switch (msg->mhdr.msg_id) {
	case BFI_FCPORT_I2H_ENABLE_RSP:
		/* ignore stale responses to superseded requests */
		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		break;

	case BFI_FCPORT_I2H_DISABLE_RSP:
		/* ignore stale responses to superseded requests */
		if (fcport->msgtag == i2hmsg.pdisable_rsp->msgtag)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		break;

	case BFI_FCPORT_I2H_EVENT:
		switch (i2hmsg.event->link_state.linkstate) {
		case BFA_PPORT_LINKUP:
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
			break;
		case BFA_PPORT_LINKDOWN:
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
			break;
		case BFA_PPORT_TRUNK_LINKDOWN:
			/** todo: event notification */
			break;
		}
		break;

	case BFI_FCPORT_I2H_STATS_GET_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (fcport->stats_busy == BFA_FALSE ||
			fcport->stats_status == BFA_STATUS_ETIMER)
			break;

		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = i2hmsg.pstatsget_rsp->status;
		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
				__bfa_cb_fcport_stats_get, fcport);
		break;

	case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (fcport->stats_busy == BFA_FALSE ||
			fcport->stats_status == BFA_STATUS_ETIMER)
			break;

		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = BFA_STATUS_OK;
		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
				__bfa_cb_fcport_stats_clr, fcport);
		break;

	default:
		bfa_assert(0);
		break;
	}
}
1364
1365/**
1366 * bfa_pport_api
1367 */
1368
1369/**
1370 * Registered callback for port events.
1371 */
1372void
1373bfa_fcport_event_register(struct bfa_s *bfa,
1374 void (*cbfn) (void *cbarg, bfa_pport_event_t event),
1375 void *cbarg)
1376{
1377 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1378
1379 fcport->event_cbfn = cbfn;
1380 fcport->event_cbarg = cbarg;
1381}
1382
1383bfa_status_t
1384bfa_fcport_enable(struct bfa_s *bfa)
1385{
1386 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1387 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1388 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
1389
1390 /* if port is PBC disabled, return error */
1391 if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
1392 bfa_trc(bfa, fcport->pwwn);
1393 return BFA_STATUS_PBC;
1394 }
1395
1396 if (bfa_ioc_is_disabled(&bfa->ioc))
1397 return BFA_STATUS_IOC_DISABLED;
1398
1399 if (fcport->diag_busy)
1400 return BFA_STATUS_DIAG_BUSY;
1401 else if (bfa_sm_cmp_state
1402 (BFA_FCPORT_MOD(bfa), bfa_fcport_sm_disabling_qwait))
1403 return BFA_STATUS_DEVBUSY;
1404
1405 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
1406 return BFA_STATUS_OK;
1407}
1408
1409bfa_status_t
1410bfa_fcport_disable(struct bfa_s *bfa)
1411{
1412 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1413 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1414 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
1415
1416 /* if port is PBC disabled, return error */
1417 if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
1418 bfa_trc(bfa, fcport->pwwn);
1419 return BFA_STATUS_PBC;
1420 }
1421
1422 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
1423 return BFA_STATUS_OK;
1424}
1425
1426/**
1427 * Configure port speed.
1428 */
1429bfa_status_t
1430bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
1431{
1432 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1433
1434 bfa_trc(bfa, speed);
1435
1436 if ((speed != BFA_PPORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
1437 bfa_trc(bfa, fcport->speed_sup);
1438 return BFA_STATUS_UNSUPP_SPEED;
1439 }
1440
1441 fcport->cfg.speed = speed;
1442
1443 return BFA_STATUS_OK;
1444}
1445
1446/**
1447 * Get current speed.
1448 */
1449enum bfa_pport_speed
1450bfa_fcport_get_speed(struct bfa_s *bfa)
1451{
1452 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1453
1454 return fcport->speed;
1455}
1456
1457/**
1458 * Configure port topology.
1459 */
1460bfa_status_t
1461bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology)
1462{
1463 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1464
1465 bfa_trc(bfa, topology);
1466 bfa_trc(bfa, fcport->cfg.topology);
1467
1468 switch (topology) {
1469 case BFA_PPORT_TOPOLOGY_P2P:
1470 case BFA_PPORT_TOPOLOGY_LOOP:
1471 case BFA_PPORT_TOPOLOGY_AUTO:
1472 break;
1473
1474 default:
1475 return BFA_STATUS_EINVAL;
1476 }
1477
1478 fcport->cfg.topology = topology;
1479 return BFA_STATUS_OK;
1480}
1481
1482/**
1483 * Get current topology.
1484 */
1485enum bfa_pport_topology
1486bfa_fcport_get_topology(struct bfa_s *bfa)
1487{
1488 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1489
1490 return fcport->topology;
1491}
1492
1493bfa_status_t
1494bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
1495{
1496 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1497
1498 bfa_trc(bfa, alpa);
1499 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
1500 bfa_trc(bfa, fcport->cfg.hardalpa);
1501
1502 fcport->cfg.cfg_hardalpa = BFA_TRUE;
1503 fcport->cfg.hardalpa = alpa;
1504
1505 return BFA_STATUS_OK;
1506}
1507
1508bfa_status_t
1509bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
1510{
1511 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1512
1513 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
1514 bfa_trc(bfa, fcport->cfg.hardalpa);
1515
1516 fcport->cfg.cfg_hardalpa = BFA_FALSE;
1517 return BFA_STATUS_OK;
1518}
1519
1520bfa_boolean_t
1521bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
1522{
1523 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1524
1525 *alpa = fcport->cfg.hardalpa;
1526 return fcport->cfg.cfg_hardalpa;
1527}
1528
1529u8
1530bfa_fcport_get_myalpa(struct bfa_s *bfa)
1531{
1532 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1533
1534 return fcport->myalpa;
1535}
1536
1537bfa_status_t
1538bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
1539{
1540 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1541
1542 bfa_trc(bfa, maxfrsize);
1543 bfa_trc(bfa, fcport->cfg.maxfrsize);
1544
1545 /*
1546 * with in range
1547 */
1548 if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
1549 return BFA_STATUS_INVLD_DFSZ;
1550
1551 /*
1552 * power of 2, if not the max frame size of 2112
1553 */
1554 if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
1555 return BFA_STATUS_INVLD_DFSZ;
1556
1557 fcport->cfg.maxfrsize = maxfrsize;
1558 return BFA_STATUS_OK;
1559}
1560
1561u16
1562bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
1563{
1564 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1565
1566 return fcport->cfg.maxfrsize;
1567}
1568
1569u32
1570bfa_fcport_mypid(struct bfa_s *bfa)
1571{
1572 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1573
1574 return fcport->mypid;
1575}
1576
1577u8
1578bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
1579{
1580 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1581
1582 return fcport->cfg.rx_bbcredit;
1583}
1584
1585void
1586bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
1587{
1588 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1589
1590 fcport->cfg.tx_bbcredit = (u8) tx_bbcredit;
1591 bfa_fcport_send_txcredit(fcport);
1592}
1593
1594/**
1595 * Get port attributes.
1596 */
1597
1598wwn_t
1599bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
1600{
1601 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1602 if (node)
1603 return fcport->nwwn;
1604 else
1605 return fcport->pwwn;
1606}
1607
/**
 * Fill @attr with a snapshot of the port's configured and operational
 * attributes: WWNs, speed, topology, beacon state and the derived
 * port state (PBC-disabled, IOC-disabled and FW-mismatch override the
 * state-machine value).
 */
void
bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

	bfa_os_memset(attr, 0, sizeof(struct bfa_pport_attr_s));

	attr->nwwn = fcport->nwwn;
	attr->pwwn = fcport->pwwn;

	/* factory-programmed WWNs come from the IOC */
	attr->factorypwwn = bfa_ioc_get_mfg_pwwn(&bfa->ioc);
	attr->factorynwwn = bfa_ioc_get_mfg_nwwn(&bfa->ioc);

	bfa_os_memcpy(&attr->pport_cfg, &fcport->cfg,
		sizeof(struct bfa_pport_cfg_s));
	/*
	 * speed attributes
	 */
	attr->pport_cfg.speed = fcport->cfg.speed;
	attr->speed_supported = fcport->speed_sup;
	attr->speed = fcport->speed;
	attr->cos_supported = FC_CLASS_3;

	/*
	 * topology attributes
	 */
	attr->pport_cfg.topology = fcport->cfg.topology;
	attr->topology = fcport->topology;

	/*
	 * beacon attributes
	 */
	attr->beacon = fcport->beacon;
	attr->link_e2e_beacon = fcport->link_e2e_beacon;
	attr->plog_enabled = bfa_plog_get_setting(fcport->bfa->plog);

	/* path TOV and queue depth are owned by the FCP-IM module */
	attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
	attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);

	/* PBC Disabled State */
	if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED)
		attr->port_state = BFA_PPORT_ST_PREBOOT_DISABLED;
	else {
		attr->port_state = bfa_sm_to_state(
				hal_pport_sm_table, fcport->sm);
		/* IOC problems override the state-machine state */
		if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
			attr->port_state = BFA_PPORT_ST_IOCDIS;
		else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
			attr->port_state = BFA_PPORT_ST_FWMISMATCH;
	}
}
1661
1662#define BFA_FCPORT_STATS_TOV 1000
1663
/**
 * Fetch port statistics (FCQoS or FCoE).
 *
 * @param[in]  bfa   BFA instance
 * @param[out] stats caller's buffer, filled by the completion callback
 * @param[in]  cbfn  completion callback
 * @param[in]  cbarg callback argument
 *
 * Only one stats operation (get or clear) may be outstanding at a time;
 * returns BFA_STATUS_DEVBUSY otherwise.  A BFA_FCPORT_STATS_TOV ms timer
 * bounds the firmware response.
 */
bfa_status_t
bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
		    bfa_cb_pport_t cbfn, void *cbarg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	if (fcport->stats_busy) {
		bfa_trc(bfa, fcport->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}

	fcport->stats_busy = BFA_TRUE;
	fcport->stats_ret = stats;
	fcport->stats_cbfn = cbfn;
	fcport->stats_cbarg = cbarg;

	bfa_fcport_send_stats_get(fcport);

	bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
		fcport, BFA_FCPORT_STATS_TOV);
	return BFA_STATUS_OK;
}
1689
1690/**
1691 * Reset port statistics (FCQoS or FCoE).
1692 */
1693bfa_status_t
1694bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
1695{
1696 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1697
1698 if (fcport->stats_busy) {
1699 bfa_trc(bfa, fcport->stats_busy);
1700 return BFA_STATUS_DEVBUSY;
1701 }
1702
1703 fcport->stats_busy = BFA_TRUE;
1704 fcport->stats_cbfn = cbfn;
1705 fcport->stats_cbarg = cbarg;
1706
1707 bfa_fcport_send_stats_clear(fcport);
1708
1709 bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
1710 fcport, BFA_FCPORT_STATS_TOV);
1711 return BFA_STATUS_OK;
1712}
1713
1714/**
1715 * Fetch FCQoS port statistics
1716 */
1717bfa_status_t
1718bfa_fcport_get_qos_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
1719 bfa_cb_pport_t cbfn, void *cbarg)
1720{
1721 /* Meaningful only for FC mode */
1722 bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));
1723
1724 return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
1725}
1726
/**
 * Reset FCQoS port statistics.
 */
bfa_status_t
bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
{
	/* Meaningful only for FC mode */
	bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));

	return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
}
1738
/**
 * Fetch FCoE port statistics.
 */
bfa_status_t
bfa_fcport_get_fcoe_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
		bfa_cb_pport_t cbfn, void *cbarg)
{
	/* Meaningful only for FCoE mode */
	bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc));

	return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
}
1751
1752/**
1753 * Reset FCoE port statistics
1754 */
1755bfa_status_t
1756bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
1757{
1758 /* Meaningful only for FCoE mode */
1759 bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc));
1760
1761 return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
1762}
1763
1764bfa_status_t
1765bfa_fcport_trunk_enable(struct bfa_s *bfa, u8 bitmap)
1766{
1767 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1768
1769 bfa_trc(bfa, bitmap);
1770 bfa_trc(bfa, fcport->cfg.trunked);
1771 bfa_trc(bfa, fcport->cfg.trunk_ports);
1772
1773 if (!bitmap || (bitmap & (bitmap - 1)))
1774 return BFA_STATUS_EINVAL;
1775
1776 fcport->cfg.trunked = BFA_TRUE;
1777 fcport->cfg.trunk_ports = bitmap;
1778
1779 return BFA_STATUS_OK;
1780}
1781
1782void
1783bfa_fcport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr)
1784{
1785 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1786
1787 qos_attr->state = bfa_os_ntohl(fcport->qos_attr.state);
1788 qos_attr->total_bb_cr = bfa_os_ntohl(fcport->qos_attr.total_bb_cr);
1789}
1790
1791void
1792bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
1793 struct bfa_qos_vc_attr_s *qos_vc_attr)
1794{
1795 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1796 struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
1797 u32 i = 0;
1798
1799 qos_vc_attr->total_vc_count = bfa_os_ntohs(bfa_vc_attr->total_vc_count);
1800 qos_vc_attr->shared_credit = bfa_os_ntohs(bfa_vc_attr->shared_credit);
1801 qos_vc_attr->elp_opmode_flags =
1802 bfa_os_ntohl(bfa_vc_attr->elp_opmode_flags);
1803
1804 /*
1805 * Individual VC info
1806 */
1807 while (i < qos_vc_attr->total_vc_count) {
1808 qos_vc_attr->vc_info[i].vc_credit =
1809 bfa_vc_attr->vc_info[i].vc_credit;
1810 qos_vc_attr->vc_info[i].borrow_credit =
1811 bfa_vc_attr->vc_info[i].borrow_credit;
1812 qos_vc_attr->vc_info[i].priority =
1813 bfa_vc_attr->vc_info[i].priority;
1814 ++i;
1815 }
1816}
1817
/**
 * Disable trunking.  Currently a no-op stub that always reports success;
 * the configuration flags are left untouched.
 */
bfa_status_t
bfa_fcport_trunk_disable(struct bfa_s *bfa)
{
	return BFA_STATUS_OK;
}
1826
1827bfa_boolean_t
1828bfa_fcport_trunk_query(struct bfa_s *bfa, u32 *bitmap)
1829{
1830 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1831
1832 *bitmap = fcport->cfg.trunk_ports;
1833 return fcport->cfg.trunked;
1834}
1835
1836bfa_boolean_t
1837bfa_fcport_is_disabled(struct bfa_s *bfa)
1838{
1839 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1840
1841 return bfa_sm_to_state(hal_pport_sm_table, fcport->sm) ==
1842 BFA_PPORT_ST_DISABLED;
1843
1844}
1845
1846bfa_boolean_t
1847bfa_fcport_is_ratelim(struct bfa_s *bfa)
1848{
1849 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1850
1851 return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
1852
1853}
1854
1855void
1856bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off)
1857{
1858 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1859 enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
1860
1861 bfa_trc(bfa, on_off);
1862 bfa_trc(bfa, fcport->cfg.qos_enabled);
1863
1864 bfa_trc(bfa, ioc_type);
1865
1866 if (ioc_type == BFA_IOC_TYPE_FC) {
1867 fcport->cfg.qos_enabled = on_off;
1868 /**
1869 * Notify fcpim of the change in QoS state
1870 */
1871 bfa_fcpim_update_ioredirect(bfa);
1872 }
1873}
1874
1875void
1876bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off)
1877{
1878 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1879
1880 bfa_trc(bfa, on_off);
1881 bfa_trc(bfa, fcport->cfg.ratelimit);
1882
1883 fcport->cfg.ratelimit = on_off;
1884 if (fcport->cfg.trl_def_speed == BFA_PPORT_SPEED_UNKNOWN)
1885 fcport->cfg.trl_def_speed = BFA_PPORT_SPEED_1GBPS;
1886}
1887
1888/**
1889 * Configure default minimum ratelim speed
1890 */
1891bfa_status_t
1892bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
1893{
1894 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1895
1896 bfa_trc(bfa, speed);
1897
1898 /*
1899 * Auto and speeds greater than the supported speed, are invalid
1900 */
1901 if ((speed == BFA_PPORT_SPEED_AUTO) || (speed > fcport->speed_sup)) {
1902 bfa_trc(bfa, fcport->speed_sup);
1903 return BFA_STATUS_UNSUPP_SPEED;
1904 }
1905
1906 fcport->cfg.trl_def_speed = speed;
1907
1908 return BFA_STATUS_OK;
1909}
1910
1911/**
1912 * Get default minimum ratelim speed
1913 */
1914enum bfa_pport_speed
1915bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
1916{
1917 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1918
1919 bfa_trc(bfa, fcport->cfg.trl_def_speed);
1920 return fcport->cfg.trl_def_speed;
1921
1922}
1923
1924void
1925bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status)
1926{
1927 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1928
1929 bfa_trc(bfa, status);
1930 bfa_trc(bfa, fcport->diag_busy);
1931
1932 fcport->diag_busy = status;
1933}
1934
1935void
1936bfa_fcport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon,
1937 bfa_boolean_t link_e2e_beacon)
1938{
1939 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1940
1941 bfa_trc(bfa, beacon);
1942 bfa_trc(bfa, link_e2e_beacon);
1943 bfa_trc(bfa, fcport->beacon);
1944 bfa_trc(bfa, fcport->link_e2e_beacon);
1945
1946 fcport->beacon = beacon;
1947 fcport->link_e2e_beacon = link_e2e_beacon;
1948}
1949
1950bfa_boolean_t
1951bfa_fcport_is_linkup(struct bfa_s *bfa)
1952{
1953 return bfa_sm_cmp_state(BFA_FCPORT_MOD(bfa), bfa_fcport_sm_linkup);
1954}
1955
1956bfa_boolean_t
1957bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
1958{
1959 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1960
1961 return fcport->cfg.qos_enabled;
1962}
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index d1a99209bf5f..9cebbe30a678 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -19,35 +19,28 @@
19 * bfa_fcs.c BFA FCS main 19 * bfa_fcs.c BFA FCS main
20 */ 20 */
21 21
22#include <fcs/bfa_fcs.h> 22#include "bfa_fcs.h"
23#include "fcs_port.h" 23#include "bfa_fcbuild.h"
24#include "fcs_uf.h"
25#include "fcs_vport.h"
26#include "fcs_rport.h"
27#include "fcs_fabric.h"
28#include "fcs_fcpim.h"
29#include "fcs_fcptm.h"
30#include "fcbuild.h"
31#include "fcs.h"
32#include "bfad_drv.h" 24#include "bfad_drv.h"
33#include <fcb/bfa_fcb.h> 25
26BFA_TRC_FILE(FCS, FCS);
34 27
35/** 28/**
36 * FCS sub-modules 29 * FCS sub-modules
37 */ 30 */
38struct bfa_fcs_mod_s { 31struct bfa_fcs_mod_s {
39 void (*attach) (struct bfa_fcs_s *fcs); 32 void (*attach) (struct bfa_fcs_s *fcs);
40 void (*modinit) (struct bfa_fcs_s *fcs); 33 void (*modinit) (struct bfa_fcs_s *fcs);
41 void (*modexit) (struct bfa_fcs_s *fcs); 34 void (*modexit) (struct bfa_fcs_s *fcs);
42}; 35};
43 36
44#define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit } 37#define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
45 38
46static struct bfa_fcs_mod_s fcs_modules[] = { 39static struct bfa_fcs_mod_s fcs_modules[] = {
47 { bfa_fcs_pport_attach, NULL, NULL }, 40 { bfa_fcs_port_attach, NULL, NULL },
48 { bfa_fcs_uf_attach, NULL, NULL }, 41 { bfa_fcs_uf_attach, NULL, NULL },
49 { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit, 42 { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
50 bfa_fcs_fabric_modexit }, 43 bfa_fcs_fabric_modexit },
51}; 44};
52 45
53/** 46/**
@@ -57,8 +50,8 @@ static struct bfa_fcs_mod_s fcs_modules[] = {
57static void 50static void
58bfa_fcs_exit_comp(void *fcs_cbarg) 51bfa_fcs_exit_comp(void *fcs_cbarg)
59{ 52{
60 struct bfa_fcs_s *fcs = fcs_cbarg; 53 struct bfa_fcs_s *fcs = fcs_cbarg;
61 struct bfad_s *bfad = fcs->bfad; 54 struct bfad_s *bfad = fcs->bfad;
62 55
63 complete(&bfad->comp); 56 complete(&bfad->comp);
64} 57}
@@ -74,9 +67,9 @@ bfa_fcs_exit_comp(void *fcs_cbarg)
74 */ 67 */
75void 68void
76bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad, 69bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
77 bfa_boolean_t min_cfg) 70 bfa_boolean_t min_cfg)
78{ 71{
79 int i; 72 int i;
80 struct bfa_fcs_mod_s *mod; 73 struct bfa_fcs_mod_s *mod;
81 74
82 fcs->bfa = bfa; 75 fcs->bfa = bfa;
@@ -86,7 +79,7 @@ bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
86 bfa_attach_fcs(bfa); 79 bfa_attach_fcs(bfa);
87 fcbuild_init(); 80 fcbuild_init();
88 81
89 for (i = 0; i < ARRAY_SIZE(fcs_modules); i++) { 82 for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) {
90 mod = &fcs_modules[i]; 83 mod = &fcs_modules[i];
91 if (mod->attach) 84 if (mod->attach)
92 mod->attach(fcs); 85 mod->attach(fcs);
@@ -99,11 +92,11 @@ bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
99void 92void
100bfa_fcs_init(struct bfa_fcs_s *fcs) 93bfa_fcs_init(struct bfa_fcs_s *fcs)
101{ 94{
102 int i, npbc_vports; 95 int i, npbc_vports;
103 struct bfa_fcs_mod_s *mod; 96 struct bfa_fcs_mod_s *mod;
104 struct bfi_pbc_vport_s pbc_vports[BFI_PBC_MAX_VPORTS]; 97 struct bfi_pbc_vport_s pbc_vports[BFI_PBC_MAX_VPORTS];
105 98
106 for (i = 0; i < ARRAY_SIZE(fcs_modules); i++) { 99 for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) {
107 mod = &fcs_modules[i]; 100 mod = &fcs_modules[i];
108 if (mod->modinit) 101 if (mod->modinit)
109 mod->modinit(fcs); 102 mod->modinit(fcs);
@@ -111,7 +104,7 @@ bfa_fcs_init(struct bfa_fcs_s *fcs)
111 /* Initialize pbc vports */ 104 /* Initialize pbc vports */
112 if (!fcs->min_cfg) { 105 if (!fcs->min_cfg) {
113 npbc_vports = 106 npbc_vports =
114 bfa_iocfc_get_pbc_vports(fcs->bfa, pbc_vports); 107 bfa_iocfc_get_pbc_vports(fcs->bfa, pbc_vports);
115 for (i = 0; i < npbc_vports; i++) 108 for (i = 0; i < npbc_vports; i++)
116 bfa_fcb_pbc_vport_create(fcs->bfa->bfad, pbc_vports[i]); 109 bfa_fcb_pbc_vport_create(fcs->bfa->bfad, pbc_vports[i]);
117 } 110 }
@@ -127,12 +120,13 @@ bfa_fcs_start(struct bfa_fcs_s *fcs)
127} 120}
128 121
129/** 122/**
130 * FCS driver details initialization. 123 * brief
124 * FCS driver details initialization.
131 * 125 *
132 * param[in] fcs FCS instance 126 * param[in] fcs FCS instance
133 * param[in] driver_info Driver Details 127 * param[in] driver_info Driver Details
134 * 128 *
135 * return None 129 * return None
136 */ 130 */
137void 131void
138bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs, 132bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
@@ -145,13 +139,13 @@ bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
145} 139}
146 140
147/** 141/**
148 * @brief 142 * brief
149 * FCS FDMI Driver Parameter Initialization 143 * FCS FDMI Driver Parameter Initialization
150 * 144 *
151 * @param[in] fcs FCS instance 145 * param[in] fcs FCS instance
152 * @param[in] fdmi_enable TRUE/FALSE 146 * param[in] fdmi_enable TRUE/FALSE
153 * 147 *
154 * @return None 148 * return None
155 */ 149 */
156void 150void
157bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable) 151bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable)
@@ -160,22 +154,24 @@ bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable)
160 fcs->fdmi_enabled = fdmi_enable; 154 fcs->fdmi_enabled = fdmi_enable;
161 155
162} 156}
163
164/** 157/**
165 * FCS instance cleanup and exit. 158 * brief
159 * FCS instance cleanup and exit.
166 * 160 *
167 * param[in] fcs FCS instance 161 * param[in] fcs FCS instance
168 * return None 162 * return None
169 */ 163 */
170void 164void
171bfa_fcs_exit(struct bfa_fcs_s *fcs) 165bfa_fcs_exit(struct bfa_fcs_s *fcs)
172{ 166{
173 struct bfa_fcs_mod_s *mod; 167 struct bfa_fcs_mod_s *mod;
174 int i; 168 int nmods, i;
175 169
176 bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs); 170 bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs);
177 171
178 for (i = 0; i < ARRAY_SIZE(fcs_modules); i++) { 172 nmods = sizeof(fcs_modules) / sizeof(fcs_modules[0]);
173
174 for (i = 0; i < nmods; i++) {
179 175
180 mod = &fcs_modules[i]; 176 mod = &fcs_modules[i];
181 if (mod->modexit) { 177 if (mod->modexit) {
@@ -194,24 +190,1547 @@ bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod)
194 fcs->trcmod = trcmod; 190 fcs->trcmod = trcmod;
195} 191}
196 192
193void
194bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs)
195{
196 bfa_wc_down(&fcs->wc);
197}
198
199/**
200 * Fabric module implementation.
201 */
202
203#define BFA_FCS_FABRIC_RETRY_DELAY (2000) /* Milliseconds */
204#define BFA_FCS_FABRIC_CLEANUP_DELAY (10000) /* Milliseconds */
205
206#define bfa_fcs_fabric_set_opertype(__fabric) do { \
207 if (bfa_fcport_get_topology((__fabric)->fcs->bfa) \
208 == BFA_PORT_TOPOLOGY_P2P) \
209 (__fabric)->oper_type = BFA_PORT_TYPE_NPORT; \
210 else \
211 (__fabric)->oper_type = BFA_PORT_TYPE_NLPORT; \
212} while (0)
213
214/*
215 * forward declarations
216 */
217static void bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric);
218static void bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric);
219static void bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric);
220static void bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric);
221static void bfa_fcs_fabric_delay(void *cbarg);
222static void bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric);
223static void bfa_fcs_fabric_delete_comp(void *cbarg);
224static void bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric,
225 struct fchs_s *fchs, u16 len);
226static void bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric,
227 struct fchs_s *fchs, u16 len);
228static void bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric);
229static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg,
230 struct bfa_fcxp_s *fcxp, void *cbarg,
231 bfa_status_t status,
232 u32 rsp_len,
233 u32 resid_len,
234 struct fchs_s *rspfchs);
235/**
236 * fcs_fabric_sm fabric state machine functions
237 */
238
239/**
240 * Fabric state machine events
241 */
242enum bfa_fcs_fabric_event {
243 BFA_FCS_FABRIC_SM_CREATE = 1, /* create from driver */
244 BFA_FCS_FABRIC_SM_DELETE = 2, /* delete from driver */
245 BFA_FCS_FABRIC_SM_LINK_DOWN = 3, /* link down from port */
246 BFA_FCS_FABRIC_SM_LINK_UP = 4, /* link up from port */
247 BFA_FCS_FABRIC_SM_CONT_OP = 5, /* flogi/auth continue op */
248 BFA_FCS_FABRIC_SM_RETRY_OP = 6, /* flogi/auth retry op */
249 BFA_FCS_FABRIC_SM_NO_FABRIC = 7, /* from flogi/auth */
250 BFA_FCS_FABRIC_SM_PERF_EVFP = 8, /* from flogi/auth */
251 BFA_FCS_FABRIC_SM_ISOLATE = 9, /* from EVFP processing */
252 BFA_FCS_FABRIC_SM_NO_TAGGING = 10, /* no VFT tagging from EVFP */
253 BFA_FCS_FABRIC_SM_DELAYED = 11, /* timeout delay event */
254 BFA_FCS_FABRIC_SM_AUTH_FAILED = 12, /* auth failed */
255 BFA_FCS_FABRIC_SM_AUTH_SUCCESS = 13, /* auth successful */
256 BFA_FCS_FABRIC_SM_DELCOMP = 14, /* all vports deleted event */
257 BFA_FCS_FABRIC_SM_LOOPBACK = 15, /* Received our own FLOGI */
258 BFA_FCS_FABRIC_SM_START = 16, /* from driver */
259};
260
261static void bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
262 enum bfa_fcs_fabric_event event);
263static void bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
264 enum bfa_fcs_fabric_event event);
265static void bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric,
266 enum bfa_fcs_fabric_event event);
267static void bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
268 enum bfa_fcs_fabric_event event);
269static void bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric,
270 enum bfa_fcs_fabric_event event);
271static void bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
272 enum bfa_fcs_fabric_event event);
273static void bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
274 enum bfa_fcs_fabric_event event);
275static void bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
276 enum bfa_fcs_fabric_event event);
277static void bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
278 enum bfa_fcs_fabric_event event);
279static void bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
280 enum bfa_fcs_fabric_event event);
281static void bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric,
282 enum bfa_fcs_fabric_event event);
283static void bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric,
284 enum bfa_fcs_fabric_event event);
285static void bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
286 enum bfa_fcs_fabric_event event);
287static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
288 enum bfa_fcs_fabric_event event);
289/**
290 * Beginning state before fabric creation.
291 */
292static void
293bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
294 enum bfa_fcs_fabric_event event)
295{
296 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
297 bfa_trc(fabric->fcs, event);
298
299 switch (event) {
300 case BFA_FCS_FABRIC_SM_CREATE:
301 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created);
302 bfa_fcs_fabric_init(fabric);
303 bfa_fcs_lport_init(&fabric->bport, &fabric->bport.port_cfg);
304 break;
305
306 case BFA_FCS_FABRIC_SM_LINK_UP:
307 case BFA_FCS_FABRIC_SM_LINK_DOWN:
308 break;
309
310 default:
311 bfa_sm_fault(fabric->fcs, event);
312 }
313}
314
315/**
316 * Beginning state before fabric creation.
317 */
318static void
319bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
320 enum bfa_fcs_fabric_event event)
321{
322 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
323 bfa_trc(fabric->fcs, event);
324
325 switch (event) {
326 case BFA_FCS_FABRIC_SM_START:
327 if (bfa_fcport_is_linkup(fabric->fcs->bfa)) {
328 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
329 bfa_fcs_fabric_login(fabric);
330 } else
331 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
332 break;
333
334 case BFA_FCS_FABRIC_SM_LINK_UP:
335 case BFA_FCS_FABRIC_SM_LINK_DOWN:
336 break;
337
338 case BFA_FCS_FABRIC_SM_DELETE:
339 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
340 bfa_fcs_modexit_comp(fabric->fcs);
341 break;
342
343 default:
344 bfa_sm_fault(fabric->fcs, event);
345 }
346}
347
348/**
349 * Link is down, awaiting LINK UP event from port. This is also the
350 * first state at fabric creation.
351 */
352static void
353bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric,
354 enum bfa_fcs_fabric_event event)
355{
356 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
357 bfa_trc(fabric->fcs, event);
358
359 switch (event) {
360 case BFA_FCS_FABRIC_SM_LINK_UP:
361 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
362 bfa_fcs_fabric_login(fabric);
363 break;
364
365 case BFA_FCS_FABRIC_SM_RETRY_OP:
366 break;
367
368 case BFA_FCS_FABRIC_SM_DELETE:
369 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
370 bfa_fcs_fabric_delete(fabric);
371 break;
372
373 default:
374 bfa_sm_fault(fabric->fcs, event);
375 }
376}
377
378/**
379 * FLOGI is in progress, awaiting FLOGI reply.
380 */
381static void
382bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
383 enum bfa_fcs_fabric_event event)
384{
385 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
386 bfa_trc(fabric->fcs, event);
387
388 switch (event) {
389 case BFA_FCS_FABRIC_SM_CONT_OP:
390
391 bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa,
392 fabric->bb_credit);
393 fabric->fab_type = BFA_FCS_FABRIC_SWITCHED;
394
395 if (fabric->auth_reqd && fabric->is_auth) {
396 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth);
397 bfa_trc(fabric->fcs, event);
398 } else {
399 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online);
400 bfa_fcs_fabric_notify_online(fabric);
401 }
402 break;
403
404 case BFA_FCS_FABRIC_SM_RETRY_OP:
405 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi_retry);
406 bfa_timer_start(fabric->fcs->bfa, &fabric->delay_timer,
407 bfa_fcs_fabric_delay, fabric,
408 BFA_FCS_FABRIC_RETRY_DELAY);
409 break;
410
411 case BFA_FCS_FABRIC_SM_LOOPBACK:
412 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_loopback);
413 bfa_lps_discard(fabric->lps);
414 bfa_fcs_fabric_set_opertype(fabric);
415 break;
416
417 case BFA_FCS_FABRIC_SM_NO_FABRIC:
418 fabric->fab_type = BFA_FCS_FABRIC_N2N;
419 bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa,
420 fabric->bb_credit);
421 bfa_fcs_fabric_notify_online(fabric);
422 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_nofabric);
423 break;
424
425 case BFA_FCS_FABRIC_SM_LINK_DOWN:
426 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
427 bfa_lps_discard(fabric->lps);
428 break;
429
430 case BFA_FCS_FABRIC_SM_DELETE:
431 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
432 bfa_lps_discard(fabric->lps);
433 bfa_fcs_fabric_delete(fabric);
434 break;
435
436 default:
437 bfa_sm_fault(fabric->fcs, event);
438 }
439}
440
441
442static void
443bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric,
444 enum bfa_fcs_fabric_event event)
445{
446 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
447 bfa_trc(fabric->fcs, event);
448
449 switch (event) {
450 case BFA_FCS_FABRIC_SM_DELAYED:
451 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
452 bfa_fcs_fabric_login(fabric);
453 break;
454
455 case BFA_FCS_FABRIC_SM_LINK_DOWN:
456 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
457 bfa_timer_stop(&fabric->delay_timer);
458 break;
459
460 case BFA_FCS_FABRIC_SM_DELETE:
461 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
462 bfa_timer_stop(&fabric->delay_timer);
463 bfa_fcs_fabric_delete(fabric);
464 break;
465
466 default:
467 bfa_sm_fault(fabric->fcs, event);
468 }
469}
470
471/**
472 * Authentication is in progress, awaiting authentication results.
473 */
474static void
475bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
476 enum bfa_fcs_fabric_event event)
477{
478 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
479 bfa_trc(fabric->fcs, event);
480
481 switch (event) {
482 case BFA_FCS_FABRIC_SM_AUTH_FAILED:
483 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed);
484 bfa_lps_discard(fabric->lps);
485 break;
486
487 case BFA_FCS_FABRIC_SM_AUTH_SUCCESS:
488 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online);
489 bfa_fcs_fabric_notify_online(fabric);
490 break;
491
492 case BFA_FCS_FABRIC_SM_PERF_EVFP:
493 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_evfp);
494 break;
495
496 case BFA_FCS_FABRIC_SM_LINK_DOWN:
497 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
498 bfa_lps_discard(fabric->lps);
499 break;
500
501 case BFA_FCS_FABRIC_SM_DELETE:
502 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
503 bfa_fcs_fabric_delete(fabric);
504 break;
505
506 default:
507 bfa_sm_fault(fabric->fcs, event);
508 }
509}
510
511/**
512 * Authentication failed
513 */
514static void
515bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
516 enum bfa_fcs_fabric_event event)
517{
518 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
519 bfa_trc(fabric->fcs, event);
520
521 switch (event) {
522 case BFA_FCS_FABRIC_SM_LINK_DOWN:
523 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
524 bfa_fcs_fabric_notify_offline(fabric);
525 break;
526
527 case BFA_FCS_FABRIC_SM_DELETE:
528 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
529 bfa_fcs_fabric_delete(fabric);
530 break;
531
532 default:
533 bfa_sm_fault(fabric->fcs, event);
534 }
535}
536
537/**
538 * Port is in loopback mode.
539 */
540static void
541bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
542 enum bfa_fcs_fabric_event event)
543{
544 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
545 bfa_trc(fabric->fcs, event);
546
547 switch (event) {
548 case BFA_FCS_FABRIC_SM_LINK_DOWN:
549 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
550 bfa_fcs_fabric_notify_offline(fabric);
551 break;
552
553 case BFA_FCS_FABRIC_SM_DELETE:
554 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
555 bfa_fcs_fabric_delete(fabric);
556 break;
557
558 default:
559 bfa_sm_fault(fabric->fcs, event);
560 }
561}
562
563/**
564 * There is no attached fabric - private loop or NPort-to-NPort topology.
565 */
566static void
567bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
568 enum bfa_fcs_fabric_event event)
569{
570 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
571 bfa_trc(fabric->fcs, event);
572
573 switch (event) {
574 case BFA_FCS_FABRIC_SM_LINK_DOWN:
575 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
576 bfa_lps_discard(fabric->lps);
577 bfa_fcs_fabric_notify_offline(fabric);
578 break;
579
580 case BFA_FCS_FABRIC_SM_DELETE:
581 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
582 bfa_fcs_fabric_delete(fabric);
583 break;
584
585 case BFA_FCS_FABRIC_SM_NO_FABRIC:
586 bfa_trc(fabric->fcs, fabric->bb_credit);
587 bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa,
588 fabric->bb_credit);
589 break;
590
591 default:
592 bfa_sm_fault(fabric->fcs, event);
593 }
594}
595
596/**
597 * Fabric is online - normal operating state.
598 */
599static void
600bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
601 enum bfa_fcs_fabric_event event)
602{
603 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
604 bfa_trc(fabric->fcs, event);
605
606 switch (event) {
607 case BFA_FCS_FABRIC_SM_LINK_DOWN:
608 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
609 bfa_lps_discard(fabric->lps);
610 bfa_fcs_fabric_notify_offline(fabric);
611 break;
612
613 case BFA_FCS_FABRIC_SM_DELETE:
614 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
615 bfa_fcs_fabric_delete(fabric);
616 break;
617
618 case BFA_FCS_FABRIC_SM_AUTH_FAILED:
619 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed);
620 bfa_lps_discard(fabric->lps);
621 break;
622
623 case BFA_FCS_FABRIC_SM_AUTH_SUCCESS:
624 break;
625
626 default:
627 bfa_sm_fault(fabric->fcs, event);
628 }
629}
630
631/**
632 * Exchanging virtual fabric parameters.
633 */
634static void
635bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric,
636 enum bfa_fcs_fabric_event event)
637{
638 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
639 bfa_trc(fabric->fcs, event);
640
641 switch (event) {
642 case BFA_FCS_FABRIC_SM_CONT_OP:
643 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_evfp_done);
644 break;
197 645
646 case BFA_FCS_FABRIC_SM_ISOLATE:
647 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_isolated);
648 break;
649
650 default:
651 bfa_sm_fault(fabric->fcs, event);
652 }
653}
654
655/**
656 * EVFP exchange complete and VFT tagging is enabled.
657 */
658static void
659bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric,
660 enum bfa_fcs_fabric_event event)
661{
662 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
663 bfa_trc(fabric->fcs, event);
664}
665
666/**
667 * Port is isolated after EVFP exchange due to VF_ID mismatch (N and F).
668 */
669static void
670bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
671 enum bfa_fcs_fabric_event event)
672{
673 struct bfad_s *bfad = (struct bfad_s *)fabric->fcs->bfad;
674 char pwwn_ptr[BFA_STRING_32];
675
676 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
677 bfa_trc(fabric->fcs, event);
678 wwn2str(pwwn_ptr, fabric->bport.port_cfg.pwwn);
679
680 BFA_LOG(KERN_INFO, bfad, log_level,
681 "Port is isolated due to VF_ID mismatch. "
682 "PWWN: %s Port VF_ID: %04x switch port VF_ID: %04x.",
683 pwwn_ptr, fabric->fcs->port_vfid,
684 fabric->event_arg.swp_vfid);
685}
686
687/**
688 * Fabric is being deleted, awaiting vport delete completions.
689 */
690static void
691bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
692 enum bfa_fcs_fabric_event event)
693{
694 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
695 bfa_trc(fabric->fcs, event);
696
697 switch (event) {
698 case BFA_FCS_FABRIC_SM_DELCOMP:
699 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
700 bfa_fcs_modexit_comp(fabric->fcs);
701 break;
702
703 case BFA_FCS_FABRIC_SM_LINK_UP:
704 break;
705
706 case BFA_FCS_FABRIC_SM_LINK_DOWN:
707 bfa_fcs_fabric_notify_offline(fabric);
708 break;
709
710 default:
711 bfa_sm_fault(fabric->fcs, event);
712 }
713}
714
715
716
717/**
718 * fcs_fabric_private fabric private functions
719 */
720
721static void
722bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric)
723{
724 struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg;
725
726 port_cfg->roles = BFA_LPORT_ROLE_FCP_IM;
727 port_cfg->nwwn = bfa_ioc_get_nwwn(&fabric->fcs->bfa->ioc);
728 port_cfg->pwwn = bfa_ioc_get_pwwn(&fabric->fcs->bfa->ioc);
729}
730
731/**
732 * Port Symbolic Name Creation for base port.
733 */
198void 734void
199bfa_fcs_log_init(struct bfa_fcs_s *fcs, struct bfa_log_mod_s *logmod) 735bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
736{
737 struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg;
738 char model[BFA_ADAPTER_MODEL_NAME_LEN] = {0};
739 struct bfa_fcs_driver_info_s *driver_info = &fabric->fcs->driver_info;
740
741 bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
742
743 /* Model name/number */
744 strncpy((char *)&port_cfg->sym_name, model,
745 BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
746 strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
747 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
748
749 /* Driver Version */
750 strncat((char *)&port_cfg->sym_name, (char *)driver_info->version,
751 BFA_FCS_PORT_SYMBNAME_VERSION_SZ);
752 strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
753 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
754
755 /* Host machine name */
756 strncat((char *)&port_cfg->sym_name,
757 (char *)driver_info->host_machine_name,
758 BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ);
759 strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
760 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
761
762 /*
763 * Host OS Info :
764 * If OS Patch Info is not there, do not truncate any bytes from the
765 * OS name string and instead copy the entire OS info string (64 bytes).
766 */
767 if (driver_info->host_os_patch[0] == '\0') {
768 strncat((char *)&port_cfg->sym_name,
769 (char *)driver_info->host_os_name,
770 BFA_FCS_OS_STR_LEN);
771 strncat((char *)&port_cfg->sym_name,
772 BFA_FCS_PORT_SYMBNAME_SEPARATOR,
773 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
774 } else {
775 strncat((char *)&port_cfg->sym_name,
776 (char *)driver_info->host_os_name,
777 BFA_FCS_PORT_SYMBNAME_OSINFO_SZ);
778 strncat((char *)&port_cfg->sym_name,
779 BFA_FCS_PORT_SYMBNAME_SEPARATOR,
780 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
781
782 /* Append host OS Patch Info */
783 strncat((char *)&port_cfg->sym_name,
784 (char *)driver_info->host_os_patch,
785 BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ);
786 }
787
788 /* null terminate */
789 port_cfg->sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
790}
791
792/**
793 * bfa lps login completion callback
794 */
795void
796bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status)
797{
798 struct bfa_fcs_fabric_s *fabric = uarg;
799
800 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
801 bfa_trc(fabric->fcs, status);
802
803 switch (status) {
804 case BFA_STATUS_OK:
805 fabric->stats.flogi_accepts++;
806 break;
807
808 case BFA_STATUS_INVALID_MAC:
809 /* Only for CNA */
810 fabric->stats.flogi_acc_err++;
811 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
812
813 return;
814
815 case BFA_STATUS_EPROTOCOL:
816 switch (bfa_lps_get_extstatus(fabric->lps)) {
817 case BFA_EPROTO_BAD_ACCEPT:
818 fabric->stats.flogi_acc_err++;
819 break;
820
821 case BFA_EPROTO_UNKNOWN_RSP:
822 fabric->stats.flogi_unknown_rsp++;
823 break;
824
825 default:
826 break;
827 }
828 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
829
830 return;
831
832 case BFA_STATUS_FABRIC_RJT:
833 fabric->stats.flogi_rejects++;
834 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
835 return;
836
837 default:
838 fabric->stats.flogi_rsp_err++;
839 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
840 return;
841 }
842
843 fabric->bb_credit = bfa_lps_get_peer_bbcredit(fabric->lps);
844 bfa_trc(fabric->fcs, fabric->bb_credit);
845
846 if (!bfa_lps_is_brcd_fabric(fabric->lps))
847 fabric->fabric_name = bfa_lps_get_peer_nwwn(fabric->lps);
848
849 /*
850 * Check port type. It should be 1 = F-port.
851 */
852 if (bfa_lps_is_fport(fabric->lps)) {
853 fabric->bport.pid = bfa_lps_get_pid(fabric->lps);
854 fabric->is_npiv = bfa_lps_is_npiv_en(fabric->lps);
855 fabric->is_auth = bfa_lps_is_authreq(fabric->lps);
856 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_CONT_OP);
857 } else {
858 /*
859 * Nport-2-Nport direct attached
860 */
861 fabric->bport.port_topo.pn2n.rem_port_wwn =
862 bfa_lps_get_peer_pwwn(fabric->lps);
863 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC);
864 }
865
866 bfa_trc(fabric->fcs, fabric->bport.pid);
867 bfa_trc(fabric->fcs, fabric->is_npiv);
868 bfa_trc(fabric->fcs, fabric->is_auth);
869}
870/**
871 * Allocate and send FLOGI.
872 */
873static void
874bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric)
875{
876 struct bfa_s *bfa = fabric->fcs->bfa;
877 struct bfa_lport_cfg_s *pcfg = &fabric->bport.port_cfg;
878 u8 alpa = 0;
879
880 if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)
881 alpa = bfa_fcport_get_myalpa(bfa);
882
883 bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_fcport_get_maxfrsize(bfa),
884 pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd);
885
886 fabric->stats.flogi_sent++;
887}
888
889static void
890bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric)
891{
892 struct bfa_fcs_vport_s *vport;
893 struct list_head *qe, *qen;
894
895 bfa_trc(fabric->fcs, fabric->fabric_name);
896
897 bfa_fcs_fabric_set_opertype(fabric);
898 fabric->stats.fabric_onlines++;
899
900 /**
901 * notify online event to base and then virtual ports
902 */
903 bfa_fcs_lport_online(&fabric->bport);
904
905 list_for_each_safe(qe, qen, &fabric->vport_q) {
906 vport = (struct bfa_fcs_vport_s *) qe;
907 bfa_fcs_vport_online(vport);
908 }
909}
910
911static void
912bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric)
913{
914 struct bfa_fcs_vport_s *vport;
915 struct list_head *qe, *qen;
916
917 bfa_trc(fabric->fcs, fabric->fabric_name);
918 fabric->stats.fabric_offlines++;
919
920 /**
921 * notify offline event first to vports and then base port.
922 */
923 list_for_each_safe(qe, qen, &fabric->vport_q) {
924 vport = (struct bfa_fcs_vport_s *) qe;
925 bfa_fcs_vport_offline(vport);
926 }
927
928 bfa_fcs_lport_offline(&fabric->bport);
929
930 fabric->fabric_name = 0;
931 fabric->fabric_ip_addr[0] = 0;
932}
933
934static void
935bfa_fcs_fabric_delay(void *cbarg)
200{ 936{
201 fcs->logm = logmod; 937 struct bfa_fcs_fabric_s *fabric = cbarg;
938
939 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED);
202} 940}
203 941
942/**
943 * Delete all vports and wait for vport delete completions.
944 */
945static void
946bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric)
947{
948 struct bfa_fcs_vport_s *vport;
949 struct list_head *qe, *qen;
204 950
951 list_for_each_safe(qe, qen, &fabric->vport_q) {
952 vport = (struct bfa_fcs_vport_s *) qe;
953 bfa_fcs_vport_fcs_delete(vport);
954 }
955
956 bfa_fcs_lport_delete(&fabric->bport);
957 bfa_wc_wait(&fabric->wc);
958}
959
960static void
961bfa_fcs_fabric_delete_comp(void *cbarg)
962{
963 struct bfa_fcs_fabric_s *fabric = cbarg;
964
965 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP);
966}
967
968/**
969 * fcs_fabric_public fabric public functions
970 */
971
972/**
973 * Attach time initialization.
974 */
205void 975void
206bfa_fcs_aen_init(struct bfa_fcs_s *fcs, struct bfa_aen_s *aen) 976bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs)
207{ 977{
208 fcs->aen = aen; 978 struct bfa_fcs_fabric_s *fabric;
979
980 fabric = &fcs->fabric;
981 bfa_os_memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s));
982
983 /**
984 * Initialize base fabric.
985 */
986 fabric->fcs = fcs;
987 INIT_LIST_HEAD(&fabric->vport_q);
988 INIT_LIST_HEAD(&fabric->vf_q);
989 fabric->lps = bfa_lps_alloc(fcs->bfa);
990 bfa_assert(fabric->lps);
991
992 /**
993 * Initialize fabric delete completion handler. Fabric deletion is
994 * complete when the last vport delete is complete.
995 */
996 bfa_wc_init(&fabric->wc, bfa_fcs_fabric_delete_comp, fabric);
997 bfa_wc_up(&fabric->wc); /* For the base port */
998
999 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
1000 bfa_fcs_lport_attach(&fabric->bport, fabric->fcs, FC_VF_ID_NULL, NULL);
209} 1001}
210 1002
211void 1003void
212bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs) 1004bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs)
213{ 1005{
214 bfa_wc_down(&fcs->wc); 1006 bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_CREATE);
1007 bfa_trc(fcs, 0);
215} 1008}
216 1009
1010/**
1011 * Module cleanup
1012 */
1013void
1014bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs)
1015{
1016 struct bfa_fcs_fabric_s *fabric;
1017
1018 bfa_trc(fcs, 0);
1019
1020 /**
1021 * Cleanup base fabric.
1022 */
1023 fabric = &fcs->fabric;
1024 bfa_lps_delete(fabric->lps);
1025 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELETE);
1026}
217 1027
1028/**
1029 * Fabric module start -- kick starts FCS actions
1030 */
1031void
1032bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs)
1033{
1034 struct bfa_fcs_fabric_s *fabric;
1035
1036 bfa_trc(fcs, 0);
1037 fabric = &fcs->fabric;
1038 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START);
1039}
1040
1041/**
1042 * Suspend fabric activity as part of driver suspend.
1043 */
1044void
1045bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs)
1046{
1047}
1048
1049bfa_boolean_t
1050bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric)
1051{
1052 return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_loopback);
1053}
1054
1055bfa_boolean_t
1056bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric)
1057{
1058 return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_auth_failed);
1059}
1060
1061enum bfa_port_type
1062bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric)
1063{
1064 return fabric->oper_type;
1065}
1066
1067/**
1068 * Link up notification from BFA physical port module.
1069 */
1070void
1071bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric)
1072{
1073 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
1074 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_UP);
1075}
1076
1077/**
1078 * Link down notification from BFA physical port module.
1079 */
1080void
1081bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric)
1082{
1083 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
1084 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN);
1085}
1086
1087/**
1088 * A child vport is being created in the fabric.
1089 *
1090 * Call from vport module at vport creation. A list of base port and vports
1091 * belonging to a fabric is maintained to propagate link events.
1092 *
1093 * param[in] fabric - Fabric instance. This can be a base fabric or vf.
1094 * param[in] vport - Vport being created.
1095 *
1096 * @return None (always succeeds)
1097 */
1098void
1099bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
1100 struct bfa_fcs_vport_s *vport)
1101{
1102 /**
1103 * - add vport to fabric's vport_q
1104 */
1105 bfa_trc(fabric->fcs, fabric->vf_id);
1106
1107 list_add_tail(&vport->qe, &fabric->vport_q);
1108 fabric->num_vports++;
1109 bfa_wc_up(&fabric->wc);
1110}
1111
1112/**
1113 * A child vport is being deleted from fabric.
1114 *
1115 * Vport is being deleted.
1116 */
1117void
1118bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric,
1119 struct bfa_fcs_vport_s *vport)
1120{
1121 list_del(&vport->qe);
1122 fabric->num_vports--;
1123 bfa_wc_down(&fabric->wc);
1124}
1125
1126/**
1127 * Base port is deleted.
1128 */
1129void
1130bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric)
1131{
1132 bfa_wc_down(&fabric->wc);
1133}
1134
1135
1136/**
1137 * Check if fabric is online.
1138 *
1139 * param[in] fabric - Fabric instance. This can be a base fabric or vf.
1140 *
1141 * @return TRUE/FALSE
1142 */
1143int
1144bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric)
1145{
1146 return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online);
1147}
1148
1149/**
1150 * brief
1151 *
1152 */
1153bfa_status_t
1154bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf, struct bfa_fcs_s *fcs,
1155 struct bfa_lport_cfg_s *port_cfg, struct bfad_vf_s *vf_drv)
1156{
1157 bfa_sm_set_state(vf, bfa_fcs_fabric_sm_uninit);
1158 return BFA_STATUS_OK;
1159}
1160
1161/**
1162 * Lookup for a vport withing a fabric given its pwwn
1163 */
1164struct bfa_fcs_vport_s *
1165bfa_fcs_fabric_vport_lookup(struct bfa_fcs_fabric_s *fabric, wwn_t pwwn)
1166{
1167 struct bfa_fcs_vport_s *vport;
1168 struct list_head *qe;
1169
1170 list_for_each(qe, &fabric->vport_q) {
1171 vport = (struct bfa_fcs_vport_s *) qe;
1172 if (bfa_fcs_lport_get_pwwn(&vport->lport) == pwwn)
1173 return vport;
1174 }
1175
1176 return NULL;
1177}
1178
1179/**
1180 * In a given fabric, return the number of lports.
1181 *
1182 * param[in] fabric - Fabric instance. This can be a base fabric or vf.
1183 *
1184 * @return : 1 or more.
1185 */
1186u16
1187bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric)
1188{
1189 return fabric->num_vports;
1190}
1191
1192/*
1193 * Get OUI of the attached switch.
1194 *
1195 * Note : Use of this function should be avoided as much as possible.
1196 * This function should be used only if there is any requirement
1197* to check for FOS version below 6.3.
1198 * To check if the attached fabric is a brocade fabric, use
1199 * bfa_lps_is_brcd_fabric() which works for FOS versions 6.3
1200 * or above only.
1201 */
1202
1203u16
1204bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric)
1205{
1206 wwn_t fab_nwwn;
1207 u8 *tmp;
1208 u16 oui;
1209
1210 fab_nwwn = bfa_lps_get_peer_nwwn(fabric->lps);
1211
1212 tmp = (u8 *)&fab_nwwn;
1213 oui = (tmp[3] << 8) | tmp[4];
1214
1215 return oui;
1216}
1217/**
1218 * Unsolicited frame receive handling.
1219 */
1220void
1221bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
1222 u16 len)
1223{
1224 u32 pid = fchs->d_id;
1225 struct bfa_fcs_vport_s *vport;
1226 struct list_head *qe;
1227 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
1228 struct fc_logi_s *flogi = (struct fc_logi_s *) els_cmd;
1229
1230 bfa_trc(fabric->fcs, len);
1231 bfa_trc(fabric->fcs, pid);
1232
1233 /**
1234 * Look for our own FLOGI frames being looped back. This means an
1235 * external loopback cable is in place. Our own FLOGI frames are
1236 * sometimes looped back when switch port gets temporarily bypassed.
1237 */
1238 if ((pid == bfa_os_ntoh3b(FC_FABRIC_PORT)) &&
1239 (els_cmd->els_code == FC_ELS_FLOGI) &&
1240 (flogi->port_name == bfa_fcs_lport_get_pwwn(&fabric->bport))) {
1241 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LOOPBACK);
1242 return;
1243 }
1244
1245 /**
1246 * FLOGI/EVFP exchanges should be consumed by base fabric.
1247 */
1248 if (fchs->d_id == bfa_os_hton3b(FC_FABRIC_PORT)) {
1249 bfa_trc(fabric->fcs, pid);
1250 bfa_fcs_fabric_process_uf(fabric, fchs, len);
1251 return;
1252 }
1253
1254 if (fabric->bport.pid == pid) {
1255 /**
1256 * All authentication frames should be routed to auth
1257 */
1258 bfa_trc(fabric->fcs, els_cmd->els_code);
1259 if (els_cmd->els_code == FC_ELS_AUTH) {
1260 bfa_trc(fabric->fcs, els_cmd->els_code);
1261 return;
1262 }
1263
1264 bfa_trc(fabric->fcs, *(u8 *) ((u8 *) fchs));
1265 bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len);
1266 return;
1267 }
1268
1269 /**
1270 * look for a matching local port ID
1271 */
1272 list_for_each(qe, &fabric->vport_q) {
1273 vport = (struct bfa_fcs_vport_s *) qe;
1274 if (vport->lport.pid == pid) {
1275 bfa_fcs_lport_uf_recv(&vport->lport, fchs, len);
1276 return;
1277 }
1278 }
1279 bfa_trc(fabric->fcs, els_cmd->els_code);
1280 bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len);
1281}
1282
1283/**
1284 * Unsolicited frames to be processed by fabric.
1285 */
1286static void
1287bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
1288 u16 len)
1289{
1290 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
1291
1292 bfa_trc(fabric->fcs, els_cmd->els_code);
1293
1294 switch (els_cmd->els_code) {
1295 case FC_ELS_FLOGI:
1296 bfa_fcs_fabric_process_flogi(fabric, fchs, len);
1297 break;
1298
1299 default:
1300 /*
1301 * need to generate a LS_RJT
1302 */
1303 break;
1304 }
1305}
1306
1307/**
1308 * Process incoming FLOGI
1309 */
/* Point-to-point peer login: accept the FLOGI and leave fabric mode. */
static void
bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric,
			struct fchs_s *fchs, u16 len)
{
	struct fc_logi_s *flogi = (struct fc_logi_s *) (fchs + 1);
	struct bfa_fcs_lport_s *bport = &fabric->bport;

	bfa_trc(fabric->fcs, fchs->s_id);

	fabric->stats.flogi_rcvd++;
	/*
	 * Check port type. It should be 0 = n-port.
	 */
	if (flogi->csp.port_type) {
		/*
		 * @todo: may need to send a LS_RJT
		 */
		bfa_trc(fabric->fcs, flogi->port_name);
		fabric->stats.flogi_rejected++;
		return;
	}

	/* Save peer BB credit, WWN and OX_ID needed to build the ACC reply. */
	fabric->bb_credit = bfa_os_ntohs(flogi->csp.bbcred);
	bport->port_topo.pn2n.rem_port_wwn = flogi->port_name;
	bport->port_topo.pn2n.reply_oxid = fchs->ox_id;

	/*
	 * Send a Flogi Acc, then drive the fabric state machine into
	 * point-to-point (no fabric) operation.
	 */
	bfa_fcs_fabric_send_flogi_acc(fabric);
	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC);
}
1342
/*
 * Send a FLOGI accept to the point-to-point peer, using the OX_ID and
 * parameters saved by bfa_fcs_fabric_process_flogi().
 */
static void
bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
{
	struct bfa_lport_cfg_s *pcfg = &fabric->bport.port_cfg;
	struct bfa_fcs_lport_n2n_s *n2n_port = &fabric->bport.port_topo.pn2n;
	struct bfa_s *bfa = fabric->fcs->bfa;
	struct bfa_fcxp_s *fcxp;
	u16 reqlen;
	struct fchs_s fchs;

	fcxp = bfa_fcs_fcxp_alloc(fabric->fcs);
	/**
	 * Do not expect this failure -- expect remote node to retry
	 */
	if (!fcxp)
		return;

	/* Build the ACC payload directly in the fcxp request buffer. */
	reqlen = fc_flogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
				    bfa_os_hton3b(FC_FABRIC_PORT),
				    n2n_port->reply_oxid, pcfg->pwwn,
				    pcfg->nwwn,
				    bfa_fcport_get_maxfrsize(bfa),
				    bfa_fcport_get_rx_bbcredit(bfa));

	/* Class-3 send; completion only traces status (flogiacc_comp). */
	bfa_fcxp_send(fcxp, NULL, fabric->vf_id, bfa_lps_get_tag(fabric->lps),
		      BFA_FALSE, FC_CLASS_3,
		      reqlen, &fchs, bfa_fcs_fabric_flogiacc_comp, fabric,
		      FC_MAX_PDUSZ, 0);
}
1372
1373/**
1374 * Flogi Acc completion callback.
1375 */
1376static void
1377bfa_fcs_fabric_flogiacc_comp(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1378 bfa_status_t status, u32 rsp_len,
1379 u32 resid_len, struct fchs_s *rspfchs)
1380{
1381 struct bfa_fcs_fabric_s *fabric = cbarg;
1382
1383 bfa_trc(fabric->fcs, status);
1384}
1385
1386/*
1387 *
1388 * @param[in] fabric - fabric
1389 * @param[in] wwn_t - new fabric name
1390 *
1391 * @return - none
1392 */
1393void
1394bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
1395 wwn_t fabric_name)
1396{
1397 struct bfad_s *bfad = (struct bfad_s *)fabric->fcs->bfad;
1398 char pwwn_ptr[BFA_STRING_32];
1399 char fwwn_ptr[BFA_STRING_32];
1400
1401 bfa_trc(fabric->fcs, fabric_name);
1402
1403 if (fabric->fabric_name == 0) {
1404 /*
1405 * With BRCD switches, we don't get Fabric Name in FLOGI.
1406 * Don't generate a fabric name change event in this case.
1407 */
1408 fabric->fabric_name = fabric_name;
1409 } else {
1410 fabric->fabric_name = fabric_name;
1411 wwn2str(pwwn_ptr, bfa_fcs_lport_get_pwwn(&fabric->bport));
1412 wwn2str(fwwn_ptr,
1413 bfa_fcs_lport_get_fabric_name(&fabric->bport));
1414 BFA_LOG(KERN_WARNING, bfad, log_level,
1415 "Base port WWN = %s Fabric WWN = %s\n",
1416 pwwn_ptr, fwwn_ptr);
1417 }
1418}
1419
1420/**
1421 * fcs_vf_api virtual fabrics API
1422 */
1423
1424/**
1425 * Enable VF mode.
1426 *
1427 * @param[in] fcs fcs module instance
1428 * @param[in] vf_id default vf_id of port, FC_VF_ID_NULL
1429 * to use standard default vf_id of 1.
1430 *
1431 * @retval BFA_STATUS_OK vf mode is enabled
1432 * @retval BFA_STATUS_BUSY Port is active. Port must be disabled
1433 * before VF mode can be enabled.
1434 */
bfa_status_t
bfa_fcs_vf_mode_enable(struct bfa_fcs_s *fcs, u16 vf_id)
{
	/* Stub: VF mode handling is not implemented; always succeeds. */
	return BFA_STATUS_OK;
}
1440
1441/**
1442 * Disable VF mode.
1443 *
1444 * @param[in] fcs fcs module instance
1445 *
1446 * @retval BFA_STATUS_OK vf mode is disabled
1447 * @retval BFA_STATUS_BUSY VFs are present and being used. All
1448 * VFs must be deleted before disabling
1449 * VF mode.
1450 */
bfa_status_t
bfa_fcs_vf_mode_disable(struct bfa_fcs_s *fcs)
{
	/* Stub: VF mode handling is not implemented; always succeeds. */
	return BFA_STATUS_OK;
}
1456
1457/**
1458 * Create a new VF instance.
1459 *
1460 * A new VF is created using the given VF configuration. A VF is identified
1461 * by VF id. No duplicate VF creation is allowed with the same VF id. Once
1462 * a VF is created, VF is automatically started after link initialization
1463 * and EVFP exchange is completed.
1464 *
1465 * param[in] vf - FCS vf data structure. Memory is
1466 * allocated by caller (driver)
1467 * param[in] fcs - FCS module
1468 * param[in] vf_cfg - VF configuration
1469 * param[in] vf_drv - Opaque handle back to the driver's
1470 * virtual vf structure
1471 *
1472 * retval BFA_STATUS_OK VF creation is successful
1473 * retval BFA_STATUS_FAILED VF creation failed
1474 * retval BFA_STATUS_EEXIST A VF exists with the given vf_id
1475 */
bfa_status_t
bfa_fcs_vf_create(bfa_fcs_vf_t *vf, struct bfa_fcs_s *fcs, u16 vf_id,
		  struct bfa_lport_cfg_s *port_cfg, struct bfad_vf_s *vf_drv)
{
	bfa_trc(fcs, vf_id);
	/* Stub: only traces the request; no VF object is created. */
	return BFA_STATUS_OK;
}
1483
1484/**
1485 * Use this function to delete a BFA VF object. VF object should
1486 * be stopped before this function call.
1487 *
1488 * param[in] vf - pointer to bfa_vf_t.
1489 *
1490 * retval BFA_STATUS_OK On vf deletion success
1491 * retval BFA_STATUS_BUSY VF is not in a stopped state
1492 * retval BFA_STATUS_INPROGRESS VF deletion in in progress
1493 */
bfa_status_t
bfa_fcs_vf_delete(bfa_fcs_vf_t *vf)
{
	bfa_trc(vf->fcs, vf->vf_id);
	/* Stub: only traces the request; no VF state is torn down. */
	return BFA_STATUS_OK;
}
1500
1501
1502/**
1503 * Returns attributes of the given VF.
1504 *
1505 * param[in] vf pointer to bfa_vf_t.
1506 * param[out] vf_attr vf attributes returned
1507 *
1508 * return None
1509 */
void
bfa_fcs_vf_get_attr(bfa_fcs_vf_t *vf, struct bfa_vf_attr_s *vf_attr)
{
	bfa_trc(vf->fcs, vf->vf_id);
	/* Stub: vf_attr is left untouched; callers get no attributes. */
}
1515
1516/**
1517 * Return statistics associated with the given vf.
1518 *
1519 * param[in] vf pointer to bfa_vf_t.
1520 * param[out] vf_stats vf statistics returned
1521 *
1522 * @return None
1523 */
1524void
1525bfa_fcs_vf_get_stats(bfa_fcs_vf_t *vf, struct bfa_vf_stats_s *vf_stats)
1526{
1527 bfa_os_memcpy(vf_stats, &vf->stats, sizeof(struct bfa_vf_stats_s));
1528}
1529
1530/**
1531 * clear statistics associated with the given vf.
1532 *
1533 * param[in] vf pointer to bfa_vf_t.
1534 *
1535 * @return None
1536 */
1537void
1538bfa_fcs_vf_clear_stats(bfa_fcs_vf_t *vf)
1539{
1540 bfa_os_memset(&vf->stats, 0, sizeof(struct bfa_vf_stats_s));
1541}
1542
1543/**
1544 * Returns FCS vf structure for a given vf_id.
1545 *
1546 * param[in] vf_id - VF_ID
1547 *
1548 * return
1549 * If lookup succeeds, retuns fcs vf object, otherwise returns NULL
1550 */
1551bfa_fcs_vf_t *
1552bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id)
1553{
1554 bfa_trc(fcs, vf_id);
1555 if (vf_id == FC_VF_ID_NULL)
1556 return &fcs->fabric;
1557
1558 return NULL;
1559}
1560
1561/**
1562 * Return the list of VFs configured.
1563 *
1564 * param[in] fcs fcs module instance
1565 * param[out] vf_ids returned list of vf_ids
1566 * param[in,out] nvfs in:size of vf_ids array,
1567 * out:total elements present,
1568 * actual elements returned is limited by the size
1569 *
1570 * return Driver VF structure
1571 */
void
bfa_fcs_vf_list(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs)
{
	bfa_trc(fcs, *nvfs);
	/* Stub: neither vf_ids nor *nvfs is modified. */
}
1577
1578/**
1579 * Return the list of all VFs visible from fabric.
1580 *
1581 * param[in] fcs fcs module instance
1582 * param[out] vf_ids returned list of vf_ids
1583 * param[in,out] nvfs in:size of vf_ids array,
1584 * out:total elements present,
1585 * actual elements returned is limited by the size
1586 *
1587 * return Driver VF structure
1588 */
void
bfa_fcs_vf_list_all(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs)
{
	bfa_trc(fcs, *nvfs);
	/* Stub: neither vf_ids nor *nvfs is modified. */
}
1594
1595/**
1596 * Return the list of local logical ports present in the given VF.
1597 *
1598 * param[in] vf vf for which logical ports are returned
1599 * param[out] lpwwn returned logical port wwn list
1600 * param[in,out] nlports in:size of lpwwn list;
1601 * out:total elements present,
1602 * actual elements returned is limited by the size
1603 */
1604void
1605bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t lpwwn[], int *nlports)
1606{
1607 struct list_head *qe;
1608 struct bfa_fcs_vport_s *vport;
1609 int i;
1610 struct bfa_fcs_s *fcs;
1611
1612 if (vf == NULL || lpwwn == NULL || *nlports == 0)
1613 return;
1614
1615 fcs = vf->fcs;
1616
1617 bfa_trc(fcs, vf->vf_id);
1618 bfa_trc(fcs, (u32) *nlports);
1619
1620 i = 0;
1621 lpwwn[i++] = vf->bport.port_cfg.pwwn;
1622
1623 list_for_each(qe, &vf->vport_q) {
1624 if (i >= *nlports)
1625 break;
1626
1627 vport = (struct bfa_fcs_vport_s *) qe;
1628 lpwwn[i++] = vport->lport.port_cfg.pwwn;
1629 }
1630
1631 bfa_trc(fcs, i);
1632 *nlports = i;
1633}
1634
1635/**
1636 * BFA FCS PPORT ( physical port)
1637 */
1638static void
1639bfa_fcs_port_event_handler(void *cbarg, enum bfa_port_linkstate event)
1640{
1641 struct bfa_fcs_s *fcs = cbarg;
1642
1643 bfa_trc(fcs, event);
1644
1645 switch (event) {
1646 case BFA_PORT_LINKUP:
1647 bfa_fcs_fabric_link_up(&fcs->fabric);
1648 break;
1649
1650 case BFA_PORT_LINKDOWN:
1651 bfa_fcs_fabric_link_down(&fcs->fabric);
1652 break;
1653
1654 default:
1655 bfa_assert(0);
1656 }
1657}
1658
/* Hook the FCS link-state handler into the BFA physical-port module. */
void
bfa_fcs_port_attach(struct bfa_fcs_s *fcs)
{
	bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs);
}
1664
1665/**
1666 * BFA FCS UF ( Unsolicited Frames)
1667 */
1668
1669/**
1670 * BFA callback for unsolicited frame receive handler.
1671 *
1672 * @param[in] cbarg callback arg for receive handler
1673 * @param[in] uf unsolicited frame descriptor
1674 *
1675 * @return None
1676 */
static void
bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
{
	struct bfa_fcs_s *fcs = (struct bfa_fcs_s *) cbarg;
	struct fchs_s *fchs = bfa_uf_get_frmbuf(uf);
	u16 len = bfa_uf_get_frmlen(uf);
	struct fc_vft_s *vft;
	struct bfa_fcs_fabric_s *fabric;

	/**
	 * check for VFT header
	 */
	if (fchs->routing == FC_RTG_EXT_HDR &&
	    fchs->cat_info == FC_CAT_VFT_HDR) {
		bfa_stats(fcs, uf.tagged);
		vft = bfa_uf_get_frmbuf(uf);
		if (fcs->port_vfid == vft->vf_id)
			fabric = &fcs->fabric;
		else
			fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id);

		/**
		 * drop frame if vfid is unknown
		 * (bfa_fcs_vf_lookup() resolves only the default VF, so any
		 * other tag lands here)
		 */
		if (!fabric) {
			bfa_assert(0);
			bfa_stats(fcs, uf.vfid_unknown);
			bfa_uf_free(uf);
			return;
		}

		/**
		 * skip vft header so fchs points at the inner FC header
		 */
		fchs = (struct fchs_s *) (vft + 1);
		len -= sizeof(struct fc_vft_s);

		bfa_trc(fcs, vft->vf_id);
	} else {
		bfa_stats(fcs, uf.untagged);
		fabric = &fcs->fabric;
	}

	/* Trace the first six words (24 bytes) of the FC header. */
	bfa_trc(fcs, ((u32 *) fchs)[0]);
	bfa_trc(fcs, ((u32 *) fchs)[1]);
	bfa_trc(fcs, ((u32 *) fchs)[2]);
	bfa_trc(fcs, ((u32 *) fchs)[3]);
	bfa_trc(fcs, ((u32 *) fchs)[4]);
	bfa_trc(fcs, ((u32 *) fchs)[5]);
	bfa_trc(fcs, len);

	/* Hand the (possibly untagged) frame to the owning fabric, then
	 * return the uf buffer to BFA. */
	bfa_fcs_fabric_uf_recv(fabric, fchs, len);
	bfa_uf_free(uf);
}
1731
/* Register bfa_fcs_uf_recv() as BFA's unsolicited-frame handler. */
void
bfa_fcs_uf_attach(struct bfa_fcs_s *fcs)
{
	bfa_uf_recv_register(fcs->bfa, bfa_fcs_uf_recv, fcs);
}
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
new file mode 100644
index 000000000000..d75045df1e7e
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -0,0 +1,779 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_FCS_H__
19#define __BFA_FCS_H__
20
21#include "bfa_cs.h"
22#include "bfa_defs.h"
23#include "bfa_defs_fcs.h"
24#include "bfa_modules.h"
25#include "bfa_fc.h"
26
27#define BFA_FCS_OS_STR_LEN 64
28
29/*
30 * !!! Only append to the enums defined here to avoid any versioning
31 * !!! needed between trace utility and driver version
32 */
33enum {
34 BFA_TRC_FCS_FCS = 1,
35 BFA_TRC_FCS_PORT = 2,
36 BFA_TRC_FCS_RPORT = 3,
37 BFA_TRC_FCS_FCPIM = 4,
38};
39
40
41struct bfa_fcs_s;
42
43#define __fcs_min_cfg(__fcs) ((__fcs)->min_cfg)
44void bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs);
45
46#define BFA_FCS_BRCD_SWITCH_OUI 0x051e
47#define N2N_LOCAL_PID 0x010000
48#define N2N_REMOTE_PID 0x020000
49#define BFA_FCS_RETRY_TIMEOUT 2000
50#define BFA_FCS_PID_IS_WKA(pid) ((bfa_os_ntoh3b(pid) > 0xFFF000) ? 1 : 0)
51
52
53
54struct bfa_fcs_lport_ns_s {
55 bfa_sm_t sm; /* state machine */
56 struct bfa_timer_s timer;
57 struct bfa_fcs_lport_s *port; /* parent port */
58 struct bfa_fcxp_s *fcxp;
59 struct bfa_fcxp_wqe_s fcxp_wqe;
60};
61
62
63struct bfa_fcs_lport_scn_s {
64 bfa_sm_t sm; /* state machine */
65 struct bfa_timer_s timer;
66 struct bfa_fcs_lport_s *port; /* parent port */
67 struct bfa_fcxp_s *fcxp;
68 struct bfa_fcxp_wqe_s fcxp_wqe;
69};
70
71
72struct bfa_fcs_lport_fdmi_s {
73 bfa_sm_t sm; /* state machine */
74 struct bfa_timer_s timer;
75 struct bfa_fcs_lport_ms_s *ms; /* parent ms */
76 struct bfa_fcxp_s *fcxp;
77 struct bfa_fcxp_wqe_s fcxp_wqe;
78 u8 retry_cnt; /* retry count */
79 u8 rsvd[3];
80};
81
82
83struct bfa_fcs_lport_ms_s {
84 bfa_sm_t sm; /* state machine */
85 struct bfa_timer_s timer;
86 struct bfa_fcs_lport_s *port; /* parent port */
87 struct bfa_fcxp_s *fcxp;
88 struct bfa_fcxp_wqe_s fcxp_wqe;
89 struct bfa_fcs_lport_fdmi_s fdmi; /* FDMI component of MS */
90 u8 retry_cnt; /* retry count */
91 u8 rsvd[3];
92};
93
94
95struct bfa_fcs_lport_fab_s {
96 struct bfa_fcs_lport_ns_s ns; /* NS component of port */
97 struct bfa_fcs_lport_scn_s scn; /* scn component of port */
98 struct bfa_fcs_lport_ms_s ms; /* MS component of port */
99};
100
101#define MAX_ALPA_COUNT 127
102
103struct bfa_fcs_lport_loop_s {
104 u8 num_alpa; /* Num of ALPA entries in the map */
105 u8 alpa_pos_map[MAX_ALPA_COUNT]; /* ALPA Positional
106 *Map */
107 struct bfa_fcs_lport_s *port; /* parent port */
108};
109
110struct bfa_fcs_lport_n2n_s {
111 u32 rsvd;
112 u16 reply_oxid; /* ox_id from the req flogi to be
113 *used in flogi acc */
114 wwn_t rem_port_wwn; /* Attached port's wwn */
115};
116
117
118union bfa_fcs_lport_topo_u {
119 struct bfa_fcs_lport_fab_s pfab;
120 struct bfa_fcs_lport_loop_s ploop;
121 struct bfa_fcs_lport_n2n_s pn2n;
122};
123
124
125struct bfa_fcs_lport_s {
126 struct list_head qe; /* used by port/vport */
127 bfa_sm_t sm; /* state machine */
128 struct bfa_fcs_fabric_s *fabric; /* parent fabric */
129 struct bfa_lport_cfg_s port_cfg; /* port configuration */
130 struct bfa_timer_s link_timer; /* timer for link offline */
131 u32 pid:24; /* FC address */
132 u8 lp_tag; /* lport tag */
133 u16 num_rports; /* Num of r-ports */
134 struct list_head rport_q; /* queue of discovered r-ports */
135 struct bfa_fcs_s *fcs; /* FCS instance */
136 union bfa_fcs_lport_topo_u port_topo; /* fabric/loop/n2n details */
137 struct bfad_port_s *bfad_port; /* driver peer instance */
138 struct bfa_fcs_vport_s *vport; /* NULL for base ports */
139 struct bfa_fcxp_s *fcxp;
140 struct bfa_fcxp_wqe_s fcxp_wqe;
141 struct bfa_lport_stats_s stats;
142 struct bfa_wc_s wc; /* waiting counter for events */
143};
144#define BFA_FCS_GET_HAL_FROM_PORT(port) (port->fcs->bfa)
145#define BFA_FCS_GET_NS_FROM_PORT(port) (&port->port_topo.pfab.ns)
146#define BFA_FCS_GET_SCN_FROM_PORT(port) (&port->port_topo.pfab.scn)
147#define BFA_FCS_GET_MS_FROM_PORT(port) (&port->port_topo.pfab.ms)
148#define BFA_FCS_GET_FDMI_FROM_PORT(port) (&port->port_topo.pfab.ms.fdmi)
149#define BFA_FCS_VPORT_IS_INITIATOR_MODE(port) \
150 (port->port_cfg.roles & BFA_LPORT_ROLE_FCP_IM)
151
152/*
153 * forward declaration
154 */
155struct bfad_vf_s;
156
157enum bfa_fcs_fabric_type {
158 BFA_FCS_FABRIC_UNKNOWN = 0,
159 BFA_FCS_FABRIC_SWITCHED = 1,
160 BFA_FCS_FABRIC_N2N = 2,
161};
162
163
164struct bfa_fcs_fabric_s {
165 struct list_head qe; /* queue element */
166 bfa_sm_t sm; /* state machine */
167 struct bfa_fcs_s *fcs; /* FCS instance */
168 struct bfa_fcs_lport_s bport; /* base logical port */
169 enum bfa_fcs_fabric_type fab_type; /* fabric type */
170 enum bfa_port_type oper_type; /* current link topology */
171 u8 is_vf; /* is virtual fabric? */
172 u8 is_npiv; /* is NPIV supported ? */
173 u8 is_auth; /* is Security/Auth supported ? */
174 u16 bb_credit; /* BB credit from fabric */
175 u16 vf_id; /* virtual fabric ID */
176 u16 num_vports; /* num vports */
177 u16 rsvd;
178 struct list_head vport_q; /* queue of virtual ports */
179 struct list_head vf_q; /* queue of virtual fabrics */
180 struct bfad_vf_s *vf_drv; /* driver vf structure */
181 struct bfa_timer_s link_timer; /* Link Failure timer. Vport */
182 wwn_t fabric_name; /* attached fabric name */
183 bfa_boolean_t auth_reqd; /* authentication required */
184 struct bfa_timer_s delay_timer; /* delay timer */
185 union {
186 u16 swp_vfid;/* switch port VF id */
187 } event_arg;
188 struct bfa_wc_s wc; /* wait counter for delete */
189 struct bfa_vf_stats_s stats; /* fabric/vf stats */
190 struct bfa_lps_s *lps; /* lport login services */
191 u8 fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ];
192 /* attached fabric's ip addr */
193};
194
195#define bfa_fcs_fabric_npiv_capable(__f) ((__f)->is_npiv)
196#define bfa_fcs_fabric_is_switched(__f) \
197 ((__f)->fab_type == BFA_FCS_FABRIC_SWITCHED)
198
199/**
200 * The design calls for a single implementation of base fabric and vf.
201 */
202#define bfa_fcs_vf_t struct bfa_fcs_fabric_s
203
204struct bfa_vf_event_s {
205 u32 undefined;
206};
207
208struct bfa_fcs_s;
209struct bfa_fcs_fabric_s;
210
211/*
212 * @todo : need to move to a global config file.
213 * Maximum Rports supported per port (physical/logical).
214 */
215#define BFA_FCS_MAX_RPORTS_SUPP 256 /* @todo : tentative value */
216
217#define bfa_fcs_lport_t struct bfa_fcs_lport_s
218
219/**
220 * Symbolic Name related defines
221 * Total bytes 255.
222 * Physical Port's symbolic name 128 bytes.
223 * For Vports, Vport's symbolic name is appended to the Physical port's
224 * Symbolic Name.
225 *
226 * Physical Port's symbolic name Format : (Total 128 bytes)
227 * Adapter Model number/name : 12 bytes
228 * Driver Version : 10 bytes
229 * Host Machine Name : 30 bytes
230 * Host OS Info : 48 bytes
231 * Host OS PATCH Info : 16 bytes
232 * ( remaining 12 bytes reserved to be used for separator)
233 */
234#define BFA_FCS_PORT_SYMBNAME_SEPARATOR " | "
235
236#define BFA_FCS_PORT_SYMBNAME_MODEL_SZ 12
237#define BFA_FCS_PORT_SYMBNAME_VERSION_SZ 10
238#define BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ 30
239#define BFA_FCS_PORT_SYMBNAME_OSINFO_SZ 48
240#define BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ 16
241
242/**
243 * Get FC port ID for a logical port.
244 */
245#define bfa_fcs_lport_get_fcid(_lport) ((_lport)->pid)
246#define bfa_fcs_lport_get_pwwn(_lport) ((_lport)->port_cfg.pwwn)
247#define bfa_fcs_lport_get_nwwn(_lport) ((_lport)->port_cfg.nwwn)
248#define bfa_fcs_lport_get_psym_name(_lport) ((_lport)->port_cfg.sym_name)
249#define bfa_fcs_lport_is_initiator(_lport) \
250 ((_lport)->port_cfg.roles & BFA_LPORT_ROLE_FCP_IM)
251#define bfa_fcs_lport_get_nrports(_lport) \
252 ((_lport) ? (_lport)->num_rports : 0)
253
/* Return the driver (BFAD) peer instance bound to this logical port. */
static inline struct bfad_port_s *
bfa_fcs_lport_get_drvport(struct bfa_fcs_lport_s *port)
{
	return port->bfad_port;
}
259
260#define bfa_fcs_lport_get_opertype(_lport) ((_lport)->fabric->oper_type)
261#define bfa_fcs_lport_get_fabric_name(_lport) ((_lport)->fabric->fabric_name)
262#define bfa_fcs_lport_get_fabric_ipaddr(_lport) \
263 ((_lport)->fabric->fabric_ip_addr)
264
265/**
266 * bfa fcs port public functions
267 */
268
269bfa_boolean_t bfa_fcs_lport_is_online(struct bfa_fcs_lport_s *port);
270struct bfa_fcs_lport_s *bfa_fcs_get_base_port(struct bfa_fcs_s *fcs);
271void bfa_fcs_lport_get_rports(struct bfa_fcs_lport_s *port,
272 wwn_t rport_wwns[], int *nrports);
273
274wwn_t bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn,
275 int index, int nrports, bfa_boolean_t bwwn);
276
277struct bfa_fcs_lport_s *bfa_fcs_lookup_port(struct bfa_fcs_s *fcs,
278 u16 vf_id, wwn_t lpwwn);
279
280void bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port,
281 struct bfa_lport_info_s *port_info);
282void bfa_fcs_lport_get_attr(struct bfa_fcs_lport_s *port,
283 struct bfa_lport_attr_s *port_attr);
284void bfa_fcs_lport_get_stats(struct bfa_fcs_lport_s *fcs_port,
285 struct bfa_lport_stats_s *port_stats);
286void bfa_fcs_lport_clear_stats(struct bfa_fcs_lport_s *fcs_port);
287enum bfa_port_speed bfa_fcs_lport_get_rport_max_speed(
288 struct bfa_fcs_lport_s *port);
289
290/* MS FCS routines */
291void bfa_fcs_lport_ms_init(struct bfa_fcs_lport_s *port);
292void bfa_fcs_lport_ms_offline(struct bfa_fcs_lport_s *port);
293void bfa_fcs_lport_ms_online(struct bfa_fcs_lport_s *port);
294void bfa_fcs_lport_ms_fabric_rscn(struct bfa_fcs_lport_s *port);
295
296/* FDMI FCS routines */
297void bfa_fcs_lport_fdmi_init(struct bfa_fcs_lport_ms_s *ms);
298void bfa_fcs_lport_fdmi_offline(struct bfa_fcs_lport_ms_s *ms);
299void bfa_fcs_lport_fdmi_online(struct bfa_fcs_lport_ms_s *ms);
300void bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport, struct fchs_s *fchs,
301 u16 len);
302void bfa_fcs_lport_attach(struct bfa_fcs_lport_s *lport, struct bfa_fcs_s *fcs,
303 u16 vf_id, struct bfa_fcs_vport_s *vport);
304void bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
305 struct bfa_lport_cfg_s *port_cfg);
306void bfa_fcs_lport_online(struct bfa_fcs_lport_s *port);
307void bfa_fcs_lport_offline(struct bfa_fcs_lport_s *port);
308void bfa_fcs_lport_delete(struct bfa_fcs_lport_s *port);
309struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_pid(
310 struct bfa_fcs_lport_s *port, u32 pid);
311struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_pwwn(
312 struct bfa_fcs_lport_s *port, wwn_t pwwn);
313struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_nwwn(
314 struct bfa_fcs_lport_s *port, wwn_t nwwn);
315void bfa_fcs_lport_add_rport(struct bfa_fcs_lport_s *port,
316 struct bfa_fcs_rport_s *rport);
317void bfa_fcs_lport_del_rport(struct bfa_fcs_lport_s *port,
318 struct bfa_fcs_rport_s *rport);
319void bfa_fcs_lport_modinit(struct bfa_fcs_s *fcs);
320void bfa_fcs_lport_modexit(struct bfa_fcs_s *fcs);
321void bfa_fcs_lport_ns_init(struct bfa_fcs_lport_s *vport);
322void bfa_fcs_lport_ns_offline(struct bfa_fcs_lport_s *vport);
323void bfa_fcs_lport_ns_online(struct bfa_fcs_lport_s *vport);
324void bfa_fcs_lport_ns_query(struct bfa_fcs_lport_s *port);
325void bfa_fcs_lport_scn_init(struct bfa_fcs_lport_s *vport);
326void bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *vport);
327void bfa_fcs_lport_scn_online(struct bfa_fcs_lport_s *vport);
328void bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port,
329 struct fchs_s *rx_frame, u32 len);
330
331struct bfa_fcs_vport_s {
332 struct list_head qe; /* queue elem */
333 bfa_sm_t sm; /* state machine */
334 bfa_fcs_lport_t lport; /* logical port */
335 struct bfa_timer_s timer;
336 struct bfad_vport_s *vport_drv; /* Driver private */
337 struct bfa_vport_stats_s vport_stats; /* vport statistics */
338 struct bfa_lps_s *lps; /* Lport login service*/
339 int fdisc_retries;
340};
341
/*
 * Return the logical port embedded in a vport.
 * The member is named 'lport' (see struct bfa_fcs_vport_s); the earlier
 * '&vport->port' reference could not compile if this macro were ever
 * expanded. Argument is parenthesized against operator-precedence
 * surprises.
 */
#define bfa_fcs_vport_get_port(vport) \
	((struct bfa_fcs_lport_s *)(&(vport)->lport))
344
345/**
346 * bfa fcs vport public functions
347 */
348bfa_status_t bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport,
349 struct bfa_fcs_s *fcs, u16 vf_id,
350 struct bfa_lport_cfg_s *port_cfg,
351 struct bfad_vport_s *vport_drv);
352bfa_status_t bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport,
353 struct bfa_fcs_s *fcs, u16 vf_id,
354 struct bfa_lport_cfg_s *port_cfg,
355 struct bfad_vport_s *vport_drv);
356bfa_boolean_t bfa_fcs_is_pbc_vport(struct bfa_fcs_vport_s *vport);
357bfa_status_t bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport);
358bfa_status_t bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport);
359bfa_status_t bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport);
360void bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport,
361 struct bfa_vport_attr_s *vport_attr);
362void bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport,
363 struct bfa_vport_stats_s *vport_stats);
364void bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport);
365struct bfa_fcs_vport_s *bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs,
366 u16 vf_id, wwn_t vpwwn);
367void bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport);
368void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport);
369void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport);
370void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport);
371void bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport);
372
373#define BFA_FCS_RPORT_DEF_DEL_TIMEOUT 90 /* in secs */
374#define BFA_FCS_RPORT_MAX_RETRIES (5)
375
376/*
377 * forward declarations
378 */
379struct bfad_rport_s;
380
381struct bfa_fcs_itnim_s;
382struct bfa_fcs_tin_s;
383struct bfa_fcs_iprp_s;
384
385/* Rport Features (RPF) */
386struct bfa_fcs_rpf_s {
387 bfa_sm_t sm; /* state machine */
388 struct bfa_fcs_rport_s *rport; /* parent rport */
389 struct bfa_timer_s timer; /* general purpose timer */
390 struct bfa_fcxp_s *fcxp; /* FCXP needed for discarding */
391 struct bfa_fcxp_wqe_s fcxp_wqe; /* fcxp wait queue element */
392 int rpsc_retries; /* max RPSC retry attempts */
393 enum bfa_port_speed rpsc_speed;
394 /* Current Speed from RPSC. O if RPSC fails */
395 enum bfa_port_speed assigned_speed;
396 /**
397 * Speed assigned by the user. will be used if RPSC is
398 * not supported by the rport.
399 */
400};
401
402struct bfa_fcs_rport_s {
403 struct list_head qe; /* used by port/vport */
404 struct bfa_fcs_lport_s *port; /* parent FCS port */
405 struct bfa_fcs_s *fcs; /* fcs instance */
406 struct bfad_rport_s *rp_drv; /* driver peer instance */
407 u32 pid; /* port ID of rport */
408 u16 maxfrsize; /* maximum frame size */
409 u16 reply_oxid; /* OX_ID of inbound requests */
410 enum fc_cos fc_cos; /* FC classes of service supp */
411 bfa_boolean_t cisc; /* CISC capable device */
412 bfa_boolean_t prlo; /* processing prlo or LOGO */
413 wwn_t pwwn; /* port wwn of rport */
414 wwn_t nwwn; /* node wwn of rport */
415 struct bfa_rport_symname_s psym_name; /* port symbolic name */
416 bfa_sm_t sm; /* state machine */
417 struct bfa_timer_s timer; /* general purpose timer */
418 struct bfa_fcs_itnim_s *itnim; /* ITN initiator mode role */
419 struct bfa_fcs_tin_s *tin; /* ITN initiator mode role */
420 struct bfa_fcs_iprp_s *iprp; /* IP/FC role */
421 struct bfa_rport_s *bfa_rport; /* BFA Rport */
422 struct bfa_fcxp_s *fcxp; /* FCXP needed for discarding */
423 int plogi_retries; /* max plogi retry attempts */
424 int ns_retries; /* max NS query retry attempts */
425 struct bfa_fcxp_wqe_s fcxp_wqe; /* fcxp wait queue element */
426 struct bfa_rport_stats_s stats; /* rport stats */
427 enum bfa_rport_function scsi_function; /* Initiator/Target */
428 struct bfa_fcs_rpf_s rpf; /* Rport features module */
429};
430
/* Return the BFA (HAL) rport backing this FCS rport. */
static inline struct bfa_rport_s *
bfa_fcs_rport_get_halrport(struct bfa_fcs_rport_s *rport)
{
	return rport->bfa_rport;
}
436
437/**
438 * bfa fcs rport API functions
439 */
440bfa_status_t bfa_fcs_rport_add(struct bfa_fcs_lport_s *port, wwn_t *pwwn,
441 struct bfa_fcs_rport_s *rport,
442 struct bfad_rport_s *rport_drv);
443bfa_status_t bfa_fcs_rport_remove(struct bfa_fcs_rport_s *rport);
444void bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
445 struct bfa_rport_attr_s *attr);
446void bfa_fcs_rport_get_stats(struct bfa_fcs_rport_s *rport,
447 struct bfa_rport_stats_s *stats);
448void bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport);
449struct bfa_fcs_rport_s *bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port,
450 wwn_t rpwwn);
451struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn(
452 struct bfa_fcs_lport_s *port, wwn_t rnwwn);
453void bfa_fcs_rport_set_del_timeout(u8 rport_tmo);
454
455void bfa_fcs_rport_set_speed(struct bfa_fcs_rport_s *rport,
456 enum bfa_port_speed speed);
457void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport,
458 struct fchs_s *fchs, u16 len);
459void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport);
460
461struct bfa_fcs_rport_s *bfa_fcs_rport_create(struct bfa_fcs_lport_s *port,
462 u32 pid);
463void bfa_fcs_rport_delete(struct bfa_fcs_rport_s *rport);
464void bfa_fcs_rport_online(struct bfa_fcs_rport_s *rport);
465void bfa_fcs_rport_offline(struct bfa_fcs_rport_s *rport);
466void bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
467 struct fc_logi_s *plogi_rsp);
468void bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port,
469 struct fchs_s *rx_fchs,
470 struct fc_logi_s *plogi);
471void bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs,
472 struct fc_logi_s *plogi);
473void bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport);
474void bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, u16 ox_id);
475
476void bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport);
477void bfa_fcs_rport_itntm_ack(struct bfa_fcs_rport_s *rport);
478void bfa_fcs_rport_fcptm_offline_done(struct bfa_fcs_rport_s *rport);
479int bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport);
480struct bfa_fcs_rport_s *bfa_fcs_rport_create_by_wwn(
481 struct bfa_fcs_lport_s *port, wwn_t wwn);
482void bfa_fcs_rpf_init(struct bfa_fcs_rport_s *rport);
483void bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport);
484void bfa_fcs_rpf_rport_offline(struct bfa_fcs_rport_s *rport);
485
486/*
487 * forward declarations
488 */
489struct bfad_itnim_s;
490
491struct bfa_fcs_itnim_s {
492 bfa_sm_t sm; /* state machine */
493 struct bfa_fcs_rport_s *rport; /* parent remote rport */
494 struct bfad_itnim_s *itnim_drv; /* driver peer instance */
495 struct bfa_fcs_s *fcs; /* fcs instance */
496 struct bfa_timer_s timer; /* timer functions */
497 struct bfa_itnim_s *bfa_itnim; /* BFA itnim struct */
498 u32 prli_retries; /* max prli retry attempts */
499 bfa_boolean_t seq_rec; /* seq recovery support */
500 bfa_boolean_t rec_support; /* REC supported */
501 bfa_boolean_t conf_comp; /* FCP_CONF support */
502 bfa_boolean_t task_retry_id; /* task retry id supp */
503 struct bfa_fcxp_wqe_s fcxp_wqe; /* wait qelem for fcxp */
504 struct bfa_fcxp_s *fcxp; /* FCXP in use */
505 struct bfa_itnim_stats_s stats; /* itn statistics */
506};
507#define bfa_fcs_fcxp_alloc(__fcs) \
508 bfa_fcxp_alloc(NULL, (__fcs)->bfa, 0, 0, NULL, NULL, NULL, NULL)
509
510#define bfa_fcs_fcxp_alloc_wait(__bfa, __wqe, __alloc_cbfn, __alloc_cbarg) \
511 bfa_fcxp_alloc_wait(__bfa, __wqe, __alloc_cbfn, __alloc_cbarg, \
512 NULL, 0, 0, NULL, NULL, NULL, NULL)
513
/* Driver port peer of the itnim's parent logical port. */
static inline struct bfad_port_s *
bfa_fcs_itnim_get_drvport(struct bfa_fcs_itnim_s *itnim)
{
	return itnim->rport->port->bfad_port;
}


/* Parent FCS logical port of the itnim's rport. */
static inline struct bfa_fcs_lport_s *
bfa_fcs_itnim_get_port(struct bfa_fcs_itnim_s *itnim)
{
	return itnim->rport->port;
}


/* Node WWN of the remote port. */
static inline wwn_t
bfa_fcs_itnim_get_nwwn(struct bfa_fcs_itnim_s *itnim)
{
	return itnim->rport->nwwn;
}


/* Port WWN of the remote port. */
static inline wwn_t
bfa_fcs_itnim_get_pwwn(struct bfa_fcs_itnim_s *itnim)
{
	return itnim->rport->pwwn;
}


/* FC address (port ID) of the remote port. */
static inline u32
bfa_fcs_itnim_get_fcid(struct bfa_fcs_itnim_s *itnim)
{
	return itnim->rport->pid;
}


/* Maximum frame size recorded for the remote port. */
static inline u32
bfa_fcs_itnim_get_maxfrsize(struct bfa_fcs_itnim_s *itnim)
{
	return itnim->rport->maxfrsize;
}


/* FC classes of service supported by the remote port. */
static inline enum fc_cos
bfa_fcs_itnim_get_cos(struct bfa_fcs_itnim_s *itnim)
{
	return itnim->rport->fc_cos;
}


/* Driver (BFAD) peer instance of this itnim. */
static inline struct bfad_itnim_s *
bfa_fcs_itnim_get_drvitn(struct bfa_fcs_itnim_s *itnim)
{
	return itnim->itnim_drv;
}


/* BFA (HAL) itnim backing this FCS itnim. */
static inline struct bfa_itnim_s *
bfa_fcs_itnim_get_halitn(struct bfa_fcs_itnim_s *itnim)
{
	return itnim->bfa_itnim;
}
575
576/**
577 * bfa fcs FCP Initiator mode API functions
578 */
579void bfa_fcs_itnim_get_attr(struct bfa_fcs_itnim_s *itnim,
580 struct bfa_itnim_attr_s *attr);
581void bfa_fcs_itnim_get_stats(struct bfa_fcs_itnim_s *itnim,
582 struct bfa_itnim_stats_s *stats);
583struct bfa_fcs_itnim_s *bfa_fcs_itnim_lookup(struct bfa_fcs_lport_s *port,
584 wwn_t rpwwn);
585bfa_status_t bfa_fcs_itnim_attr_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
586 struct bfa_itnim_attr_s *attr);
587bfa_status_t bfa_fcs_itnim_stats_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
588 struct bfa_itnim_stats_s *stats);
589bfa_status_t bfa_fcs_itnim_stats_clear(struct bfa_fcs_lport_s *port,
590 wwn_t rpwwn);
591struct bfa_fcs_itnim_s *bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport);
592void bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim);
593void bfa_fcs_itnim_rport_offline(struct bfa_fcs_itnim_s *itnim);
594void bfa_fcs_itnim_rport_online(struct bfa_fcs_itnim_s *itnim);
595bfa_status_t bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim);
596void bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim);
597void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim,
598 struct fchs_s *fchs, u16 len);
599
600#define BFA_FCS_FDMI_SUPORTED_SPEEDS (FDMI_TRANS_SPEED_1G | \
601 FDMI_TRANS_SPEED_2G | \
602 FDMI_TRANS_SPEED_4G | \
603 FDMI_TRANS_SPEED_8G)
604
605/*
606 * HBA Attribute Block : BFA internal representation. Note : Some variable
607 * sizes have been trimmed to suit BFA For Ex : Model will be "Brocade". Based
608 * on this the size has been reduced to 16 bytes from the standard's 64 bytes.
609 */
610struct bfa_fcs_fdmi_hba_attr_s {
611 wwn_t node_name;
612 u8 manufacturer[64];
613 u8 serial_num[64];
614 u8 model[16];
615 u8 model_desc[256];
616 u8 hw_version[8];
617 u8 driver_version[8];
618 u8 option_rom_ver[BFA_VERSION_LEN];
619 u8 fw_version[8];
620 u8 os_name[256];
621 u32 max_ct_pyld;
622};
623
624/*
625 * Port Attribute Block
626 */
627struct bfa_fcs_fdmi_port_attr_s {
628 u8 supp_fc4_types[32]; /* supported FC4 types */
629 u32 supp_speed; /* supported speed */
630 u32 curr_speed; /* current Speed */
631 u32 max_frm_size; /* max frame size */
632 u8 os_device_name[256]; /* OS device Name */
633 u8 host_name[256]; /* host name */
634};
635
636struct bfa_fcs_stats_s {
637 struct {
638 u32 untagged; /* untagged receive frames */
639 u32 tagged; /* tagged receive frames */
640 u32 vfid_unknown; /* VF id is unknown */
641 } uf;
642};
643
644struct bfa_fcs_driver_info_s {
645 u8 version[BFA_VERSION_LEN]; /* Driver Version */
646 u8 host_machine_name[BFA_FCS_OS_STR_LEN];
647 u8 host_os_name[BFA_FCS_OS_STR_LEN]; /* OS name and version */
648 u8 host_os_patch[BFA_FCS_OS_STR_LEN]; /* patch or service pack */
649 u8 os_device_name[BFA_FCS_OS_STR_LEN]; /* Driver Device Name */
650};
651
652struct bfa_fcs_s {
653 struct bfa_s *bfa; /* corresponding BFA bfa instance */
654 struct bfad_s *bfad; /* corresponding BDA driver instance */
655 struct bfa_trc_mod_s *trcmod; /* tracing module */
656 bfa_boolean_t vf_enabled; /* VF mode is enabled */
657 bfa_boolean_t fdmi_enabled; /* FDMI is enabled */
658 bfa_boolean_t min_cfg; /* min cfg enabled/disabled */
659 u16 port_vfid; /* port default VF ID */
660 struct bfa_fcs_driver_info_s driver_info;
661 struct bfa_fcs_fabric_s fabric; /* base fabric state machine */
662 struct bfa_fcs_stats_s stats; /* FCS statistics */
663 struct bfa_wc_s wc; /* waiting counter */
664};
665
666/*
667 * bfa fcs API functions
668 */
669void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa,
670 struct bfad_s *bfad,
671 bfa_boolean_t min_cfg);
672void bfa_fcs_init(struct bfa_fcs_s *fcs);
673void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
674 struct bfa_fcs_driver_info_s *driver_info);
675void bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable);
676void bfa_fcs_exit(struct bfa_fcs_s *fcs);
677void bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod);
678void bfa_fcs_start(struct bfa_fcs_s *fcs);
679
680/**
681 * bfa fcs vf public functions
682 */
683bfa_status_t bfa_fcs_vf_mode_enable(struct bfa_fcs_s *fcs, u16 vf_id);
684bfa_status_t bfa_fcs_vf_mode_disable(struct bfa_fcs_s *fcs);
685bfa_status_t bfa_fcs_vf_create(bfa_fcs_vf_t *vf, struct bfa_fcs_s *fcs,
686 u16 vf_id, struct bfa_lport_cfg_s *port_cfg,
687 struct bfad_vf_s *vf_drv);
688bfa_status_t bfa_fcs_vf_delete(bfa_fcs_vf_t *vf);
689void bfa_fcs_vf_list(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs);
690void bfa_fcs_vf_list_all(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs);
691void bfa_fcs_vf_get_attr(bfa_fcs_vf_t *vf, struct bfa_vf_attr_s *vf_attr);
692void bfa_fcs_vf_get_stats(bfa_fcs_vf_t *vf,
693 struct bfa_vf_stats_s *vf_stats);
694void bfa_fcs_vf_clear_stats(bfa_fcs_vf_t *vf);
695void bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t vpwwn[], int *nports);
696bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id);
697u16 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric);
698
699/*
700 * fabric protected interface functions
701 */
702void bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs);
703void bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs);
704void bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs);
705void bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs);
706void bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric);
707void bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric);
708void bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
709 struct bfa_fcs_vport_s *vport);
710void bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric,
711 struct bfa_fcs_vport_s *vport);
712int bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric);
713struct bfa_fcs_vport_s *bfa_fcs_fabric_vport_lookup(
714 struct bfa_fcs_fabric_s *fabric, wwn_t pwwn);
715void bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs);
716void bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric,
717 struct fchs_s *fchs, u16 len);
718bfa_boolean_t bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric);
719bfa_boolean_t bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric);
720enum bfa_port_type bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric);
721void bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric);
722void bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric);
723bfa_status_t bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf,
724 struct bfa_fcs_s *fcs, struct bfa_lport_cfg_s *port_cfg,
725 struct bfad_vf_s *vf_drv);
726void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
727 wwn_t fabric_name);
728u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric);
729void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs);
730void bfa_fcs_port_attach(struct bfa_fcs_s *fcs);
731
732/**
733 * BFA FCS callback interfaces
734 */
735
736/**
737 * fcb Main fcs callbacks
738 */
739
740struct bfad_port_s;
741struct bfad_vf_s;
742struct bfad_vport_s;
743struct bfad_rport_s;
744
745/**
746 * lport callbacks
747 */
748struct bfad_port_s *bfa_fcb_lport_new(struct bfad_s *bfad,
749 struct bfa_fcs_lport_s *port,
750 enum bfa_lport_role roles,
751 struct bfad_vf_s *vf_drv,
752 struct bfad_vport_s *vp_drv);
753void bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles,
754 struct bfad_vf_s *vf_drv,
755 struct bfad_vport_s *vp_drv);
756
757/**
758 * vport callbacks
759 */
760void bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s);
761
762/**
763 * rport callbacks
764 */
765bfa_status_t bfa_fcb_rport_alloc(struct bfad_s *bfad,
766 struct bfa_fcs_rport_s **rport,
767 struct bfad_rport_s **rport_drv);
768
769/**
770 * itnim callbacks
771 */
772void bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
773 struct bfad_itnim_s **itnim_drv);
774void bfa_fcb_itnim_free(struct bfad_s *bfad,
775 struct bfad_itnim_s *itnim_drv);
776void bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv);
777void bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv);
778
779#endif /* __BFA_FCS_H__ */
diff --git a/drivers/scsi/bfa/fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c
index 6b8976ad22fa..569dfefab70d 100644
--- a/drivers/scsi/bfa/fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -19,36 +19,24 @@
19 * fcpim.c - FCP initiator mode i-t nexus state machine 19 * fcpim.c - FCP initiator mode i-t nexus state machine
20 */ 20 */
21 21
22#include <bfa.h> 22#include "bfa_fcs.h"
23#include <bfa_svc.h> 23#include "bfa_fcbuild.h"
24#include "fcs_fcpim.h" 24#include "bfad_drv.h"
25#include "fcs_rport.h" 25#include "bfad_im.h"
26#include "fcs_lport.h"
27#include "fcs_trcmod.h"
28#include "fcs_fcxp.h"
29#include "fcs.h"
30#include <fcs/bfa_fcs_fcpim.h>
31#include <fcb/bfa_fcb_fcpim.h>
32#include <aen/bfa_aen_itnim.h>
33 26
34BFA_TRC_FILE(FCS, FCPIM); 27BFA_TRC_FILE(FCS, FCPIM);
35 28
36/* 29/*
37 * forward declarations 30 * forward declarations
38 */ 31 */
39static void bfa_fcs_itnim_timeout(void *arg); 32static void bfa_fcs_itnim_timeout(void *arg);
40static void bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim); 33static void bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim);
41static void bfa_fcs_itnim_send_prli(void *itnim_cbarg, 34static void bfa_fcs_itnim_send_prli(void *itnim_cbarg,
42 struct bfa_fcxp_s *fcxp_alloced); 35 struct bfa_fcxp_s *fcxp_alloced);
43static void bfa_fcs_itnim_prli_response(void *fcsarg, 36static void bfa_fcs_itnim_prli_response(void *fcsarg,
44 struct bfa_fcxp_s *fcxp, 37 struct bfa_fcxp_s *fcxp, void *cbarg,
45 void *cbarg, 38 bfa_status_t req_status, u32 rsp_len,
46 bfa_status_t req_status, 39 u32 resid_len, struct fchs_s *rsp_fchs);
47 u32 rsp_len,
48 u32 resid_len,
49 struct fchs_s *rsp_fchs);
50static void bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
51 enum bfa_itnim_aen_event event);
52 40
53/** 41/**
54 * fcs_itnim_sm FCS itnim state machine events 42 * fcs_itnim_sm FCS itnim state machine events
@@ -61,28 +49,28 @@ enum bfa_fcs_itnim_event {
61 BFA_FCS_ITNIM_SM_RSP_OK = 4, /* good response */ 49 BFA_FCS_ITNIM_SM_RSP_OK = 4, /* good response */
62 BFA_FCS_ITNIM_SM_RSP_ERROR = 5, /* error response */ 50 BFA_FCS_ITNIM_SM_RSP_ERROR = 5, /* error response */
63 BFA_FCS_ITNIM_SM_TIMEOUT = 6, /* delay timeout */ 51 BFA_FCS_ITNIM_SM_TIMEOUT = 6, /* delay timeout */
64 BFA_FCS_ITNIM_SM_HCB_OFFLINE = 7, /* BFA online callback */ 52 BFA_FCS_ITNIM_SM_HCB_OFFLINE = 7, /* BFA online callback */
65 BFA_FCS_ITNIM_SM_HCB_ONLINE = 8, /* BFA offline callback */ 53 BFA_FCS_ITNIM_SM_HCB_ONLINE = 8, /* BFA offline callback */
66 BFA_FCS_ITNIM_SM_INITIATOR = 9, /* rport is initiator */ 54 BFA_FCS_ITNIM_SM_INITIATOR = 9, /* rport is initiator */
67 BFA_FCS_ITNIM_SM_DELETE = 10, /* delete event from rport */ 55 BFA_FCS_ITNIM_SM_DELETE = 10, /* delete event from rport */
68 BFA_FCS_ITNIM_SM_PRLO = 11, /* delete event from rport */ 56 BFA_FCS_ITNIM_SM_PRLO = 11, /* delete event from rport */
69}; 57};
70 58
71static void bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim, 59static void bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
72 enum bfa_fcs_itnim_event event); 60 enum bfa_fcs_itnim_event event);
73static void bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim, 61static void bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
74 enum bfa_fcs_itnim_event event); 62 enum bfa_fcs_itnim_event event);
75static void bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim, 63static void bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
76 enum bfa_fcs_itnim_event event); 64 enum bfa_fcs_itnim_event event);
77static void bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim, 65static void bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
78 enum bfa_fcs_itnim_event event); 66 enum bfa_fcs_itnim_event event);
79static void bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim, 67static void bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
80 enum bfa_fcs_itnim_event event); 68 enum bfa_fcs_itnim_event event);
81static void bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim, 69static void bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
82 enum bfa_fcs_itnim_event event); 70 enum bfa_fcs_itnim_event event);
83static void bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim, 71static void bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim,
84 enum bfa_fcs_itnim_event event); 72 enum bfa_fcs_itnim_event event);
85static void bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim, 73static void bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
86 enum bfa_fcs_itnim_event event); 74 enum bfa_fcs_itnim_event event);
87 75
88static struct bfa_sm_table_s itnim_sm_table[] = { 76static struct bfa_sm_table_s itnim_sm_table[] = {
@@ -102,7 +90,7 @@ static struct bfa_sm_table_s itnim_sm_table[] = {
102 90
103static void 91static void
104bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim, 92bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
105 enum bfa_fcs_itnim_event event) 93 enum bfa_fcs_itnim_event event)
106{ 94{
107 bfa_trc(itnim->fcs, itnim->rport->pwwn); 95 bfa_trc(itnim->fcs, itnim->rport->pwwn);
108 bfa_trc(itnim->fcs, event); 96 bfa_trc(itnim->fcs, event);
@@ -134,7 +122,7 @@ bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
134 122
135static void 123static void
136bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim, 124bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
137 enum bfa_fcs_itnim_event event) 125 enum bfa_fcs_itnim_event event)
138{ 126{
139 bfa_trc(itnim->fcs, itnim->rport->pwwn); 127 bfa_trc(itnim->fcs, itnim->rport->pwwn);
140 bfa_trc(itnim->fcs, event); 128 bfa_trc(itnim->fcs, event);
@@ -168,7 +156,7 @@ bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
168 156
169static void 157static void
170bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim, 158bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
171 enum bfa_fcs_itnim_event event) 159 enum bfa_fcs_itnim_event event)
172{ 160{
173 bfa_trc(itnim->fcs, itnim->rport->pwwn); 161 bfa_trc(itnim->fcs, itnim->rport->pwwn);
174 bfa_trc(itnim->fcs, event); 162 bfa_trc(itnim->fcs, event);
@@ -233,6 +221,7 @@ bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
233 } 221 }
234 break; 222 break;
235 223
224
236 case BFA_FCS_ITNIM_SM_OFFLINE: 225 case BFA_FCS_ITNIM_SM_OFFLINE:
237 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); 226 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
238 bfa_timer_stop(&itnim->timer); 227 bfa_timer_stop(&itnim->timer);
@@ -259,6 +248,10 @@ static void
259bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim, 248bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
260 enum bfa_fcs_itnim_event event) 249 enum bfa_fcs_itnim_event event)
261{ 250{
251 struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad;
252 char lpwwn_buf[BFA_STRING_32];
253 char rpwwn_buf[BFA_STRING_32];
254
262 bfa_trc(itnim->fcs, itnim->rport->pwwn); 255 bfa_trc(itnim->fcs, itnim->rport->pwwn);
263 bfa_trc(itnim->fcs, event); 256 bfa_trc(itnim->fcs, event);
264 257
@@ -266,7 +259,11 @@ bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
266 case BFA_FCS_ITNIM_SM_HCB_ONLINE: 259 case BFA_FCS_ITNIM_SM_HCB_ONLINE:
267 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_online); 260 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_online);
268 bfa_fcb_itnim_online(itnim->itnim_drv); 261 bfa_fcb_itnim_online(itnim->itnim_drv);
269 bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_ONLINE); 262 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port));
263 wwn2str(rpwwn_buf, itnim->rport->pwwn);
264 BFA_LOG(KERN_INFO, bfad, log_level,
265 "Target (WWN = %s) is online for initiator (WWN = %s)\n",
266 rpwwn_buf, lpwwn_buf);
270 break; 267 break;
271 268
272 case BFA_FCS_ITNIM_SM_OFFLINE: 269 case BFA_FCS_ITNIM_SM_OFFLINE:
@@ -287,8 +284,12 @@ bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
287 284
288static void 285static void
289bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim, 286bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
290 enum bfa_fcs_itnim_event event) 287 enum bfa_fcs_itnim_event event)
291{ 288{
289 struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad;
290 char lpwwn_buf[BFA_STRING_32];
291 char rpwwn_buf[BFA_STRING_32];
292
292 bfa_trc(itnim->fcs, itnim->rport->pwwn); 293 bfa_trc(itnim->fcs, itnim->rport->pwwn);
293 bfa_trc(itnim->fcs, event); 294 bfa_trc(itnim->fcs, event);
294 295
@@ -297,10 +298,16 @@ bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
297 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_offline); 298 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_offline);
298 bfa_fcb_itnim_offline(itnim->itnim_drv); 299 bfa_fcb_itnim_offline(itnim->itnim_drv);
299 bfa_itnim_offline(itnim->bfa_itnim); 300 bfa_itnim_offline(itnim->bfa_itnim);
300 if (bfa_fcs_port_is_online(itnim->rport->port) == BFA_TRUE) 301 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port));
301 bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_DISCONNECT); 302 wwn2str(rpwwn_buf, itnim->rport->pwwn);
303 if (bfa_fcs_lport_is_online(itnim->rport->port) == BFA_TRUE)
304 BFA_LOG(KERN_ERR, bfad, log_level,
305 "Target (WWN = %s) connectivity lost for "
306 "initiator (WWN = %s)\n", rpwwn_buf, lpwwn_buf);
302 else 307 else
303 bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_OFFLINE); 308 BFA_LOG(KERN_INFO, bfad, log_level,
309 "Target (WWN = %s) offlined by initiator (WWN = %s)\n",
310 rpwwn_buf, lpwwn_buf);
304 break; 311 break;
305 312
306 case BFA_FCS_ITNIM_SM_DELETE: 313 case BFA_FCS_ITNIM_SM_DELETE:
@@ -343,7 +350,7 @@ bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim,
343 */ 350 */
344static void 351static void
345bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim, 352bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
346 enum bfa_fcs_itnim_event event) 353 enum bfa_fcs_itnim_event event)
347{ 354{
348 bfa_trc(itnim->fcs, itnim->rport->pwwn); 355 bfa_trc(itnim->fcs, itnim->rport->pwwn);
349 bfa_trc(itnim->fcs, event); 356 bfa_trc(itnim->fcs, event);
@@ -369,71 +376,34 @@ bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
369 } 376 }
370} 377}
371 378
372
373
374/**
375 * itnim_private FCS ITNIM private interfaces
376 */
377
378static void
379bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
380 enum bfa_itnim_aen_event event)
381{
382 struct bfa_fcs_rport_s *rport = itnim->rport;
383 union bfa_aen_data_u aen_data;
384 struct bfa_log_mod_s *logmod = rport->fcs->logm;
385 wwn_t lpwwn = bfa_fcs_port_get_pwwn(rport->port);
386 wwn_t rpwwn = rport->pwwn;
387 char lpwwn_ptr[BFA_STRING_32];
388 char rpwwn_ptr[BFA_STRING_32];
389
390 /*
391 * Don't post events for well known addresses
392 */
393 if (BFA_FCS_PID_IS_WKA(rport->pid))
394 return;
395
396 wwn2str(lpwwn_ptr, lpwwn);
397 wwn2str(rpwwn_ptr, rpwwn);
398
399 bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_ITNIM, event),
400 rpwwn_ptr, lpwwn_ptr);
401
402 aen_data.itnim.vf_id = rport->port->fabric->vf_id;
403 aen_data.itnim.ppwwn =
404 bfa_fcs_port_get_pwwn(bfa_fcs_get_base_port(itnim->fcs));
405 aen_data.itnim.lpwwn = lpwwn;
406 aen_data.itnim.rpwwn = rpwwn;
407}
408
409static void 379static void
410bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced) 380bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced)
411{ 381{
412 struct bfa_fcs_itnim_s *itnim = itnim_cbarg; 382 struct bfa_fcs_itnim_s *itnim = itnim_cbarg;
413 struct bfa_fcs_rport_s *rport = itnim->rport; 383 struct bfa_fcs_rport_s *rport = itnim->rport;
414 struct bfa_fcs_port_s *port = rport->port; 384 struct bfa_fcs_lport_s *port = rport->port;
415 struct fchs_s fchs; 385 struct fchs_s fchs;
416 struct bfa_fcxp_s *fcxp; 386 struct bfa_fcxp_s *fcxp;
417 int len; 387 int len;
418 388
419 bfa_trc(itnim->fcs, itnim->rport->pwwn); 389 bfa_trc(itnim->fcs, itnim->rport->pwwn);
420 390
421 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); 391 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
422 if (!fcxp) { 392 if (!fcxp) {
423 itnim->stats.fcxp_alloc_wait++; 393 itnim->stats.fcxp_alloc_wait++;
424 bfa_fcxp_alloc_wait(port->fcs->bfa, &itnim->fcxp_wqe, 394 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &itnim->fcxp_wqe,
425 bfa_fcs_itnim_send_prli, itnim); 395 bfa_fcs_itnim_send_prli, itnim);
426 return; 396 return;
427 } 397 }
428 itnim->fcxp = fcxp; 398 itnim->fcxp = fcxp;
429 399
430 len = fc_prli_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), itnim->rport->pid, 400 len = fc_prli_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
431 bfa_fcs_port_get_fcid(port), 0); 401 itnim->rport->pid, bfa_fcs_lport_get_fcid(port), 0);
432 402
433 bfa_fcxp_send(fcxp, rport->bfa_rport, port->fabric->vf_id, port->lp_tag, 403 bfa_fcxp_send(fcxp, rport->bfa_rport, port->fabric->vf_id, port->lp_tag,
434 BFA_FALSE, FC_CLASS_3, len, &fchs, 404 BFA_FALSE, FC_CLASS_3, len, &fchs,
435 bfa_fcs_itnim_prli_response, (void *)itnim, FC_MAX_PDUSZ, 405 bfa_fcs_itnim_prli_response, (void *)itnim,
436 FC_ELS_TOV); 406 FC_MAX_PDUSZ, FC_ELS_TOV);
437 407
438 itnim->stats.prli_sent++; 408 itnim->stats.prli_sent++;
439 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_FRMSENT); 409 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_FRMSENT);
@@ -444,10 +414,10 @@ bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
444 bfa_status_t req_status, u32 rsp_len, 414 bfa_status_t req_status, u32 rsp_len,
445 u32 resid_len, struct fchs_s *rsp_fchs) 415 u32 resid_len, struct fchs_s *rsp_fchs)
446{ 416{
447 struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cbarg; 417 struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cbarg;
448 struct fc_els_cmd_s *els_cmd; 418 struct fc_els_cmd_s *els_cmd;
449 struct fc_prli_s *prli_resp; 419 struct fc_prli_s *prli_resp;
450 struct fc_ls_rjt_s *ls_rjt; 420 struct fc_ls_rjt_s *ls_rjt;
451 struct fc_prli_params_s *sparams; 421 struct fc_prli_params_s *sparams;
452 422
453 bfa_trc(itnim->fcs, req_status); 423 bfa_trc(itnim->fcs, req_status);
@@ -475,7 +445,7 @@ bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
475 if (prli_resp->parampage.servparams.initiator) { 445 if (prli_resp->parampage.servparams.initiator) {
476 bfa_trc(itnim->fcs, prli_resp->parampage.type); 446 bfa_trc(itnim->fcs, prli_resp->parampage.type);
477 itnim->rport->scsi_function = 447 itnim->rport->scsi_function =
478 BFA_RPORT_INITIATOR; 448 BFA_RPORT_INITIATOR;
479 itnim->stats.prli_rsp_acc++; 449 itnim->stats.prli_rsp_acc++;
480 bfa_sm_send_event(itnim, 450 bfa_sm_send_event(itnim,
481 BFA_FCS_ITNIM_SM_RSP_OK); 451 BFA_FCS_ITNIM_SM_RSP_OK);
@@ -488,10 +458,10 @@ bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
488 itnim->rport->scsi_function = BFA_RPORT_TARGET; 458 itnim->rport->scsi_function = BFA_RPORT_TARGET;
489 459
490 sparams = &prli_resp->parampage.servparams; 460 sparams = &prli_resp->parampage.servparams;
491 itnim->seq_rec = sparams->retry; 461 itnim->seq_rec = sparams->retry;
492 itnim->rec_support = sparams->rec_support; 462 itnim->rec_support = sparams->rec_support;
493 itnim->task_retry_id = sparams->task_retry_id; 463 itnim->task_retry_id = sparams->task_retry_id;
494 itnim->conf_comp = sparams->confirm; 464 itnim->conf_comp = sparams->confirm;
495 465
496 itnim->stats.prli_rsp_acc++; 466 itnim->stats.prli_rsp_acc++;
497 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_OK); 467 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_OK);
@@ -509,7 +479,7 @@ bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
509static void 479static void
510bfa_fcs_itnim_timeout(void *arg) 480bfa_fcs_itnim_timeout(void *arg)
511{ 481{
512 struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)arg; 482 struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) arg;
513 483
514 itnim->stats.timeout++; 484 itnim->stats.timeout++;
515 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_TIMEOUT); 485 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_TIMEOUT);
@@ -529,16 +499,16 @@ bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim)
529 */ 499 */
530 500
531/** 501/**
532 * Called by rport when a new rport is created. 502 * Called by rport when a new rport is created.
533 * 503 *
534 * @param[in] rport - remote port. 504 * @param[in] rport - remote port.
535 */ 505 */
536struct bfa_fcs_itnim_s * 506struct bfa_fcs_itnim_s *
537bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport) 507bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
538{ 508{
539 struct bfa_fcs_port_s *port = rport->port; 509 struct bfa_fcs_lport_s *port = rport->port;
540 struct bfa_fcs_itnim_s *itnim; 510 struct bfa_fcs_itnim_s *itnim;
541 struct bfad_itnim_s *itnim_drv; 511 struct bfad_itnim_s *itnim_drv;
542 struct bfa_itnim_s *bfa_itnim; 512 struct bfa_itnim_s *bfa_itnim;
543 513
544 /* 514 /*
@@ -560,7 +530,8 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
560 /* 530 /*
561 * call BFA to create the itnim 531 * call BFA to create the itnim
562 */ 532 */
563 bfa_itnim = bfa_itnim_create(port->fcs->bfa, rport->bfa_rport, itnim); 533 bfa_itnim =
534 bfa_itnim_create(port->fcs->bfa, rport->bfa_rport, itnim);
564 535
565 if (bfa_itnim == NULL) { 536 if (bfa_itnim == NULL) {
566 bfa_trc(port->fcs, rport->pwwn); 537 bfa_trc(port->fcs, rport->pwwn);
@@ -569,10 +540,10 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
569 return NULL; 540 return NULL;
570 } 541 }
571 542
572 itnim->bfa_itnim = bfa_itnim; 543 itnim->bfa_itnim = bfa_itnim;
573 itnim->seq_rec = BFA_FALSE; 544 itnim->seq_rec = BFA_FALSE;
574 itnim->rec_support = BFA_FALSE; 545 itnim->rec_support = BFA_FALSE;
575 itnim->conf_comp = BFA_FALSE; 546 itnim->conf_comp = BFA_FALSE;
576 itnim->task_retry_id = BFA_FALSE; 547 itnim->task_retry_id = BFA_FALSE;
577 548
578 /* 549 /*
@@ -584,7 +555,7 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
584} 555}
585 556
586/** 557/**
587 * Called by rport to delete the instance of FCPIM. 558 * Called by rport to delete the instance of FCPIM.
588 * 559 *
589 * @param[in] rport - remote port. 560 * @param[in] rport - remote port.
590 */ 561 */
@@ -607,8 +578,8 @@ bfa_fcs_itnim_rport_online(struct bfa_fcs_itnim_s *itnim)
607 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_ONLINE); 578 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_ONLINE);
608 } else { 579 } else {
609 /* 580 /*
610 * For well known addresses, we set the itnim to initiator 581 * For well known addresses, we set the itnim to initiator
611 * state 582 * state
612 */ 583 */
613 itnim->stats.initiator++; 584 itnim->stats.initiator++;
614 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR); 585 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR);
@@ -651,7 +622,6 @@ bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim)
651 622
652 default: 623 default:
653 return BFA_STATUS_NO_FCPIM_NEXUS; 624 return BFA_STATUS_NO_FCPIM_NEXUS;
654
655 } 625 }
656} 626}
657 627
@@ -661,7 +631,7 @@ bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim)
661void 631void
662bfa_cb_itnim_online(void *cbarg) 632bfa_cb_itnim_online(void *cbarg)
663{ 633{
664 struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cbarg; 634 struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cbarg;
665 635
666 bfa_trc(itnim->fcs, itnim->rport->pwwn); 636 bfa_trc(itnim->fcs, itnim->rport->pwwn);
667 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_ONLINE); 637 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_ONLINE);
@@ -673,7 +643,7 @@ bfa_cb_itnim_online(void *cbarg)
673void 643void
674bfa_cb_itnim_offline(void *cb_arg) 644bfa_cb_itnim_offline(void *cb_arg)
675{ 645{
676 struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cb_arg; 646 struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg;
677 647
678 bfa_trc(itnim->fcs, itnim->rport->pwwn); 648 bfa_trc(itnim->fcs, itnim->rport->pwwn);
679 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_OFFLINE); 649 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_OFFLINE);
@@ -686,7 +656,7 @@ bfa_cb_itnim_offline(void *cb_arg)
686void 656void
687bfa_cb_itnim_tov_begin(void *cb_arg) 657bfa_cb_itnim_tov_begin(void *cb_arg)
688{ 658{
689 struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cb_arg; 659 struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg;
690 660
691 bfa_trc(itnim->fcs, itnim->rport->pwwn); 661 bfa_trc(itnim->fcs, itnim->rport->pwwn);
692} 662}
@@ -697,14 +667,15 @@ bfa_cb_itnim_tov_begin(void *cb_arg)
697void 667void
698bfa_cb_itnim_tov(void *cb_arg) 668bfa_cb_itnim_tov(void *cb_arg)
699{ 669{
700 struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cb_arg; 670 struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg;
671 struct bfad_itnim_s *itnim_drv = itnim->itnim_drv;
701 672
702 bfa_trc(itnim->fcs, itnim->rport->pwwn); 673 bfa_trc(itnim->fcs, itnim->rport->pwwn);
703 bfa_fcb_itnim_tov(itnim->itnim_drv); 674 itnim_drv->state = ITNIM_STATE_TIMEOUT;
704} 675}
705 676
706/** 677/**
707 * BFA notification to FCS/driver for second level error recovery. 678 * BFA notification to FCS/driver for second level error recovery.
708 * 679 *
709 * Atleast one I/O request has timedout and target is unresponsive to 680 * Atleast one I/O request has timedout and target is unresponsive to
710 * repeated abort requests. Second level error recovery should be initiated 681 * repeated abort requests. Second level error recovery should be initiated
@@ -713,7 +684,7 @@ bfa_cb_itnim_tov(void *cb_arg)
713void 684void
714bfa_cb_itnim_sler(void *cb_arg) 685bfa_cb_itnim_sler(void *cb_arg)
715{ 686{
716 struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cb_arg; 687 struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg;
717 688
718 itnim->stats.sler++; 689 itnim->stats.sler++;
719 bfa_trc(itnim->fcs, itnim->rport->pwwn); 690 bfa_trc(itnim->fcs, itnim->rport->pwwn);
@@ -721,7 +692,7 @@ bfa_cb_itnim_sler(void *cb_arg)
721} 692}
722 693
723struct bfa_fcs_itnim_s * 694struct bfa_fcs_itnim_s *
724bfa_fcs_itnim_lookup(struct bfa_fcs_port_s *port, wwn_t rpwwn) 695bfa_fcs_itnim_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
725{ 696{
726 struct bfa_fcs_rport_s *rport; 697 struct bfa_fcs_rport_s *rport;
727 rport = bfa_fcs_rport_lookup(port, rpwwn); 698 rport = bfa_fcs_rport_lookup(port, rpwwn);
@@ -734,7 +705,7 @@ bfa_fcs_itnim_lookup(struct bfa_fcs_port_s *port, wwn_t rpwwn)
734} 705}
735 706
736bfa_status_t 707bfa_status_t
737bfa_fcs_itnim_attr_get(struct bfa_fcs_port_s *port, wwn_t rpwwn, 708bfa_fcs_itnim_attr_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
738 struct bfa_itnim_attr_s *attr) 709 struct bfa_itnim_attr_s *attr)
739{ 710{
740 struct bfa_fcs_itnim_s *itnim = NULL; 711 struct bfa_fcs_itnim_s *itnim = NULL;
@@ -744,18 +715,16 @@ bfa_fcs_itnim_attr_get(struct bfa_fcs_port_s *port, wwn_t rpwwn,
744 if (itnim == NULL) 715 if (itnim == NULL)
745 return BFA_STATUS_NO_FCPIM_NEXUS; 716 return BFA_STATUS_NO_FCPIM_NEXUS;
746 717
747 attr->state = bfa_sm_to_state(itnim_sm_table, itnim->sm); 718 attr->state = bfa_sm_to_state(itnim_sm_table, itnim->sm);
748 attr->retry = itnim->seq_rec; 719 attr->retry = itnim->seq_rec;
749 attr->rec_support = itnim->rec_support; 720 attr->rec_support = itnim->rec_support;
750 attr->conf_comp = itnim->conf_comp; 721 attr->conf_comp = itnim->conf_comp;
751 attr->task_retry_id = itnim->task_retry_id; 722 attr->task_retry_id = itnim->task_retry_id;
752 bfa_os_memset(&attr->io_latency, 0, sizeof(struct bfa_itnim_latency_s));
753
754 return BFA_STATUS_OK; 723 return BFA_STATUS_OK;
755} 724}
756 725
757bfa_status_t 726bfa_status_t
758bfa_fcs_itnim_stats_get(struct bfa_fcs_port_s *port, wwn_t rpwwn, 727bfa_fcs_itnim_stats_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
759 struct bfa_itnim_stats_s *stats) 728 struct bfa_itnim_stats_s *stats)
760{ 729{
761 struct bfa_fcs_itnim_s *itnim = NULL; 730 struct bfa_fcs_itnim_s *itnim = NULL;
@@ -773,7 +742,7 @@ bfa_fcs_itnim_stats_get(struct bfa_fcs_port_s *port, wwn_t rpwwn,
773} 742}
774 743
775bfa_status_t 744bfa_status_t
776bfa_fcs_itnim_stats_clear(struct bfa_fcs_port_s *port, wwn_t rpwwn) 745bfa_fcs_itnim_stats_clear(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
777{ 746{
778 struct bfa_fcs_itnim_s *itnim = NULL; 747 struct bfa_fcs_itnim_s *itnim = NULL;
779 748
@@ -789,10 +758,10 @@ bfa_fcs_itnim_stats_clear(struct bfa_fcs_port_s *port, wwn_t rpwwn)
789} 758}
790 759
791void 760void
792bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, struct fchs_s *fchs, 761bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim,
793 u16 len) 762 struct fchs_s *fchs, u16 len)
794{ 763{
795 struct fc_els_cmd_s *els_cmd; 764 struct fc_els_cmd_s *els_cmd;
796 765
797 bfa_trc(itnim->fcs, fchs->type); 766 bfa_trc(itnim->fcs, fchs->type);
798 767
@@ -812,13 +781,3 @@ bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, struct fchs_s *fchs,
812 bfa_assert(0); 781 bfa_assert(0);
813 } 782 }
814} 783}
815
816void
817bfa_fcs_itnim_pause(struct bfa_fcs_itnim_s *itnim)
818{
819}
820
821void
822bfa_fcs_itnim_resume(struct bfa_fcs_itnim_s *itnim)
823{
824}
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 35df20e68a52..b522bf30247a 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -16,23 +16,13 @@
16 */ 16 */
17 17
18/** 18/**
19 * bfa_fcs_port.c BFA FCS port 19 * bfa_fcs_lport.c BFA FCS port
20 */ 20 */
21 21
22#include <fcs/bfa_fcs.h> 22#include "bfa_fcs.h"
23#include <fcs/bfa_fcs_lport.h> 23#include "bfa_fcbuild.h"
24#include <fcs/bfa_fcs_rport.h> 24#include "bfa_fc.h"
25#include <fcb/bfa_fcb_port.h> 25#include "bfad_drv.h"
26#include <bfa_svc.h>
27#include <log/bfa_log_fcs.h>
28#include "fcs.h"
29#include "fcs_lport.h"
30#include "fcs_vport.h"
31#include "fcs_rport.h"
32#include "fcs_fcxp.h"
33#include "fcs_trcmod.h"
34#include "lport_priv.h"
35#include <aen/bfa_aen_lport.h>
36 26
37BFA_TRC_FILE(FCS, PORT); 27BFA_TRC_FILE(FCS, PORT);
38 28
@@ -40,49 +30,53 @@ BFA_TRC_FILE(FCS, PORT);
40 * Forward declarations 30 * Forward declarations
41 */ 31 */
42 32
43static void bfa_fcs_port_aen_post(struct bfa_fcs_port_s *port, 33static void bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port,
44 enum bfa_lport_aen_event event); 34 struct fchs_s *rx_fchs, u8 reason_code,
45static void bfa_fcs_port_send_ls_rjt(struct bfa_fcs_port_s *port, 35 u8 reason_code_expl);
46 struct fchs_s *rx_fchs, u8 reason_code, 36static void bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
47 u8 reason_code_expl); 37 struct fchs_s *rx_fchs, struct fc_logi_s *plogi);
48static void bfa_fcs_port_plogi(struct bfa_fcs_port_s *port, 38static void bfa_fcs_lport_online_actions(struct bfa_fcs_lport_s *port);
49 struct fchs_s *rx_fchs, 39static void bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port);
50 struct fc_logi_s *plogi); 40static void bfa_fcs_lport_unknown_init(struct bfa_fcs_lport_s *port);
51static void bfa_fcs_port_online_actions(struct bfa_fcs_port_s *port); 41static void bfa_fcs_lport_unknown_online(struct bfa_fcs_lport_s *port);
52static void bfa_fcs_port_offline_actions(struct bfa_fcs_port_s *port); 42static void bfa_fcs_lport_unknown_offline(struct bfa_fcs_lport_s *port);
53static void bfa_fcs_port_unknown_init(struct bfa_fcs_port_s *port); 43static void bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port);
54static void bfa_fcs_port_unknown_online(struct bfa_fcs_port_s *port); 44static void bfa_fcs_lport_echo(struct bfa_fcs_lport_s *port,
55static void bfa_fcs_port_unknown_offline(struct bfa_fcs_port_s *port);
56static void bfa_fcs_port_deleted(struct bfa_fcs_port_s *port);
57static void bfa_fcs_port_echo(struct bfa_fcs_port_s *port,
58 struct fchs_s *rx_fchs, 45 struct fchs_s *rx_fchs,
59 struct fc_echo_s *echo, u16 len); 46 struct fc_echo_s *echo, u16 len);
60static void bfa_fcs_port_rnid(struct bfa_fcs_port_s *port, 47static void bfa_fcs_lport_rnid(struct bfa_fcs_lport_s *port,
61 struct fchs_s *rx_fchs, 48 struct fchs_s *rx_fchs,
62 struct fc_rnid_cmd_s *rnid, u16 len); 49 struct fc_rnid_cmd_s *rnid, u16 len);
63static void bfa_fs_port_get_gen_topo_data(struct bfa_fcs_port_s *port, 50static void bfa_fs_port_get_gen_topo_data(struct bfa_fcs_lport_s *port,
64 struct fc_rnid_general_topology_data_s *gen_topo_data); 51 struct fc_rnid_general_topology_data_s *gen_topo_data);
65 52
53static void bfa_fcs_lport_fab_init(struct bfa_fcs_lport_s *port);
54static void bfa_fcs_lport_fab_online(struct bfa_fcs_lport_s *port);
55static void bfa_fcs_lport_fab_offline(struct bfa_fcs_lport_s *port);
56
57static void bfa_fcs_lport_n2n_init(struct bfa_fcs_lport_s *port);
58static void bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port);
59static void bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port);
60
66static struct { 61static struct {
67 void (*init) (struct bfa_fcs_port_s *port); 62 void (*init) (struct bfa_fcs_lport_s *port);
68 void (*online) (struct bfa_fcs_port_s *port); 63 void (*online) (struct bfa_fcs_lport_s *port);
69 void (*offline) (struct bfa_fcs_port_s *port); 64 void (*offline) (struct bfa_fcs_lport_s *port);
70} __port_action[] = { 65} __port_action[] = {
71 { 66 {
72 bfa_fcs_port_unknown_init, bfa_fcs_port_unknown_online, 67 bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
73 bfa_fcs_port_unknown_offline}, { 68 bfa_fcs_lport_unknown_offline}, {
74 bfa_fcs_port_fab_init, bfa_fcs_port_fab_online, 69 bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
75 bfa_fcs_port_fab_offline}, { 70 bfa_fcs_lport_fab_offline}, {
76 bfa_fcs_port_loop_init, bfa_fcs_port_loop_online, 71 bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
77 bfa_fcs_port_loop_offline}, { 72 bfa_fcs_lport_n2n_offline},
78bfa_fcs_port_n2n_init, bfa_fcs_port_n2n_online, 73 };
79 bfa_fcs_port_n2n_offline},};
80 74
81/** 75/**
82 * fcs_port_sm FCS logical port state machine 76 * fcs_port_sm FCS logical port state machine
83 */ 77 */
84 78
85enum bfa_fcs_port_event { 79enum bfa_fcs_lport_event {
86 BFA_FCS_PORT_SM_CREATE = 1, 80 BFA_FCS_PORT_SM_CREATE = 1,
87 BFA_FCS_PORT_SM_ONLINE = 2, 81 BFA_FCS_PORT_SM_ONLINE = 2,
88 BFA_FCS_PORT_SM_OFFLINE = 3, 82 BFA_FCS_PORT_SM_OFFLINE = 3,
@@ -90,27 +84,28 @@ enum bfa_fcs_port_event {
90 BFA_FCS_PORT_SM_DELRPORT = 5, 84 BFA_FCS_PORT_SM_DELRPORT = 5,
91}; 85};
92 86
93static void bfa_fcs_port_sm_uninit(struct bfa_fcs_port_s *port, 87static void bfa_fcs_lport_sm_uninit(struct bfa_fcs_lport_s *port,
94 enum bfa_fcs_port_event event); 88 enum bfa_fcs_lport_event event);
95static void bfa_fcs_port_sm_init(struct bfa_fcs_port_s *port, 89static void bfa_fcs_lport_sm_init(struct bfa_fcs_lport_s *port,
96 enum bfa_fcs_port_event event); 90 enum bfa_fcs_lport_event event);
97static void bfa_fcs_port_sm_online(struct bfa_fcs_port_s *port, 91static void bfa_fcs_lport_sm_online(struct bfa_fcs_lport_s *port,
98 enum bfa_fcs_port_event event); 92 enum bfa_fcs_lport_event event);
99static void bfa_fcs_port_sm_offline(struct bfa_fcs_port_s *port, 93static void bfa_fcs_lport_sm_offline(struct bfa_fcs_lport_s *port,
100 enum bfa_fcs_port_event event); 94 enum bfa_fcs_lport_event event);
101static void bfa_fcs_port_sm_deleting(struct bfa_fcs_port_s *port, 95static void bfa_fcs_lport_sm_deleting(struct bfa_fcs_lport_s *port,
102 enum bfa_fcs_port_event event); 96 enum bfa_fcs_lport_event event);
103 97
104static void 98static void
105bfa_fcs_port_sm_uninit(struct bfa_fcs_port_s *port, 99bfa_fcs_lport_sm_uninit(
106 enum bfa_fcs_port_event event) 100 struct bfa_fcs_lport_s *port,
101 enum bfa_fcs_lport_event event)
107{ 102{
108 bfa_trc(port->fcs, port->port_cfg.pwwn); 103 bfa_trc(port->fcs, port->port_cfg.pwwn);
109 bfa_trc(port->fcs, event); 104 bfa_trc(port->fcs, event);
110 105
111 switch (event) { 106 switch (event) {
112 case BFA_FCS_PORT_SM_CREATE: 107 case BFA_FCS_PORT_SM_CREATE:
113 bfa_sm_set_state(port, bfa_fcs_port_sm_init); 108 bfa_sm_set_state(port, bfa_fcs_lport_sm_init);
114 break; 109 break;
115 110
116 default: 111 default:
@@ -119,20 +114,21 @@ bfa_fcs_port_sm_uninit(struct bfa_fcs_port_s *port,
119} 114}
120 115
121static void 116static void
122bfa_fcs_port_sm_init(struct bfa_fcs_port_s *port, enum bfa_fcs_port_event event) 117bfa_fcs_lport_sm_init(struct bfa_fcs_lport_s *port,
118 enum bfa_fcs_lport_event event)
123{ 119{
124 bfa_trc(port->fcs, port->port_cfg.pwwn); 120 bfa_trc(port->fcs, port->port_cfg.pwwn);
125 bfa_trc(port->fcs, event); 121 bfa_trc(port->fcs, event);
126 122
127 switch (event) { 123 switch (event) {
128 case BFA_FCS_PORT_SM_ONLINE: 124 case BFA_FCS_PORT_SM_ONLINE:
129 bfa_sm_set_state(port, bfa_fcs_port_sm_online); 125 bfa_sm_set_state(port, bfa_fcs_lport_sm_online);
130 bfa_fcs_port_online_actions(port); 126 bfa_fcs_lport_online_actions(port);
131 break; 127 break;
132 128
133 case BFA_FCS_PORT_SM_DELETE: 129 case BFA_FCS_PORT_SM_DELETE:
134 bfa_sm_set_state(port, bfa_fcs_port_sm_uninit); 130 bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit);
135 bfa_fcs_port_deleted(port); 131 bfa_fcs_lport_deleted(port);
136 break; 132 break;
137 133
138 case BFA_FCS_PORT_SM_OFFLINE: 134 case BFA_FCS_PORT_SM_OFFLINE:
@@ -144,19 +140,20 @@ bfa_fcs_port_sm_init(struct bfa_fcs_port_s *port, enum bfa_fcs_port_event event)
144} 140}
145 141
146static void 142static void
147bfa_fcs_port_sm_online(struct bfa_fcs_port_s *port, 143bfa_fcs_lport_sm_online(
148 enum bfa_fcs_port_event event) 144 struct bfa_fcs_lport_s *port,
145 enum bfa_fcs_lport_event event)
149{ 146{
150 struct bfa_fcs_rport_s *rport; 147 struct bfa_fcs_rport_s *rport;
151 struct list_head *qe, *qen; 148 struct list_head *qe, *qen;
152 149
153 bfa_trc(port->fcs, port->port_cfg.pwwn); 150 bfa_trc(port->fcs, port->port_cfg.pwwn);
154 bfa_trc(port->fcs, event); 151 bfa_trc(port->fcs, event);
155 152
156 switch (event) { 153 switch (event) {
157 case BFA_FCS_PORT_SM_OFFLINE: 154 case BFA_FCS_PORT_SM_OFFLINE:
158 bfa_sm_set_state(port, bfa_fcs_port_sm_offline); 155 bfa_sm_set_state(port, bfa_fcs_lport_sm_offline);
159 bfa_fcs_port_offline_actions(port); 156 bfa_fcs_lport_offline_actions(port);
160 break; 157 break;
161 158
162 case BFA_FCS_PORT_SM_DELETE: 159 case BFA_FCS_PORT_SM_DELETE:
@@ -164,12 +161,12 @@ bfa_fcs_port_sm_online(struct bfa_fcs_port_s *port,
164 __port_action[port->fabric->fab_type].offline(port); 161 __port_action[port->fabric->fab_type].offline(port);
165 162
166 if (port->num_rports == 0) { 163 if (port->num_rports == 0) {
167 bfa_sm_set_state(port, bfa_fcs_port_sm_uninit); 164 bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit);
168 bfa_fcs_port_deleted(port); 165 bfa_fcs_lport_deleted(port);
169 } else { 166 } else {
170 bfa_sm_set_state(port, bfa_fcs_port_sm_deleting); 167 bfa_sm_set_state(port, bfa_fcs_lport_sm_deleting);
171 list_for_each_safe(qe, qen, &port->rport_q) { 168 list_for_each_safe(qe, qen, &port->rport_q) {
172 rport = (struct bfa_fcs_rport_s *)qe; 169 rport = (struct bfa_fcs_rport_s *) qe;
173 bfa_fcs_rport_delete(rport); 170 bfa_fcs_rport_delete(rport);
174 } 171 }
175 } 172 }
@@ -184,29 +181,30 @@ bfa_fcs_port_sm_online(struct bfa_fcs_port_s *port,
184} 181}
185 182
186static void 183static void
187bfa_fcs_port_sm_offline(struct bfa_fcs_port_s *port, 184bfa_fcs_lport_sm_offline(
188 enum bfa_fcs_port_event event) 185 struct bfa_fcs_lport_s *port,
186 enum bfa_fcs_lport_event event)
189{ 187{
190 struct bfa_fcs_rport_s *rport; 188 struct bfa_fcs_rport_s *rport;
191 struct list_head *qe, *qen; 189 struct list_head *qe, *qen;
192 190
193 bfa_trc(port->fcs, port->port_cfg.pwwn); 191 bfa_trc(port->fcs, port->port_cfg.pwwn);
194 bfa_trc(port->fcs, event); 192 bfa_trc(port->fcs, event);
195 193
196 switch (event) { 194 switch (event) {
197 case BFA_FCS_PORT_SM_ONLINE: 195 case BFA_FCS_PORT_SM_ONLINE:
198 bfa_sm_set_state(port, bfa_fcs_port_sm_online); 196 bfa_sm_set_state(port, bfa_fcs_lport_sm_online);
199 bfa_fcs_port_online_actions(port); 197 bfa_fcs_lport_online_actions(port);
200 break; 198 break;
201 199
202 case BFA_FCS_PORT_SM_DELETE: 200 case BFA_FCS_PORT_SM_DELETE:
203 if (port->num_rports == 0) { 201 if (port->num_rports == 0) {
204 bfa_sm_set_state(port, bfa_fcs_port_sm_uninit); 202 bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit);
205 bfa_fcs_port_deleted(port); 203 bfa_fcs_lport_deleted(port);
206 } else { 204 } else {
207 bfa_sm_set_state(port, bfa_fcs_port_sm_deleting); 205 bfa_sm_set_state(port, bfa_fcs_lport_sm_deleting);
208 list_for_each_safe(qe, qen, &port->rport_q) { 206 list_for_each_safe(qe, qen, &port->rport_q) {
209 rport = (struct bfa_fcs_rport_s *)qe; 207 rport = (struct bfa_fcs_rport_s *) qe;
210 bfa_fcs_rport_delete(rport); 208 bfa_fcs_rport_delete(rport);
211 } 209 }
212 } 210 }
@@ -222,8 +220,9 @@ bfa_fcs_port_sm_offline(struct bfa_fcs_port_s *port,
222} 220}
223 221
224static void 222static void
225bfa_fcs_port_sm_deleting(struct bfa_fcs_port_s *port, 223bfa_fcs_lport_sm_deleting(
226 enum bfa_fcs_port_event event) 224 struct bfa_fcs_lport_s *port,
225 enum bfa_fcs_lport_event event)
227{ 226{
228 bfa_trc(port->fcs, port->port_cfg.pwwn); 227 bfa_trc(port->fcs, port->port_cfg.pwwn);
229 bfa_trc(port->fcs, event); 228 bfa_trc(port->fcs, event);
@@ -231,8 +230,8 @@ bfa_fcs_port_sm_deleting(struct bfa_fcs_port_s *port,
231 switch (event) { 230 switch (event) {
232 case BFA_FCS_PORT_SM_DELRPORT: 231 case BFA_FCS_PORT_SM_DELRPORT:
233 if (port->num_rports == 0) { 232 if (port->num_rports == 0) {
234 bfa_sm_set_state(port, bfa_fcs_port_sm_uninit); 233 bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit);
235 bfa_fcs_port_deleted(port); 234 bfa_fcs_lport_deleted(port);
236 } 235 }
237 break; 236 break;
238 237
@@ -241,74 +240,44 @@ bfa_fcs_port_sm_deleting(struct bfa_fcs_port_s *port,
241 } 240 }
242} 241}
243 242
244
245
246/** 243/**
247 * fcs_port_pvt 244 * fcs_port_pvt
248 */ 245 */
249 246
250/**
251 * Send AEN notification
252 */
253static void
254bfa_fcs_port_aen_post(struct bfa_fcs_port_s *port,
255 enum bfa_lport_aen_event event)
256{
257 union bfa_aen_data_u aen_data;
258 struct bfa_log_mod_s *logmod = port->fcs->logm;
259 enum bfa_port_role role = port->port_cfg.roles;
260 wwn_t lpwwn = bfa_fcs_port_get_pwwn(port);
261 char lpwwn_ptr[BFA_STRING_32];
262 char *role_str[BFA_PORT_ROLE_FCP_MAX / 2 + 1] =
263 { "Initiator", "Target", "IPFC" };
264
265 wwn2str(lpwwn_ptr, lpwwn);
266
267 bfa_assert(role <= BFA_PORT_ROLE_FCP_MAX);
268
269 bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, event), lpwwn_ptr,
270 role_str[role/2]);
271
272 aen_data.lport.vf_id = port->fabric->vf_id;
273 aen_data.lport.roles = role;
274 aen_data.lport.ppwwn =
275 bfa_fcs_port_get_pwwn(bfa_fcs_get_base_port(port->fcs));
276 aen_data.lport.lpwwn = lpwwn;
277}
278
279/* 247/*
280 * Send a LS reject 248 * Send a LS reject
281 */ 249 */
282static void 250static void
283bfa_fcs_port_send_ls_rjt(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs, 251bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
284 u8 reason_code, u8 reason_code_expl) 252 u8 reason_code, u8 reason_code_expl)
285{ 253{
286 struct fchs_s fchs; 254 struct fchs_s fchs;
287 struct bfa_fcxp_s *fcxp; 255 struct bfa_fcxp_s *fcxp;
288 struct bfa_rport_s *bfa_rport = NULL; 256 struct bfa_rport_s *bfa_rport = NULL;
289 int len; 257 int len;
290 258
259 bfa_trc(port->fcs, rx_fchs->d_id);
291 bfa_trc(port->fcs, rx_fchs->s_id); 260 bfa_trc(port->fcs, rx_fchs->s_id);
292 261
293 fcxp = bfa_fcs_fcxp_alloc(port->fcs); 262 fcxp = bfa_fcs_fcxp_alloc(port->fcs);
294 if (!fcxp) 263 if (!fcxp)
295 return; 264 return;
296 265
297 len = fc_ls_rjt_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id, 266 len = fc_ls_rjt_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
298 bfa_fcs_port_get_fcid(port), rx_fchs->ox_id, 267 rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
299 reason_code, reason_code_expl); 268 rx_fchs->ox_id, reason_code, reason_code_expl);
300 269
301 bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag, 270 bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
302 BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, 271 BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
303 FC_MAX_PDUSZ, 0); 272 FC_MAX_PDUSZ, 0);
304} 273}
305 274
306/** 275/**
307 * Process incoming plogi from a remote port. 276 * Process incoming plogi from a remote port.
308 */ 277 */
309static void 278static void
310bfa_fcs_port_plogi(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs, 279bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
311 struct fc_logi_s *plogi) 280 struct fchs_s *rx_fchs, struct fc_logi_s *plogi)
312{ 281{
313 struct bfa_fcs_rport_s *rport; 282 struct bfa_fcs_rport_s *rport;
314 283
@@ -328,46 +297,40 @@ bfa_fcs_port_plogi(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs,
328 /* 297 /*
329 * send a LS reject 298 * send a LS reject
330 */ 299 */
331 bfa_fcs_port_send_ls_rjt(port, rx_fchs, 300 bfa_fcs_lport_send_ls_rjt(port, rx_fchs,
332 FC_LS_RJT_RSN_PROTOCOL_ERROR, 301 FC_LS_RJT_RSN_PROTOCOL_ERROR,
333 FC_LS_RJT_EXP_SPARMS_ERR_OPTIONS); 302 FC_LS_RJT_EXP_SPARMS_ERR_OPTIONS);
334 return; 303 return;
335 } 304 }
336 305
337 /** 306 /**
338* Direct Attach P2P mode : verify address assigned by the r-port. 307 * Direct Attach P2P mode : verify address assigned by the r-port.
339 */ 308 */
340 if ((!bfa_fcs_fabric_is_switched(port->fabric)) 309 if ((!bfa_fcs_fabric_is_switched(port->fabric)) &&
341 && 310 (memcmp((void *)&bfa_fcs_lport_get_pwwn(port),
342 (memcmp 311 (void *)&plogi->port_name, sizeof(wwn_t)) < 0)) {
343 ((void *)&bfa_fcs_port_get_pwwn(port), (void *)&plogi->port_name,
344 sizeof(wwn_t)) < 0)) {
345 if (BFA_FCS_PID_IS_WKA(rx_fchs->d_id)) { 312 if (BFA_FCS_PID_IS_WKA(rx_fchs->d_id)) {
346 /* 313 /* Address assigned to us cannot be a WKA */
347 * Address assigned to us cannot be a WKA 314 bfa_fcs_lport_send_ls_rjt(port, rx_fchs,
348 */
349 bfa_fcs_port_send_ls_rjt(port, rx_fchs,
350 FC_LS_RJT_RSN_PROTOCOL_ERROR, 315 FC_LS_RJT_RSN_PROTOCOL_ERROR,
351 FC_LS_RJT_EXP_INVALID_NPORT_ID); 316 FC_LS_RJT_EXP_INVALID_NPORT_ID);
352 return; 317 return;
353 } 318 }
354 port->pid = rx_fchs->d_id; 319 port->pid = rx_fchs->d_id;
355 } 320 }
356 321
357 /** 322 /**
358 * First, check if we know the device by pwwn. 323 * First, check if we know the device by pwwn.
359 */ 324 */
360 rport = bfa_fcs_port_get_rport_by_pwwn(port, plogi->port_name); 325 rport = bfa_fcs_lport_get_rport_by_pwwn(port, plogi->port_name);
361 if (rport) { 326 if (rport) {
362 /** 327 /**
363 * Direct Attach P2P mode: handle address assigned by the rport. 328 * Direct Attach P2P mode : handle address assigned by r-port.
364 */ 329 */
365 if ((!bfa_fcs_fabric_is_switched(port->fabric)) 330 if ((!bfa_fcs_fabric_is_switched(port->fabric)) &&
366 && 331 (memcmp((void *)&bfa_fcs_lport_get_pwwn(port),
367 (memcmp 332 (void *)&plogi->port_name, sizeof(wwn_t)) < 0)) {
368 ((void *)&bfa_fcs_port_get_pwwn(port), 333 port->pid = rx_fchs->d_id;
369 (void *)&plogi->port_name, sizeof(wwn_t)) < 0)) {
370 port->pid = rx_fchs->d_id;
371 rport->pid = rx_fchs->s_id; 334 rport->pid = rx_fchs->s_id;
372 } 335 }
373 bfa_fcs_rport_plogi(rport, rx_fchs, plogi); 336 bfa_fcs_rport_plogi(rport, rx_fchs, plogi);
@@ -377,7 +340,7 @@ bfa_fcs_port_plogi(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs,
377 /** 340 /**
378 * Next, lookup rport by PID. 341 * Next, lookup rport by PID.
379 */ 342 */
380 rport = bfa_fcs_port_get_rport_by_pid(port, rx_fchs->s_id); 343 rport = bfa_fcs_lport_get_rport_by_pid(port, rx_fchs->s_id);
381 if (!rport) { 344 if (!rport) {
382 /** 345 /**
383 * Inbound PLOGI from a new device. 346 * Inbound PLOGI from a new device.
@@ -416,39 +379,40 @@ bfa_fcs_port_plogi(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs,
416 * Since it does not require a login, it is processed here. 379 * Since it does not require a login, it is processed here.
417 */ 380 */
418static void 381static void
419bfa_fcs_port_echo(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs, 382bfa_fcs_lport_echo(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
420 struct fc_echo_s *echo, u16 rx_len) 383 struct fc_echo_s *echo, u16 rx_len)
421{ 384{
422 struct fchs_s fchs; 385 struct fchs_s fchs;
423 struct bfa_fcxp_s *fcxp; 386 struct bfa_fcxp_s *fcxp;
424 struct bfa_rport_s *bfa_rport = NULL; 387 struct bfa_rport_s *bfa_rport = NULL;
425 int len, pyld_len; 388 int len, pyld_len;
426 389
427 bfa_trc(port->fcs, rx_fchs->s_id); 390 bfa_trc(port->fcs, rx_fchs->s_id);
428 bfa_trc(port->fcs, rx_fchs->d_id); 391 bfa_trc(port->fcs, rx_fchs->d_id);
429 bfa_trc(port->fcs, rx_len);
430 392
431 fcxp = bfa_fcs_fcxp_alloc(port->fcs); 393 fcxp = bfa_fcs_fcxp_alloc(port->fcs);
432 if (!fcxp) 394 if (!fcxp)
433 return; 395 return;
434 396
435 len = fc_ls_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id, 397 len = fc_ls_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
436 bfa_fcs_port_get_fcid(port), rx_fchs->ox_id); 398 rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
399 rx_fchs->ox_id);
437 400
438 /* 401 /*
439 * Copy the payload (if any) from the echo frame 402 * Copy the payload (if any) from the echo frame
440 */ 403 */
441 pyld_len = rx_len - sizeof(struct fchs_s); 404 pyld_len = rx_len - sizeof(struct fchs_s);
405 bfa_trc(port->fcs, rx_len);
442 bfa_trc(port->fcs, pyld_len); 406 bfa_trc(port->fcs, pyld_len);
443 407
444 if (pyld_len > len) 408 if (pyld_len > len)
445 memcpy(((u8 *) bfa_fcxp_get_reqbuf(fcxp)) + 409 memcpy(((u8 *) bfa_fcxp_get_reqbuf(fcxp)) +
446 sizeof(struct fc_echo_s), (echo + 1), 410 sizeof(struct fc_echo_s), (echo + 1),
447 (pyld_len - sizeof(struct fc_echo_s))); 411 (pyld_len - sizeof(struct fc_echo_s)));
448 412
449 bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag, 413 bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
450 BFA_FALSE, FC_CLASS_3, pyld_len, &fchs, NULL, NULL, 414 BFA_FALSE, FC_CLASS_3, pyld_len, &fchs, NULL, NULL,
451 FC_MAX_PDUSZ, 0); 415 FC_MAX_PDUSZ, 0);
452} 416}
453 417
454/* 418/*
@@ -456,16 +420,16 @@ bfa_fcs_port_echo(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs,
456 * Since it does not require a login, it is processed here. 420 * Since it does not require a login, it is processed here.
457 */ 421 */
458static void 422static void
459bfa_fcs_port_rnid(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs, 423bfa_fcs_lport_rnid(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
460 struct fc_rnid_cmd_s *rnid, u16 rx_len) 424 struct fc_rnid_cmd_s *rnid, u16 rx_len)
461{ 425{
462 struct fc_rnid_common_id_data_s common_id_data; 426 struct fc_rnid_common_id_data_s common_id_data;
463 struct fc_rnid_general_topology_data_s gen_topo_data; 427 struct fc_rnid_general_topology_data_s gen_topo_data;
464 struct fchs_s fchs; 428 struct fchs_s fchs;
465 struct bfa_fcxp_s *fcxp; 429 struct bfa_fcxp_s *fcxp;
466 struct bfa_rport_s *bfa_rport = NULL; 430 struct bfa_rport_s *bfa_rport = NULL;
467 u16 len; 431 u16 len;
468 u32 data_format; 432 u32 data_format;
469 433
470 bfa_trc(port->fcs, rx_fchs->s_id); 434 bfa_trc(port->fcs, rx_fchs->s_id);
471 bfa_trc(port->fcs, rx_fchs->d_id); 435 bfa_trc(port->fcs, rx_fchs->d_id);
@@ -495,28 +459,26 @@ bfa_fcs_port_rnid(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs,
495 /* 459 /*
496 * Copy the Node Id Info 460 * Copy the Node Id Info
497 */ 461 */
498 common_id_data.port_name = bfa_fcs_port_get_pwwn(port); 462 common_id_data.port_name = bfa_fcs_lport_get_pwwn(port);
499 common_id_data.node_name = bfa_fcs_port_get_nwwn(port); 463 common_id_data.node_name = bfa_fcs_lport_get_nwwn(port);
500 464
501 len = fc_rnid_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id, 465 len = fc_rnid_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
502 bfa_fcs_port_get_fcid(port), rx_fchs->ox_id, 466 rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
503 data_format, &common_id_data, &gen_topo_data); 467 rx_fchs->ox_id, data_format, &common_id_data,
468 &gen_topo_data);
504 469
505 bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag, 470 bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
506 BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, 471 BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
507 FC_MAX_PDUSZ, 0); 472 FC_MAX_PDUSZ, 0);
508
509 return;
510} 473}
511 474
512/* 475/*
513 * Fill out General Topolpgy Discovery Data for RNID ELS. 476 * Fill out General Topolpgy Discovery Data for RNID ELS.
514 */ 477 */
515static void 478static void
516bfa_fs_port_get_gen_topo_data(struct bfa_fcs_port_s *port, 479bfa_fs_port_get_gen_topo_data(struct bfa_fcs_lport_s *port,
517 struct fc_rnid_general_topology_data_s *gen_topo_data) 480 struct fc_rnid_general_topology_data_s *gen_topo_data)
518{ 481{
519
520 bfa_os_memset(gen_topo_data, 0, 482 bfa_os_memset(gen_topo_data, 0,
521 sizeof(struct fc_rnid_general_topology_data_s)); 483 sizeof(struct fc_rnid_general_topology_data_s));
522 484
@@ -526,76 +488,111 @@ bfa_fs_port_get_gen_topo_data(struct bfa_fcs_port_s *port,
526} 488}
527 489
528static void 490static void
529bfa_fcs_port_online_actions(struct bfa_fcs_port_s *port) 491bfa_fcs_lport_online_actions(struct bfa_fcs_lport_s *port)
530{ 492{
493 struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
494 char lpwwn_buf[BFA_STRING_32];
495
531 bfa_trc(port->fcs, port->fabric->oper_type); 496 bfa_trc(port->fcs, port->fabric->oper_type);
532 497
533 __port_action[port->fabric->fab_type].init(port); 498 __port_action[port->fabric->fab_type].init(port);
534 __port_action[port->fabric->fab_type].online(port); 499 __port_action[port->fabric->fab_type].online(port);
535 500
536 bfa_fcs_port_aen_post(port, BFA_LPORT_AEN_ONLINE); 501 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
537 bfa_fcb_port_online(port->fcs->bfad, port->port_cfg.roles, 502 BFA_LOG(KERN_INFO, bfad, log_level,
538 port->fabric->vf_drv, (port->vport == NULL) ? 503 "Logical port online: WWN = %s Role = %s\n",
539 NULL : port->vport->vport_drv); 504 lpwwn_buf, "Initiator");
505
506 bfad->bfad_flags |= BFAD_PORT_ONLINE;
540} 507}
541 508
542static void 509static void
543bfa_fcs_port_offline_actions(struct bfa_fcs_port_s *port) 510bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port)
544{ 511{
545 struct list_head *qe, *qen; 512 struct list_head *qe, *qen;
546 struct bfa_fcs_rport_s *rport; 513 struct bfa_fcs_rport_s *rport;
514 struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
515 char lpwwn_buf[BFA_STRING_32];
547 516
548 bfa_trc(port->fcs, port->fabric->oper_type); 517 bfa_trc(port->fcs, port->fabric->oper_type);
549 518
550 __port_action[port->fabric->fab_type].offline(port); 519 __port_action[port->fabric->fab_type].offline(port);
551 520
521 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
552 if (bfa_fcs_fabric_is_online(port->fabric) == BFA_TRUE) 522 if (bfa_fcs_fabric_is_online(port->fabric) == BFA_TRUE)
553 bfa_fcs_port_aen_post(port, BFA_LPORT_AEN_DISCONNECT); 523 BFA_LOG(KERN_ERR, bfad, log_level,
524 "Logical port lost fabric connectivity: WWN = %s Role = %s\n",
525 lpwwn_buf, "Initiator");
554 else 526 else
555 bfa_fcs_port_aen_post(port, BFA_LPORT_AEN_OFFLINE); 527 BFA_LOG(KERN_INFO, bfad, log_level,
556 bfa_fcb_port_offline(port->fcs->bfad, port->port_cfg.roles, 528 "Logical port taken offline: WWN = %s Role = %s\n",
557 port->fabric->vf_drv, 529 lpwwn_buf, "Initiator");
558 (port->vport == NULL) ? NULL : port->vport->vport_drv);
559 530
560 list_for_each_safe(qe, qen, &port->rport_q) { 531 list_for_each_safe(qe, qen, &port->rport_q) {
561 rport = (struct bfa_fcs_rport_s *)qe; 532 rport = (struct bfa_fcs_rport_s *) qe;
562 bfa_fcs_rport_offline(rport); 533 bfa_fcs_rport_offline(rport);
563 } 534 }
564} 535}
565 536
566static void 537static void
567bfa_fcs_port_unknown_init(struct bfa_fcs_port_s *port) 538bfa_fcs_lport_unknown_init(struct bfa_fcs_lport_s *port)
568{ 539{
569 bfa_assert(0); 540 bfa_assert(0);
570} 541}
571 542
572static void 543static void
573bfa_fcs_port_unknown_online(struct bfa_fcs_port_s *port) 544bfa_fcs_lport_unknown_online(struct bfa_fcs_lport_s *port)
574{ 545{
575 bfa_assert(0); 546 bfa_assert(0);
576} 547}
577 548
578static void 549static void
579bfa_fcs_port_unknown_offline(struct bfa_fcs_port_s *port) 550bfa_fcs_lport_unknown_offline(struct bfa_fcs_lport_s *port)
580{ 551{
581 bfa_assert(0); 552 bfa_assert(0);
582} 553}
583 554
584static void 555static void
585bfa_fcs_port_deleted(struct bfa_fcs_port_s *port) 556bfa_fcs_lport_abts_acc(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs)
586{ 557{
587 bfa_fcs_port_aen_post(port, BFA_LPORT_AEN_DELETE); 558 struct fchs_s fchs;
559 struct bfa_fcxp_s *fcxp;
560 int len;
588 561
589 /* 562 bfa_trc(port->fcs, rx_fchs->d_id);
590 * Base port will be deleted by the OS driver 563 bfa_trc(port->fcs, rx_fchs->s_id);
591 */ 564
565 fcxp = bfa_fcs_fcxp_alloc(port->fcs);
566 if (!fcxp)
567 return;
568
569 len = fc_ba_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
570 rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
571 rx_fchs->ox_id, 0);
572
573 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag,
574 BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
575 FC_MAX_PDUSZ, 0);
576}
577static void
578bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port)
579{
580 struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
581 char lpwwn_buf[BFA_STRING_32];
582
583 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
584 BFA_LOG(KERN_INFO, bfad, log_level,
585 "Logical port deleted: WWN = %s Role = %s\n",
586 lpwwn_buf, "Initiator");
587
588 /* Base port will be deleted by the OS driver */
592 if (port->vport) { 589 if (port->vport) {
593 bfa_fcb_port_delete(port->fcs->bfad, port->port_cfg.roles, 590 bfa_fcb_lport_delete(port->fcs->bfad, port->port_cfg.roles,
594 port->fabric->vf_drv, 591 port->fabric->vf_drv,
595 port->vport ? port->vport->vport_drv : NULL); 592 port->vport ? port->vport->vport_drv : NULL);
596 bfa_fcs_vport_delete_comp(port->vport); 593 bfa_fcs_vport_delete_comp(port->vport);
597 } else { 594 } else {
598 bfa_fcs_fabric_port_delete_comp(port->fabric); 595 bfa_fcs_fabric_port_delete_comp(port->fabric);
599 } 596 }
600} 597}
601 598
@@ -608,7 +605,7 @@ bfa_fcs_port_deleted(struct bfa_fcs_port_s *port)
608 * Module initialization 605 * Module initialization
609 */ 606 */
610void 607void
611bfa_fcs_port_modinit(struct bfa_fcs_s *fcs) 608bfa_fcs_lport_modinit(struct bfa_fcs_s *fcs)
612{ 609{
613 610
614} 611}
@@ -617,25 +614,25 @@ bfa_fcs_port_modinit(struct bfa_fcs_s *fcs)
617 * Module cleanup 614 * Module cleanup
618 */ 615 */
619void 616void
620bfa_fcs_port_modexit(struct bfa_fcs_s *fcs) 617bfa_fcs_lport_modexit(struct bfa_fcs_s *fcs)
621{ 618{
622 bfa_fcs_modexit_comp(fcs); 619 bfa_fcs_modexit_comp(fcs);
623} 620}
624 621
625/** 622/**
626 * Unsolicited frame receive handling. 623 * Unsolicited frame receive handling.
627 */ 624 */
628void 625void
629bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs, 626bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
630 u16 len) 627 struct fchs_s *fchs, u16 len)
631{ 628{
632 u32 pid = fchs->s_id; 629 u32 pid = fchs->s_id;
633 struct bfa_fcs_rport_s *rport = NULL; 630 struct bfa_fcs_rport_s *rport = NULL;
634 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); 631 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
635 632
636 bfa_stats(lport, uf_recvs); 633 bfa_stats(lport, uf_recvs);
637 634
638 if (!bfa_fcs_port_is_online(lport)) { 635 if (!bfa_fcs_lport_is_online(lport)) {
639 bfa_stats(lport, uf_recv_drops); 636 bfa_stats(lport, uf_recv_drops);
640 return; 637 return;
641 } 638 }
@@ -648,7 +645,7 @@ bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs,
648 */ 645 */
649 if ((fchs->type == FC_TYPE_ELS) && 646 if ((fchs->type == FC_TYPE_ELS) &&
650 (els_cmd->els_code == FC_ELS_PLOGI)) { 647 (els_cmd->els_code == FC_ELS_PLOGI)) {
651 bfa_fcs_port_plogi(lport, fchs, (struct fc_logi_s *) els_cmd); 648 bfa_fcs_lport_plogi(lport, fchs, (struct fc_logi_s *) els_cmd);
652 return; 649 return;
653 } 650 }
654 651
@@ -656,8 +653,8 @@ bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs,
656 * Handle ECHO separately. 653 * Handle ECHO separately.
657 */ 654 */
658 if ((fchs->type == FC_TYPE_ELS) && (els_cmd->els_code == FC_ELS_ECHO)) { 655 if ((fchs->type == FC_TYPE_ELS) && (els_cmd->els_code == FC_ELS_ECHO)) {
659 bfa_fcs_port_echo(lport, fchs, 656 bfa_fcs_lport_echo(lport, fchs,
660 (struct fc_echo_s *) els_cmd, len); 657 (struct fc_echo_s *)els_cmd, len);
661 return; 658 return;
662 } 659 }
663 660
@@ -665,15 +662,21 @@ bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs,
665 * Handle RNID separately. 662 * Handle RNID separately.
666 */ 663 */
667 if ((fchs->type == FC_TYPE_ELS) && (els_cmd->els_code == FC_ELS_RNID)) { 664 if ((fchs->type == FC_TYPE_ELS) && (els_cmd->els_code == FC_ELS_RNID)) {
668 bfa_fcs_port_rnid(lport, fchs, 665 bfa_fcs_lport_rnid(lport, fchs,
669 (struct fc_rnid_cmd_s *) els_cmd, len); 666 (struct fc_rnid_cmd_s *) els_cmd, len);
670 return; 667 return;
671 } 668 }
672 669
670 if (fchs->type == FC_TYPE_BLS) {
671 if ((fchs->routing == FC_RTG_BASIC_LINK) &&
672 (fchs->cat_info == FC_CAT_ABTS))
673 bfa_fcs_lport_abts_acc(lport, fchs);
674 return;
675 }
673 /** 676 /**
674 * look for a matching remote port ID 677 * look for a matching remote port ID
675 */ 678 */
676 rport = bfa_fcs_port_get_rport_by_pid(lport, pid); 679 rport = bfa_fcs_lport_get_rport_by_pid(lport, pid);
677 if (rport) { 680 if (rport) {
678 bfa_trc(rport->fcs, fchs->s_id); 681 bfa_trc(rport->fcs, fchs->s_id);
679 bfa_trc(rport->fcs, fchs->d_id); 682 bfa_trc(rport->fcs, fchs->d_id);
@@ -694,7 +697,7 @@ bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs,
694 697
695 bfa_trc(lport->fcs, els_cmd->els_code); 698 bfa_trc(lport->fcs, els_cmd->els_code);
696 if (els_cmd->els_code == FC_ELS_RSCN) { 699 if (els_cmd->els_code == FC_ELS_RSCN) {
697 bfa_fcs_port_scn_process_rscn(lport, fchs, len); 700 bfa_fcs_lport_scn_process_rscn(lport, fchs, len);
698 return; 701 return;
699 } 702 }
700 703
@@ -702,7 +705,6 @@ bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs,
702 /** 705 /**
703 * @todo Handle LOGO frames received. 706 * @todo Handle LOGO frames received.
704 */ 707 */
705 bfa_trc(lport->fcs, els_cmd->els_code);
706 return; 708 return;
707 } 709 }
708 710
@@ -710,14 +712,13 @@ bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs,
710 /** 712 /**
711 * @todo Handle PRLI frames received. 713 * @todo Handle PRLI frames received.
712 */ 714 */
713 bfa_trc(lport->fcs, els_cmd->els_code);
714 return; 715 return;
715 } 716 }
716 717
717 /** 718 /**
718 * Unhandled ELS frames. Send a LS_RJT. 719 * Unhandled ELS frames. Send a LS_RJT.
719 */ 720 */
720 bfa_fcs_port_send_ls_rjt(lport, fchs, FC_LS_RJT_RSN_CMD_NOT_SUPP, 721 bfa_fcs_lport_send_ls_rjt(lport, fchs, FC_LS_RJT_RSN_CMD_NOT_SUPP,
721 FC_LS_RJT_EXP_NO_ADDL_INFO); 722 FC_LS_RJT_EXP_NO_ADDL_INFO);
722 723
723} 724}
@@ -726,13 +727,13 @@ bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs,
726 * PID based Lookup for a R-Port in the Port R-Port Queue 727 * PID based Lookup for a R-Port in the Port R-Port Queue
727 */ 728 */
728struct bfa_fcs_rport_s * 729struct bfa_fcs_rport_s *
729bfa_fcs_port_get_rport_by_pid(struct bfa_fcs_port_s *port, u32 pid) 730bfa_fcs_lport_get_rport_by_pid(struct bfa_fcs_lport_s *port, u32 pid)
730{ 731{
731 struct bfa_fcs_rport_s *rport; 732 struct bfa_fcs_rport_s *rport;
732 struct list_head *qe; 733 struct list_head *qe;
733 734
734 list_for_each(qe, &port->rport_q) { 735 list_for_each(qe, &port->rport_q) {
735 rport = (struct bfa_fcs_rport_s *)qe; 736 rport = (struct bfa_fcs_rport_s *) qe;
736 if (rport->pid == pid) 737 if (rport->pid == pid)
737 return rport; 738 return rport;
738 } 739 }
@@ -745,13 +746,13 @@ bfa_fcs_port_get_rport_by_pid(struct bfa_fcs_port_s *port, u32 pid)
745 * PWWN based Lookup for a R-Port in the Port R-Port Queue 746 * PWWN based Lookup for a R-Port in the Port R-Port Queue
746 */ 747 */
747struct bfa_fcs_rport_s * 748struct bfa_fcs_rport_s *
748bfa_fcs_port_get_rport_by_pwwn(struct bfa_fcs_port_s *port, wwn_t pwwn) 749bfa_fcs_lport_get_rport_by_pwwn(struct bfa_fcs_lport_s *port, wwn_t pwwn)
749{ 750{
750 struct bfa_fcs_rport_s *rport; 751 struct bfa_fcs_rport_s *rport;
751 struct list_head *qe; 752 struct list_head *qe;
752 753
753 list_for_each(qe, &port->rport_q) { 754 list_for_each(qe, &port->rport_q) {
754 rport = (struct bfa_fcs_rport_s *)qe; 755 rport = (struct bfa_fcs_rport_s *) qe;
755 if (wwn_is_equal(rport->pwwn, pwwn)) 756 if (wwn_is_equal(rport->pwwn, pwwn))
756 return rport; 757 return rport;
757 } 758 }
@@ -764,13 +765,13 @@ bfa_fcs_port_get_rport_by_pwwn(struct bfa_fcs_port_s *port, wwn_t pwwn)
764 * NWWN based Lookup for a R-Port in the Port R-Port Queue 765 * NWWN based Lookup for a R-Port in the Port R-Port Queue
765 */ 766 */
766struct bfa_fcs_rport_s * 767struct bfa_fcs_rport_s *
767bfa_fcs_port_get_rport_by_nwwn(struct bfa_fcs_port_s *port, wwn_t nwwn) 768bfa_fcs_lport_get_rport_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t nwwn)
768{ 769{
769 struct bfa_fcs_rport_s *rport; 770 struct bfa_fcs_rport_s *rport;
770 struct list_head *qe; 771 struct list_head *qe;
771 772
772 list_for_each(qe, &port->rport_q) { 773 list_for_each(qe, &port->rport_q) {
773 rport = (struct bfa_fcs_rport_s *)qe; 774 rport = (struct bfa_fcs_rport_s *) qe;
774 if (wwn_is_equal(rport->nwwn, nwwn)) 775 if (wwn_is_equal(rport->nwwn, nwwn))
775 return rport; 776 return rport;
776 } 777 }
@@ -783,8 +784,9 @@ bfa_fcs_port_get_rport_by_nwwn(struct bfa_fcs_port_s *port, wwn_t nwwn)
783 * Called by rport module when new rports are discovered. 784 * Called by rport module when new rports are discovered.
784 */ 785 */
785void 786void
786bfa_fcs_port_add_rport(struct bfa_fcs_port_s *port, 787bfa_fcs_lport_add_rport(
787 struct bfa_fcs_rport_s *rport) 788 struct bfa_fcs_lport_s *port,
789 struct bfa_fcs_rport_s *rport)
788{ 790{
789 list_add_tail(&rport->qe, &port->rport_q); 791 list_add_tail(&rport->qe, &port->rport_q);
790 port->num_rports++; 792 port->num_rports++;
@@ -794,8 +796,9 @@ bfa_fcs_port_add_rport(struct bfa_fcs_port_s *port,
794 * Called by rport module to when rports are deleted. 796 * Called by rport module to when rports are deleted.
795 */ 797 */
796void 798void
797bfa_fcs_port_del_rport(struct bfa_fcs_port_s *port, 799bfa_fcs_lport_del_rport(
798 struct bfa_fcs_rport_s *rport) 800 struct bfa_fcs_lport_s *port,
801 struct bfa_fcs_rport_s *rport)
799{ 802{
800 bfa_assert(bfa_q_is_on_q(&port->rport_q, rport)); 803 bfa_assert(bfa_q_is_on_q(&port->rport_q, rport));
801 list_del(&rport->qe); 804 list_del(&rport->qe);
@@ -809,7 +812,7 @@ bfa_fcs_port_del_rport(struct bfa_fcs_port_s *port,
809 * Called by vport for virtual ports when FDISC is complete. 812 * Called by vport for virtual ports when FDISC is complete.
810 */ 813 */
811void 814void
812bfa_fcs_port_online(struct bfa_fcs_port_s *port) 815bfa_fcs_lport_online(struct bfa_fcs_lport_s *port)
813{ 816{
814 bfa_sm_send_event(port, BFA_FCS_PORT_SM_ONLINE); 817 bfa_sm_send_event(port, BFA_FCS_PORT_SM_ONLINE);
815} 818}
@@ -819,7 +822,7 @@ bfa_fcs_port_online(struct bfa_fcs_port_s *port)
819 * Called by vport for virtual ports when virtual port becomes offline. 822 * Called by vport for virtual ports when virtual port becomes offline.
820 */ 823 */
821void 824void
822bfa_fcs_port_offline(struct bfa_fcs_port_s *port) 825bfa_fcs_lport_offline(struct bfa_fcs_lport_s *port)
823{ 826{
824 bfa_sm_send_event(port, BFA_FCS_PORT_SM_OFFLINE); 827 bfa_sm_send_event(port, BFA_FCS_PORT_SM_OFFLINE);
825} 828}
@@ -831,40 +834,32 @@ bfa_fcs_port_offline(struct bfa_fcs_port_s *port)
831 * bfa_fcs_vport_delete_comp() for vports on completion. 834 * bfa_fcs_vport_delete_comp() for vports on completion.
832 */ 835 */
833void 836void
834bfa_fcs_port_delete(struct bfa_fcs_port_s *port) 837bfa_fcs_lport_delete(struct bfa_fcs_lport_s *port)
835{ 838{
836 bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELETE); 839 bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELETE);
837} 840}
838 841
839/** 842/**
840 * Called by fabric in private loop topology to process LIP event.
841 */
842void
843bfa_fcs_port_lip(struct bfa_fcs_port_s *port)
844{
845}
846
847/**
848 * Return TRUE if port is online, else return FALSE 843 * Return TRUE if port is online, else return FALSE
849 */ 844 */
850bfa_boolean_t 845bfa_boolean_t
851bfa_fcs_port_is_online(struct bfa_fcs_port_s *port) 846bfa_fcs_lport_is_online(struct bfa_fcs_lport_s *port)
852{ 847{
853 return bfa_sm_cmp_state(port, bfa_fcs_port_sm_online); 848 return bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online);
854} 849}
855 850
856/** 851/**
857 * Attach time initialization of logical ports. 852 * Attach time initialization of logical ports.
858 */ 853 */
859void 854void
860bfa_fcs_lport_attach(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs, 855bfa_fcs_lport_attach(struct bfa_fcs_lport_s *lport, struct bfa_fcs_s *fcs,
861 uint16_t vf_id, struct bfa_fcs_vport_s *vport) 856 u16 vf_id, struct bfa_fcs_vport_s *vport)
862{ 857{
863 lport->fcs = fcs; 858 lport->fcs = fcs;
864 lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id); 859 lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id);
865 lport->vport = vport; 860 lport->vport = vport;
866 lport->lp_tag = (vport) ? bfa_lps_get_tag(vport->lps) : 861 lport->lp_tag = (vport) ? bfa_lps_get_tag(vport->lps) :
867 bfa_lps_get_tag(lport->fabric->lps); 862 bfa_lps_get_tag(lport->fabric->lps);
868 863
869 INIT_LIST_HEAD(&lport->rport_q); 864 INIT_LIST_HEAD(&lport->rport_q);
870 lport->num_rports = 0; 865 lport->num_rports = 0;
@@ -876,21 +871,26 @@ bfa_fcs_lport_attach(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs,
876 */ 871 */
877 872
878void 873void
879bfa_fcs_lport_init(struct bfa_fcs_port_s *lport, 874bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
880 struct bfa_port_cfg_s *port_cfg) 875 struct bfa_lport_cfg_s *port_cfg)
881{ 876{
882 struct bfa_fcs_vport_s *vport = lport->vport; 877 struct bfa_fcs_vport_s *vport = lport->vport;
878 struct bfad_s *bfad = (struct bfad_s *)lport->fcs->bfad;
879 char lpwwn_buf[BFA_STRING_32];
883 880
884 bfa_os_assign(lport->port_cfg, *port_cfg); 881 bfa_os_assign(lport->port_cfg, *port_cfg);
885 882
886 lport->bfad_port = bfa_fcb_port_new(lport->fcs->bfad, lport, 883 lport->bfad_port = bfa_fcb_lport_new(lport->fcs->bfad, lport,
887 lport->port_cfg.roles, 884 lport->port_cfg.roles,
888 lport->fabric->vf_drv, 885 lport->fabric->vf_drv,
889 vport ? vport->vport_drv : NULL); 886 vport ? vport->vport_drv : NULL);
890 887
891 bfa_fcs_port_aen_post(lport, BFA_LPORT_AEN_NEW); 888 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(lport));
889 BFA_LOG(KERN_INFO, bfad, log_level,
890 "New logical port created: WWN = %s Role = %s\n",
891 lpwwn_buf, "Initiator");
892 892
893 bfa_sm_set_state(lport, bfa_fcs_port_sm_uninit); 893 bfa_sm_set_state(lport, bfa_fcs_lport_sm_uninit);
894 bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE); 894 bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE);
895} 895}
896 896
@@ -899,10 +899,11 @@ bfa_fcs_lport_init(struct bfa_fcs_port_s *lport,
899 */ 899 */
900 900
901void 901void
902bfa_fcs_port_get_attr(struct bfa_fcs_port_s *port, 902bfa_fcs_lport_get_attr(
903 struct bfa_port_attr_s *port_attr) 903 struct bfa_fcs_lport_s *port,
904 struct bfa_lport_attr_s *port_attr)
904{ 905{
905 if (bfa_sm_cmp_state(port, bfa_fcs_port_sm_online)) 906 if (bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online))
906 port_attr->pid = port->pid; 907 port_attr->pid = port->pid;
907 else 908 else
908 port_attr->pid = 0; 909 port_attr->pid = 0;
@@ -913,25 +914,4895 @@ bfa_fcs_port_get_attr(struct bfa_fcs_port_s *port,
913 port_attr->port_type = bfa_fcs_fabric_port_type(port->fabric); 914 port_attr->port_type = bfa_fcs_fabric_port_type(port->fabric);
914 port_attr->loopback = bfa_fcs_fabric_is_loopback(port->fabric); 915 port_attr->loopback = bfa_fcs_fabric_is_loopback(port->fabric);
915 port_attr->authfail = 916 port_attr->authfail =
916 bfa_fcs_fabric_is_auth_failed(port->fabric); 917 bfa_fcs_fabric_is_auth_failed(port->fabric);
917 port_attr->fabric_name = bfa_fcs_port_get_fabric_name(port); 918 port_attr->fabric_name = bfa_fcs_lport_get_fabric_name(port);
918 memcpy(port_attr->fabric_ip_addr, 919 memcpy(port_attr->fabric_ip_addr,
919 bfa_fcs_port_get_fabric_ipaddr(port), 920 bfa_fcs_lport_get_fabric_ipaddr(port),
920 BFA_FCS_FABRIC_IPADDR_SZ); 921 BFA_FCS_FABRIC_IPADDR_SZ);
921 922
922 if (port->vport != NULL) { 923 if (port->vport != NULL) {
923 port_attr->port_type = BFA_PPORT_TYPE_VPORT; 924 port_attr->port_type = BFA_PORT_TYPE_VPORT;
924 port_attr->fpma_mac = 925 port_attr->fpma_mac =
925 bfa_lps_get_lp_mac(port->vport->lps); 926 bfa_lps_get_lp_mac(port->vport->lps);
926 } else 927 } else {
927 port_attr->fpma_mac = 928 port_attr->fpma_mac =
928 bfa_lps_get_lp_mac(port->fabric->lps); 929 bfa_lps_get_lp_mac(port->fabric->lps);
930 }
931 } else {
932 port_attr->port_type = BFA_PORT_TYPE_UNKNOWN;
933 port_attr->state = BFA_LPORT_UNINIT;
934 }
935}
936
937/**
938 * bfa_fcs_lport_fab port fab functions
939 */
940
941/**
942 * Called by port to initialize fabric services of the base port.
943 */
944static void
945bfa_fcs_lport_fab_init(struct bfa_fcs_lport_s *port)
946{
947 bfa_fcs_lport_ns_init(port);
948 bfa_fcs_lport_scn_init(port);
949 bfa_fcs_lport_ms_init(port);
950}
951
952/**
953 * Called by port to notify transition to online state.
954 */
955static void
956bfa_fcs_lport_fab_online(struct bfa_fcs_lport_s *port)
957{
958 bfa_fcs_lport_ns_online(port);
959 bfa_fcs_lport_scn_online(port);
960}
961
962/**
963 * Called by port to notify transition to offline state.
964 */
965static void
966bfa_fcs_lport_fab_offline(struct bfa_fcs_lport_s *port)
967{
968 bfa_fcs_lport_ns_offline(port);
969 bfa_fcs_lport_scn_offline(port);
970 bfa_fcs_lport_ms_offline(port);
971}
972
973/**
974 * bfa_fcs_lport_n2n functions
975 */
976
977/**
978 * Called by fcs/port to initialize N2N topology.
979 */
980static void
981bfa_fcs_lport_n2n_init(struct bfa_fcs_lport_s *port)
982{
983}
984
985/**
986 * Called by fcs/port to notify transition to online state.
987 */
988static void
989bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port)
990{
991 struct bfa_fcs_lport_n2n_s *n2n_port = &port->port_topo.pn2n;
992 struct bfa_lport_cfg_s *pcfg = &port->port_cfg;
993 struct bfa_fcs_rport_s *rport;
994
995 bfa_trc(port->fcs, pcfg->pwwn);
996
997 /*
998 * If our PWWN is > than that of the r-port, we have to initiate PLOGI
999 * and assign an Address. if not, we need to wait for its PLOGI.
1000 *
1001 * If our PWWN is < than that of the remote port, it will send a PLOGI
1002 * with the PIDs assigned. The rport state machine take care of this
1003 * incoming PLOGI.
1004 */
1005 if (memcmp
1006 ((void *)&pcfg->pwwn, (void *)&n2n_port->rem_port_wwn,
1007 sizeof(wwn_t)) > 0) {
1008 port->pid = N2N_LOCAL_PID;
1009 /**
1010 * First, check if we know the device by pwwn.
1011 */
1012 rport = bfa_fcs_lport_get_rport_by_pwwn(port,
1013 n2n_port->rem_port_wwn);
1014 if (rport) {
1015 bfa_trc(port->fcs, rport->pid);
1016 bfa_trc(port->fcs, rport->pwwn);
1017 rport->pid = N2N_REMOTE_PID;
1018 bfa_fcs_rport_online(rport);
1019 return;
1020 }
1021
1022 /*
1023 * In n2n there can be only one rport. Delete the old one
1024 * whose pid should be zero, because it is offline.
1025 */
1026 if (port->num_rports > 0) {
1027 rport = bfa_fcs_lport_get_rport_by_pid(port, 0);
1028 bfa_assert(rport != NULL);
1029 if (rport) {
1030 bfa_trc(port->fcs, rport->pwwn);
1031 bfa_fcs_rport_delete(rport);
1032 }
1033 }
1034 bfa_fcs_rport_create(port, N2N_REMOTE_PID);
1035 }
1036}
1037
1038/**
1039 * Called by fcs/port to notify transition to offline state.
1040 */
1041static void
1042bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port)
1043{
1044 struct bfa_fcs_lport_n2n_s *n2n_port = &port->port_topo.pn2n;
1045
1046 bfa_trc(port->fcs, port->pid);
1047 port->pid = 0;
1048 n2n_port->rem_port_wwn = 0;
1049 n2n_port->reply_oxid = 0;
1050}
1051
1052#define BFA_FCS_FDMI_CMD_MAX_RETRIES 2
1053
1054/*
1055 * forward declarations
1056 */
1057static void bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg,
1058 struct bfa_fcxp_s *fcxp_alloced);
1059static void bfa_fcs_lport_fdmi_send_rprt(void *fdmi_cbarg,
1060 struct bfa_fcxp_s *fcxp_alloced);
1061static void bfa_fcs_lport_fdmi_send_rpa(void *fdmi_cbarg,
1062 struct bfa_fcxp_s *fcxp_alloced);
1063static void bfa_fcs_lport_fdmi_rhba_response(void *fcsarg,
1064 struct bfa_fcxp_s *fcxp,
1065 void *cbarg,
1066 bfa_status_t req_status,
1067 u32 rsp_len,
1068 u32 resid_len,
1069 struct fchs_s *rsp_fchs);
1070static void bfa_fcs_lport_fdmi_rprt_response(void *fcsarg,
1071 struct bfa_fcxp_s *fcxp,
1072 void *cbarg,
1073 bfa_status_t req_status,
1074 u32 rsp_len,
1075 u32 resid_len,
1076 struct fchs_s *rsp_fchs);
1077static void bfa_fcs_lport_fdmi_rpa_response(void *fcsarg,
1078 struct bfa_fcxp_s *fcxp,
1079 void *cbarg,
1080 bfa_status_t req_status,
1081 u32 rsp_len,
1082 u32 resid_len,
1083 struct fchs_s *rsp_fchs);
1084static void bfa_fcs_lport_fdmi_timeout(void *arg);
1085static u16 bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi,
1086 u8 *pyld);
1087static u16 bfa_fcs_lport_fdmi_build_rprt_pyld(struct bfa_fcs_lport_fdmi_s *fdmi,
1088 u8 *pyld);
1089static u16 bfa_fcs_lport_fdmi_build_rpa_pyld(struct bfa_fcs_lport_fdmi_s *fdmi,
1090 u8 *pyld);
1091static u16 bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *
1092 fdmi, u8 *pyld);
1093static void bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
1094 struct bfa_fcs_fdmi_hba_attr_s *hba_attr);
1095static void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
1096 struct bfa_fcs_fdmi_port_attr_s *port_attr);
1097/**
1098 * fcs_fdmi_sm FCS FDMI state machine
1099 */
1100
1101/**
1102 * FDMI State Machine events
1103 */
1104enum port_fdmi_event {
1105 FDMISM_EVENT_PORT_ONLINE = 1,
1106 FDMISM_EVENT_PORT_OFFLINE = 2,
1107 FDMISM_EVENT_RSP_OK = 4,
1108 FDMISM_EVENT_RSP_ERROR = 5,
1109 FDMISM_EVENT_TIMEOUT = 6,
1110 FDMISM_EVENT_RHBA_SENT = 7,
1111 FDMISM_EVENT_RPRT_SENT = 8,
1112 FDMISM_EVENT_RPA_SENT = 9,
1113};
1114
1115static void bfa_fcs_lport_fdmi_sm_offline(struct bfa_fcs_lport_fdmi_s *fdmi,
1116 enum port_fdmi_event event);
1117static void bfa_fcs_lport_fdmi_sm_sending_rhba(
1118 struct bfa_fcs_lport_fdmi_s *fdmi,
1119 enum port_fdmi_event event);
1120static void bfa_fcs_lport_fdmi_sm_rhba(struct bfa_fcs_lport_fdmi_s *fdmi,
1121 enum port_fdmi_event event);
1122static void bfa_fcs_lport_fdmi_sm_rhba_retry(
1123 struct bfa_fcs_lport_fdmi_s *fdmi,
1124 enum port_fdmi_event event);
1125static void bfa_fcs_lport_fdmi_sm_sending_rprt(
1126 struct bfa_fcs_lport_fdmi_s *fdmi,
1127 enum port_fdmi_event event);
1128static void bfa_fcs_lport_fdmi_sm_rprt(struct bfa_fcs_lport_fdmi_s *fdmi,
1129 enum port_fdmi_event event);
1130static void bfa_fcs_lport_fdmi_sm_rprt_retry(
1131 struct bfa_fcs_lport_fdmi_s *fdmi,
1132 enum port_fdmi_event event);
1133static void bfa_fcs_lport_fdmi_sm_sending_rpa(
1134 struct bfa_fcs_lport_fdmi_s *fdmi,
1135 enum port_fdmi_event event);
1136static void bfa_fcs_lport_fdmi_sm_rpa(struct bfa_fcs_lport_fdmi_s *fdmi,
1137 enum port_fdmi_event event);
1138static void bfa_fcs_lport_fdmi_sm_rpa_retry(
1139 struct bfa_fcs_lport_fdmi_s *fdmi,
1140 enum port_fdmi_event event);
1141static void bfa_fcs_lport_fdmi_sm_online(struct bfa_fcs_lport_fdmi_s *fdmi,
1142 enum port_fdmi_event event);
1143static void bfa_fcs_lport_fdmi_sm_disabled(
1144 struct bfa_fcs_lport_fdmi_s *fdmi,
1145 enum port_fdmi_event event);
1146/**
1147 * Start in offline state - awaiting MS to send start.
1148 */
1149static void
1150bfa_fcs_lport_fdmi_sm_offline(struct bfa_fcs_lport_fdmi_s *fdmi,
1151 enum port_fdmi_event event)
1152{
1153 struct bfa_fcs_lport_s *port = fdmi->ms->port;
1154
1155 bfa_trc(port->fcs, port->port_cfg.pwwn);
1156 bfa_trc(port->fcs, event);
1157
1158 fdmi->retry_cnt = 0;
1159
1160 switch (event) {
1161 case FDMISM_EVENT_PORT_ONLINE:
1162 if (port->vport) {
1163 /*
1164 * For Vports, register a new port.
1165 */
1166 bfa_sm_set_state(fdmi,
1167 bfa_fcs_lport_fdmi_sm_sending_rprt);
1168 bfa_fcs_lport_fdmi_send_rprt(fdmi, NULL);
1169 } else {
1170 /*
1171 * For a base port, we should first register the HBA
1172 * atribute. The HBA attribute also contains the base
1173 * port registration.
1174 */
1175 bfa_sm_set_state(fdmi,
1176 bfa_fcs_lport_fdmi_sm_sending_rhba);
1177 bfa_fcs_lport_fdmi_send_rhba(fdmi, NULL);
1178 }
1179 break;
1180
1181 case FDMISM_EVENT_PORT_OFFLINE:
1182 break;
1183
1184 default:
1185 bfa_sm_fault(port->fcs, event);
1186 }
1187}
1188
1189static void
1190bfa_fcs_lport_fdmi_sm_sending_rhba(struct bfa_fcs_lport_fdmi_s *fdmi,
1191 enum port_fdmi_event event)
1192{
1193 struct bfa_fcs_lport_s *port = fdmi->ms->port;
1194
1195 bfa_trc(port->fcs, port->port_cfg.pwwn);
1196 bfa_trc(port->fcs, event);
1197
1198 switch (event) {
1199 case FDMISM_EVENT_RHBA_SENT:
1200 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rhba);
1201 break;
1202
1203 case FDMISM_EVENT_PORT_OFFLINE:
1204 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
1205 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port),
1206 &fdmi->fcxp_wqe);
1207 break;
1208
1209 default:
1210 bfa_sm_fault(port->fcs, event);
1211 }
1212}
1213
1214static void
1215bfa_fcs_lport_fdmi_sm_rhba(struct bfa_fcs_lport_fdmi_s *fdmi,
1216 enum port_fdmi_event event)
1217{
1218 struct bfa_fcs_lport_s *port = fdmi->ms->port;
1219
1220 bfa_trc(port->fcs, port->port_cfg.pwwn);
1221 bfa_trc(port->fcs, event);
1222
1223 switch (event) {
1224 case FDMISM_EVENT_RSP_ERROR:
1225 /*
1226 * if max retries have not been reached, start timer for a
1227 * delayed retry
1228 */
1229 if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) {
1230 bfa_sm_set_state(fdmi,
1231 bfa_fcs_lport_fdmi_sm_rhba_retry);
1232 bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port),
1233 &fdmi->timer,
1234 bfa_fcs_lport_fdmi_timeout, fdmi,
1235 BFA_FCS_RETRY_TIMEOUT);
1236 } else {
1237 /*
1238 * set state to offline
1239 */
1240 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
1241 }
1242 break;
1243
1244 case FDMISM_EVENT_RSP_OK:
1245 /*
1246 * Initiate Register Port Attributes
1247 */
1248 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rpa);
1249 fdmi->retry_cnt = 0;
1250 bfa_fcs_lport_fdmi_send_rpa(fdmi, NULL);
1251 break;
1252
1253 case FDMISM_EVENT_PORT_OFFLINE:
1254 bfa_fcxp_discard(fdmi->fcxp);
1255 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
1256 break;
1257
1258 default:
1259 bfa_sm_fault(port->fcs, event);
1260 }
1261}
1262
1263static void
1264bfa_fcs_lport_fdmi_sm_rhba_retry(struct bfa_fcs_lport_fdmi_s *fdmi,
1265 enum port_fdmi_event event)
1266{
1267 struct bfa_fcs_lport_s *port = fdmi->ms->port;
1268
1269 bfa_trc(port->fcs, port->port_cfg.pwwn);
1270 bfa_trc(port->fcs, event);
1271
1272 switch (event) {
1273 case FDMISM_EVENT_TIMEOUT:
1274 /*
1275 * Retry Timer Expired. Re-send
1276 */
1277 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rhba);
1278 bfa_fcs_lport_fdmi_send_rhba(fdmi, NULL);
1279 break;
1280
1281 case FDMISM_EVENT_PORT_OFFLINE:
1282 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
1283 bfa_timer_stop(&fdmi->timer);
1284 break;
1285
1286 default:
1287 bfa_sm_fault(port->fcs, event);
1288 }
1289}
1290
1291/*
1292* RPRT : Register Port
1293 */
1294static void
1295bfa_fcs_lport_fdmi_sm_sending_rprt(struct bfa_fcs_lport_fdmi_s *fdmi,
1296 enum port_fdmi_event event)
1297{
1298 struct bfa_fcs_lport_s *port = fdmi->ms->port;
1299
1300 bfa_trc(port->fcs, port->port_cfg.pwwn);
1301 bfa_trc(port->fcs, event);
1302
1303 switch (event) {
1304 case FDMISM_EVENT_RPRT_SENT:
1305 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rprt);
1306 break;
1307
1308 case FDMISM_EVENT_PORT_OFFLINE:
1309 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
1310 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port),
1311 &fdmi->fcxp_wqe);
1312 break;
1313
1314 default:
1315 bfa_sm_fault(port->fcs, event);
1316 }
1317}
1318
1319static void
1320bfa_fcs_lport_fdmi_sm_rprt(struct bfa_fcs_lport_fdmi_s *fdmi,
1321 enum port_fdmi_event event)
1322{
1323 struct bfa_fcs_lport_s *port = fdmi->ms->port;
1324
1325 bfa_trc(port->fcs, port->port_cfg.pwwn);
1326 bfa_trc(port->fcs, event);
1327
1328 switch (event) {
1329 case FDMISM_EVENT_RSP_ERROR:
1330 /*
1331 * if max retries have not been reached, start timer for a
1332 * delayed retry
1333 */
1334 if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) {
1335 bfa_sm_set_state(fdmi,
1336 bfa_fcs_lport_fdmi_sm_rprt_retry);
1337 bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port),
1338 &fdmi->timer,
1339 bfa_fcs_lport_fdmi_timeout, fdmi,
1340 BFA_FCS_RETRY_TIMEOUT);
1341
1342 } else {
1343 /*
1344 * set state to offline
1345 */
1346 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
1347 fdmi->retry_cnt = 0;
1348 }
1349 break;
1350
1351 case FDMISM_EVENT_RSP_OK:
1352 fdmi->retry_cnt = 0;
1353 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_online);
1354 break;
1355
1356 case FDMISM_EVENT_PORT_OFFLINE:
1357 bfa_fcxp_discard(fdmi->fcxp);
1358 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
1359 break;
1360
1361 default:
1362 bfa_sm_fault(port->fcs, event);
1363 }
1364}
1365
1366static void
1367bfa_fcs_lport_fdmi_sm_rprt_retry(struct bfa_fcs_lport_fdmi_s *fdmi,
1368 enum port_fdmi_event event)
1369{
1370 struct bfa_fcs_lport_s *port = fdmi->ms->port;
1371
1372 bfa_trc(port->fcs, port->port_cfg.pwwn);
1373 bfa_trc(port->fcs, event);
1374
1375 switch (event) {
1376 case FDMISM_EVENT_TIMEOUT:
1377 /*
1378 * Retry Timer Expired. Re-send
1379 */
1380 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rprt);
1381 bfa_fcs_lport_fdmi_send_rprt(fdmi, NULL);
1382 break;
1383
1384 case FDMISM_EVENT_PORT_OFFLINE:
1385 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
1386 bfa_timer_stop(&fdmi->timer);
1387 break;
1388
1389 default:
1390 bfa_sm_fault(port->fcs, event);
1391 }
1392}
1393
1394/*
1395 * Register Port Attributes
1396 */
1397static void
1398bfa_fcs_lport_fdmi_sm_sending_rpa(struct bfa_fcs_lport_fdmi_s *fdmi,
1399 enum port_fdmi_event event)
1400{
1401 struct bfa_fcs_lport_s *port = fdmi->ms->port;
1402
1403 bfa_trc(port->fcs, port->port_cfg.pwwn);
1404 bfa_trc(port->fcs, event);
1405
1406 switch (event) {
1407 case FDMISM_EVENT_RPA_SENT:
1408 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rpa);
1409 break;
1410
1411 case FDMISM_EVENT_PORT_OFFLINE:
1412 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
1413 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port),
1414 &fdmi->fcxp_wqe);
1415 break;
1416
1417 default:
1418 bfa_sm_fault(port->fcs, event);
1419 }
1420}
1421
1422static void
1423bfa_fcs_lport_fdmi_sm_rpa(struct bfa_fcs_lport_fdmi_s *fdmi,
1424 enum port_fdmi_event event)
1425{
1426 struct bfa_fcs_lport_s *port = fdmi->ms->port;
1427
1428 bfa_trc(port->fcs, port->port_cfg.pwwn);
1429 bfa_trc(port->fcs, event);
1430
1431 switch (event) {
1432 case FDMISM_EVENT_RSP_ERROR:
1433 /*
1434 * if max retries have not been reached, start timer for a
1435 * delayed retry
1436 */
1437 if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) {
1438 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rpa_retry);
1439 bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port),
1440 &fdmi->timer,
1441 bfa_fcs_lport_fdmi_timeout, fdmi,
1442 BFA_FCS_RETRY_TIMEOUT);
1443 } else {
1444 /*
1445 * set state to offline
1446 */
1447 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
1448 fdmi->retry_cnt = 0;
1449 }
1450 break;
1451
1452 case FDMISM_EVENT_RSP_OK:
1453 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_online);
1454 fdmi->retry_cnt = 0;
1455 break;
1456
1457 case FDMISM_EVENT_PORT_OFFLINE:
1458 bfa_fcxp_discard(fdmi->fcxp);
1459 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
1460 break;
1461
1462 default:
1463 bfa_sm_fault(port->fcs, event);
1464 }
1465}
1466
1467static void
1468bfa_fcs_lport_fdmi_sm_rpa_retry(struct bfa_fcs_lport_fdmi_s *fdmi,
1469 enum port_fdmi_event event)
1470{
1471 struct bfa_fcs_lport_s *port = fdmi->ms->port;
1472
1473 bfa_trc(port->fcs, port->port_cfg.pwwn);
1474 bfa_trc(port->fcs, event);
1475
1476 switch (event) {
1477 case FDMISM_EVENT_TIMEOUT:
1478 /*
1479 * Retry Timer Expired. Re-send
1480 */
1481 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rpa);
1482 bfa_fcs_lport_fdmi_send_rpa(fdmi, NULL);
1483 break;
1484
1485 case FDMISM_EVENT_PORT_OFFLINE:
1486 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
1487 bfa_timer_stop(&fdmi->timer);
1488 break;
1489
1490 default:
1491 bfa_sm_fault(port->fcs, event);
1492 }
1493}
1494
1495static void
1496bfa_fcs_lport_fdmi_sm_online(struct bfa_fcs_lport_fdmi_s *fdmi,
1497 enum port_fdmi_event event)
1498{
1499 struct bfa_fcs_lport_s *port = fdmi->ms->port;
1500
1501 bfa_trc(port->fcs, port->port_cfg.pwwn);
1502 bfa_trc(port->fcs, event);
1503
1504 switch (event) {
1505 case FDMISM_EVENT_PORT_OFFLINE:
1506 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
1507 break;
1508
1509 default:
1510 bfa_sm_fault(port->fcs, event);
1511 }
1512}
1513/**
1514 * FDMI is disabled state.
1515 */
1516static void
1517bfa_fcs_lport_fdmi_sm_disabled(struct bfa_fcs_lport_fdmi_s *fdmi,
1518 enum port_fdmi_event event)
1519{
1520 struct bfa_fcs_lport_s *port = fdmi->ms->port;
1521
1522 bfa_trc(port->fcs, port->port_cfg.pwwn);
1523 bfa_trc(port->fcs, event);
1524
1525 /* No op State. It can only be enabled at Driver Init. */
1526}
1527
1528/**
1529* RHBA : Register HBA Attributes.
1530 */
1531static void
1532bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1533{
1534 struct bfa_fcs_lport_fdmi_s *fdmi = fdmi_cbarg;
1535 struct bfa_fcs_lport_s *port = fdmi->ms->port;
1536 struct fchs_s fchs;
1537 int len, attr_len;
1538 struct bfa_fcxp_s *fcxp;
1539 u8 *pyld;
1540
1541 bfa_trc(port->fcs, port->port_cfg.pwwn);
1542
1543 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
1544 if (!fcxp) {
1545 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
1546 bfa_fcs_lport_fdmi_send_rhba, fdmi);
1547 return;
1548 }
1549 fdmi->fcxp = fcxp;
1550
1551 pyld = bfa_fcxp_get_reqbuf(fcxp);
1552 bfa_os_memset(pyld, 0, FC_MAX_PDUSZ);
1553
1554 len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port),
1555 FDMI_RHBA);
1556
1557 attr_len =
1558 bfa_fcs_lport_fdmi_build_rhba_pyld(fdmi,
1559 (u8 *) ((struct ct_hdr_s *) pyld
1560 + 1));
1561
1562 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1563 FC_CLASS_3, (len + attr_len), &fchs,
1564 bfa_fcs_lport_fdmi_rhba_response, (void *)fdmi,
1565 FC_MAX_PDUSZ, FC_FCCT_TOV);
1566
1567 bfa_sm_send_event(fdmi, FDMISM_EVENT_RHBA_SENT);
1568}
1569
1570static u16
1571bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
1572{
1573 struct bfa_fcs_lport_s *port = fdmi->ms->port;
1574 struct bfa_fcs_fdmi_hba_attr_s hba_attr;
1575 struct bfa_fcs_fdmi_hba_attr_s *fcs_hba_attr = &hba_attr;
1576 struct fdmi_rhba_s *rhba = (struct fdmi_rhba_s *) pyld;
1577 struct fdmi_attr_s *attr;
1578 u8 *curr_ptr;
1579 u16 len, count;
1580
1581 /*
1582 * get hba attributes
1583 */
1584 bfa_fcs_fdmi_get_hbaattr(fdmi, fcs_hba_attr);
1585
1586 rhba->hba_id = bfa_fcs_lport_get_pwwn(port);
1587 rhba->port_list.num_ports = bfa_os_htonl(1);
1588 rhba->port_list.port_entry = bfa_fcs_lport_get_pwwn(port);
1589
1590 len = sizeof(rhba->hba_id) + sizeof(rhba->port_list);
1591
1592 count = 0;
1593 len += sizeof(rhba->hba_attr_blk.attr_count);
1594
1595 /*
1596 * fill out the invididual entries of the HBA attrib Block
1597 */
1598 curr_ptr = (u8 *) &rhba->hba_attr_blk.hba_attr;
1599
1600 /*
1601 * Node Name
1602 */
1603 attr = (struct fdmi_attr_s *) curr_ptr;
1604 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_NODENAME);
1605 attr->len = sizeof(wwn_t);
1606 memcpy(attr->value, &bfa_fcs_lport_get_nwwn(port), attr->len);
1607 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1608 len += attr->len;
1609 count++;
1610 attr->len =
1611 bfa_os_htons(attr->len + sizeof(attr->type) +
1612 sizeof(attr->len));
1613
1614 /*
1615 * Manufacturer
1616 */
1617 attr = (struct fdmi_attr_s *) curr_ptr;
1618 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MANUFACTURER);
1619 attr->len = (u16) strlen(fcs_hba_attr->manufacturer);
1620 memcpy(attr->value, fcs_hba_attr->manufacturer, attr->len);
1621 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
1622 *fields need
1623 *to be 4 byte
1624 *aligned */
1625 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1626 len += attr->len;
1627 count++;
1628 attr->len =
1629 bfa_os_htons(attr->len + sizeof(attr->type) +
1630 sizeof(attr->len));
1631
1632 /*
1633 * Serial Number
1634 */
1635 attr = (struct fdmi_attr_s *) curr_ptr;
1636 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_SERIALNUM);
1637 attr->len = (u16) strlen(fcs_hba_attr->serial_num);
1638 memcpy(attr->value, fcs_hba_attr->serial_num, attr->len);
1639 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
1640 *fields need
1641 *to be 4 byte
1642 *aligned */
1643 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1644 len += attr->len;
1645 count++;
1646 attr->len =
1647 bfa_os_htons(attr->len + sizeof(attr->type) +
1648 sizeof(attr->len));
1649
1650 /*
1651 * Model
1652 */
1653 attr = (struct fdmi_attr_s *) curr_ptr;
1654 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MODEL);
1655 attr->len = (u16) strlen(fcs_hba_attr->model);
1656 memcpy(attr->value, fcs_hba_attr->model, attr->len);
1657 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
1658 *fields need
1659 *to be 4 byte
1660 *aligned */
1661 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1662 len += attr->len;
1663 count++;
1664 attr->len =
1665 bfa_os_htons(attr->len + sizeof(attr->type) +
1666 sizeof(attr->len));
1667
1668 /*
1669 * Model Desc
1670 */
1671 attr = (struct fdmi_attr_s *) curr_ptr;
1672 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MODEL_DESC);
1673 attr->len = (u16) strlen(fcs_hba_attr->model_desc);
1674 memcpy(attr->value, fcs_hba_attr->model_desc, attr->len);
1675 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
1676 *fields need
1677 *to be 4 byte
1678 *aligned */
1679 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1680 len += attr->len;
1681 count++;
1682 attr->len =
1683 bfa_os_htons(attr->len + sizeof(attr->type) +
1684 sizeof(attr->len));
1685
1686 /*
1687 * H/W Version
1688 */
1689 if (fcs_hba_attr->hw_version[0] != '\0') {
1690 attr = (struct fdmi_attr_s *) curr_ptr;
1691 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_HW_VERSION);
1692 attr->len = (u16) strlen(fcs_hba_attr->hw_version);
1693 memcpy(attr->value, fcs_hba_attr->hw_version, attr->len);
1694 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
1695 *fields need
1696 *to be 4 byte
1697 *aligned */
1698 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1699 len += attr->len;
1700 count++;
1701 attr->len =
1702 bfa_os_htons(attr->len + sizeof(attr->type) +
1703 sizeof(attr->len));
1704 }
1705
1706 /*
1707 * Driver Version
1708 */
1709 attr = (struct fdmi_attr_s *) curr_ptr;
1710 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_DRIVER_VERSION);
1711 attr->len = (u16) strlen(fcs_hba_attr->driver_version);
1712 memcpy(attr->value, fcs_hba_attr->driver_version, attr->len);
1713 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
1714 *fields need
1715 *to be 4 byte
1716 *aligned */
1717 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1718 len += attr->len;;
1719 count++;
1720 attr->len =
1721 bfa_os_htons(attr->len + sizeof(attr->type) +
1722 sizeof(attr->len));
1723
1724 /*
1725 * Option Rom Version
1726 */
1727 if (fcs_hba_attr->option_rom_ver[0] != '\0') {
1728 attr = (struct fdmi_attr_s *) curr_ptr;
1729 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_ROM_VERSION);
1730 attr->len = (u16) strlen(fcs_hba_attr->option_rom_ver);
1731 memcpy(attr->value, fcs_hba_attr->option_rom_ver, attr->len);
1732 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
1733 *fields need
1734 *to be 4 byte
1735 *aligned */
1736 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1737 len += attr->len;
1738 count++;
1739 attr->len =
1740 bfa_os_htons(attr->len + sizeof(attr->type) +
1741 sizeof(attr->len));
1742 }
1743
1744 /*
1745 * f/w Version = driver version
1746 */
1747 attr = (struct fdmi_attr_s *) curr_ptr;
1748 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_FW_VERSION);
1749 attr->len = (u16) strlen(fcs_hba_attr->driver_version);
1750 memcpy(attr->value, fcs_hba_attr->driver_version, attr->len);
1751 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
1752 *fields need
1753 *to be 4 byte
1754 *aligned */
1755 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1756 len += attr->len;
1757 count++;
1758 attr->len =
1759 bfa_os_htons(attr->len + sizeof(attr->type) +
1760 sizeof(attr->len));
1761
1762 /*
1763 * OS Name
1764 */
1765 if (fcs_hba_attr->os_name[0] != '\0') {
1766 attr = (struct fdmi_attr_s *) curr_ptr;
1767 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_OS_NAME);
1768 attr->len = (u16) strlen(fcs_hba_attr->os_name);
1769 memcpy(attr->value, fcs_hba_attr->os_name, attr->len);
1770 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
1771 *fields need
1772 *to be 4 byte
1773 *aligned */
1774 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1775 len += attr->len;
1776 count++;
1777 attr->len =
1778 bfa_os_htons(attr->len + sizeof(attr->type) +
1779 sizeof(attr->len));
1780 }
1781
1782 /*
1783 * MAX_CT_PAYLOAD
1784 */
1785 attr = (struct fdmi_attr_s *) curr_ptr;
1786 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MAX_CT);
1787 attr->len = sizeof(fcs_hba_attr->max_ct_pyld);
1788 memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, attr->len);
1789 len += attr->len;
1790 count++;
1791 attr->len =
1792 bfa_os_htons(attr->len + sizeof(attr->type) +
1793 sizeof(attr->len));
1794
1795 /*
1796 * Update size of payload
1797 */
1798 len += ((sizeof(attr->type) +
1799 sizeof(attr->len)) * count);
1800
1801 rhba->hba_attr_blk.attr_count = bfa_os_htonl(count);
1802 return len;
1803}
1804
1805static void
1806bfa_fcs_lport_fdmi_rhba_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
1807 void *cbarg, bfa_status_t req_status,
1808 u32 rsp_len, u32 resid_len,
1809 struct fchs_s *rsp_fchs)
1810{
1811 struct bfa_fcs_lport_fdmi_s *fdmi =
1812 (struct bfa_fcs_lport_fdmi_s *) cbarg;
1813 struct bfa_fcs_lport_s *port = fdmi->ms->port;
1814 struct ct_hdr_s *cthdr = NULL;
1815
1816 bfa_trc(port->fcs, port->port_cfg.pwwn);
1817
1818 /*
1819 * Sanity Checks
1820 */
1821 if (req_status != BFA_STATUS_OK) {
1822 bfa_trc(port->fcs, req_status);
1823 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
1824 return;
1825 }
1826
1827 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
1828 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
1829
1830 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
1831 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
1832 return;
1833 }
1834
1835 bfa_trc(port->fcs, cthdr->reason_code);
1836 bfa_trc(port->fcs, cthdr->exp_code);
1837 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
1838}
1839
1840/**
1841* RPRT : Register Port
1842 */
1843static void
1844bfa_fcs_lport_fdmi_send_rprt(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1845{
1846 struct bfa_fcs_lport_fdmi_s *fdmi = fdmi_cbarg;
1847 struct bfa_fcs_lport_s *port = fdmi->ms->port;
1848 struct fchs_s fchs;
1849 u16 len, attr_len;
1850 struct bfa_fcxp_s *fcxp;
1851 u8 *pyld;
1852
1853 bfa_trc(port->fcs, port->port_cfg.pwwn);
1854
1855 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
1856 if (!fcxp) {
1857 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
1858 bfa_fcs_lport_fdmi_send_rprt, fdmi);
1859 return;
1860 }
1861 fdmi->fcxp = fcxp;
1862
1863 pyld = bfa_fcxp_get_reqbuf(fcxp);
1864 bfa_os_memset(pyld, 0, FC_MAX_PDUSZ);
1865
1866 len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port),
1867 FDMI_RPRT);
1868
1869 attr_len =
1870 bfa_fcs_lport_fdmi_build_rprt_pyld(fdmi,
1871 (u8 *) ((struct ct_hdr_s *) pyld
1872 + 1));
1873
1874 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1875 FC_CLASS_3, len + attr_len, &fchs,
1876 bfa_fcs_lport_fdmi_rprt_response, (void *)fdmi,
1877 FC_MAX_PDUSZ, FC_FCCT_TOV);
1878
1879 bfa_sm_send_event(fdmi, FDMISM_EVENT_RPRT_SENT);
1880}
1881
1882/**
1883 * This routine builds Port Attribute Block that used in RPA, RPRT commands.
1884 */
1885static u16
1886bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
1887 u8 *pyld)
1888{
1889 struct bfa_fcs_fdmi_port_attr_s fcs_port_attr;
1890 struct fdmi_port_attr_s *port_attrib = (struct fdmi_port_attr_s *) pyld;
1891 struct fdmi_attr_s *attr;
1892 u8 *curr_ptr;
1893 u16 len;
1894 u8 count = 0;
1895
1896 /*
1897 * get port attributes
1898 */
1899 bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr);
1900
1901 len = sizeof(port_attrib->attr_count);
1902
1903 /*
1904 * fill out the invididual entries
1905 */
1906 curr_ptr = (u8 *) &port_attrib->port_attr;
1907
1908 /*
1909 * FC4 Types
1910 */
1911 attr = (struct fdmi_attr_s *) curr_ptr;
1912 attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_FC4_TYPES);
1913 attr->len = sizeof(fcs_port_attr.supp_fc4_types);
1914 memcpy(attr->value, fcs_port_attr.supp_fc4_types, attr->len);
1915 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1916 len += attr->len;
1917 ++count;
1918 attr->len =
1919 bfa_os_htons(attr->len + sizeof(attr->type) +
1920 sizeof(attr->len));
1921
1922 /*
1923 * Supported Speed
1924 */
1925 attr = (struct fdmi_attr_s *) curr_ptr;
1926 attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_SUPP_SPEED);
1927 attr->len = sizeof(fcs_port_attr.supp_speed);
1928 memcpy(attr->value, &fcs_port_attr.supp_speed, attr->len);
1929 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1930 len += attr->len;
1931 ++count;
1932 attr->len =
1933 bfa_os_htons(attr->len + sizeof(attr->type) +
1934 sizeof(attr->len));
1935
1936 /*
1937 * current Port Speed
1938 */
1939 attr = (struct fdmi_attr_s *) curr_ptr;
1940 attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_PORT_SPEED);
1941 attr->len = sizeof(fcs_port_attr.curr_speed);
1942 memcpy(attr->value, &fcs_port_attr.curr_speed, attr->len);
1943 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1944 len += attr->len;
1945 ++count;
1946 attr->len =
1947 bfa_os_htons(attr->len + sizeof(attr->type) +
1948 sizeof(attr->len));
1949
1950 /*
1951 * max frame size
1952 */
1953 attr = (struct fdmi_attr_s *) curr_ptr;
1954 attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_FRAME_SIZE);
1955 attr->len = sizeof(fcs_port_attr.max_frm_size);
1956 memcpy(attr->value, &fcs_port_attr.max_frm_size, attr->len);
1957 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1958 len += attr->len;
1959 ++count;
1960 attr->len =
1961 bfa_os_htons(attr->len + sizeof(attr->type) +
1962 sizeof(attr->len));
1963
1964 /*
1965 * OS Device Name
1966 */
1967 if (fcs_port_attr.os_device_name[0] != '\0') {
1968 attr = (struct fdmi_attr_s *) curr_ptr;
1969 attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_DEV_NAME);
1970 attr->len = (u16) strlen(fcs_port_attr.os_device_name);
1971 memcpy(attr->value, fcs_port_attr.os_device_name, attr->len);
1972 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
1973 *fields need
1974 *to be 4 byte
1975 *aligned */
1976 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1977 len += attr->len;
1978 ++count;
1979 attr->len =
1980 bfa_os_htons(attr->len + sizeof(attr->type) +
1981 sizeof(attr->len));
1982 }
1983 /*
1984 * Host Name
1985 */
1986 if (fcs_port_attr.host_name[0] != '\0') {
1987 attr = (struct fdmi_attr_s *) curr_ptr;
1988 attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_HOST_NAME);
1989 attr->len = (u16) strlen(fcs_port_attr.host_name);
1990 memcpy(attr->value, fcs_port_attr.host_name, attr->len);
1991 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
1992 *fields need
1993 *to be 4 byte
1994 *aligned */
1995 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1996 len += attr->len;
1997 ++count;
1998 attr->len =
1999 bfa_os_htons(attr->len + sizeof(attr->type) +
2000 sizeof(attr->len));
2001 }
2002
2003 /*
2004 * Update size of payload
2005 */
2006 port_attrib->attr_count = bfa_os_htonl(count);
2007 len += ((sizeof(attr->type) +
2008 sizeof(attr->len)) * count);
2009 return len;
2010}
2011
2012static u16
2013bfa_fcs_lport_fdmi_build_rprt_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
2014{
2015 struct bfa_fcs_lport_s *port = fdmi->ms->port;
2016 struct fdmi_rprt_s *rprt = (struct fdmi_rprt_s *) pyld;
2017 u16 len;
2018
2019 rprt->hba_id = bfa_fcs_lport_get_pwwn(bfa_fcs_get_base_port(port->fcs));
2020 rprt->port_name = bfa_fcs_lport_get_pwwn(port);
2021
2022 len = bfa_fcs_lport_fdmi_build_portattr_block(fdmi,
2023 (u8 *) &rprt->port_attr_blk);
2024
2025 len += sizeof(rprt->hba_id) + sizeof(rprt->port_name);
2026
2027 return len;
2028}
2029
2030static void
2031bfa_fcs_lport_fdmi_rprt_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
2032 void *cbarg, bfa_status_t req_status,
2033 u32 rsp_len, u32 resid_len,
2034 struct fchs_s *rsp_fchs)
2035{
2036 struct bfa_fcs_lport_fdmi_s *fdmi =
2037 (struct bfa_fcs_lport_fdmi_s *) cbarg;
2038 struct bfa_fcs_lport_s *port = fdmi->ms->port;
2039 struct ct_hdr_s *cthdr = NULL;
2040
2041 bfa_trc(port->fcs, port->port_cfg.pwwn);
2042
2043 /*
2044 * Sanity Checks
2045 */
2046 if (req_status != BFA_STATUS_OK) {
2047 bfa_trc(port->fcs, req_status);
2048 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
2049 return;
2050 }
2051
2052 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
2053 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
2054
2055 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
2056 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
2057 return;
2058 }
2059
2060 bfa_trc(port->fcs, cthdr->reason_code);
2061 bfa_trc(port->fcs, cthdr->exp_code);
2062 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
2063}
2064
2065/**
2066* RPA : Register Port Attributes.
2067 */
2068static void
2069bfa_fcs_lport_fdmi_send_rpa(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
2070{
2071 struct bfa_fcs_lport_fdmi_s *fdmi = fdmi_cbarg;
2072 struct bfa_fcs_lport_s *port = fdmi->ms->port;
2073 struct fchs_s fchs;
2074 u16 len, attr_len;
2075 struct bfa_fcxp_s *fcxp;
2076 u8 *pyld;
2077
2078 bfa_trc(port->fcs, port->port_cfg.pwwn);
2079
2080 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
2081 if (!fcxp) {
2082 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
2083 bfa_fcs_lport_fdmi_send_rpa, fdmi);
2084 return;
2085 }
2086 fdmi->fcxp = fcxp;
2087
2088 pyld = bfa_fcxp_get_reqbuf(fcxp);
2089 bfa_os_memset(pyld, 0, FC_MAX_PDUSZ);
2090
2091 len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port),
2092 FDMI_RPA);
2093
2094 attr_len =
2095 bfa_fcs_lport_fdmi_build_rpa_pyld(fdmi,
2096 (u8 *) ((struct ct_hdr_s *) pyld
2097 + 1));
2098
2099 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
2100 FC_CLASS_3, len + attr_len, &fchs,
2101 bfa_fcs_lport_fdmi_rpa_response, (void *)fdmi,
2102 FC_MAX_PDUSZ, FC_FCCT_TOV);
2103
2104 bfa_sm_send_event(fdmi, FDMISM_EVENT_RPA_SENT);
2105}
2106
2107static u16
2108bfa_fcs_lport_fdmi_build_rpa_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
2109{
2110 struct bfa_fcs_lport_s *port = fdmi->ms->port;
2111 struct fdmi_rpa_s *rpa = (struct fdmi_rpa_s *) pyld;
2112 u16 len;
2113
2114 rpa->port_name = bfa_fcs_lport_get_pwwn(port);
2115
2116 len = bfa_fcs_lport_fdmi_build_portattr_block(fdmi,
2117 (u8 *) &rpa->port_attr_blk);
2118
2119 len += sizeof(rpa->port_name);
2120
2121 return len;
2122}
2123
2124static void
2125bfa_fcs_lport_fdmi_rpa_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
2126 void *cbarg, bfa_status_t req_status, u32 rsp_len,
2127 u32 resid_len, struct fchs_s *rsp_fchs)
2128{
2129 struct bfa_fcs_lport_fdmi_s *fdmi =
2130 (struct bfa_fcs_lport_fdmi_s *) cbarg;
2131 struct bfa_fcs_lport_s *port = fdmi->ms->port;
2132 struct ct_hdr_s *cthdr = NULL;
2133
2134 bfa_trc(port->fcs, port->port_cfg.pwwn);
2135
2136 /*
2137 * Sanity Checks
2138 */
2139 if (req_status != BFA_STATUS_OK) {
2140 bfa_trc(port->fcs, req_status);
2141 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
2142 return;
2143 }
2144
2145 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
2146 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
2147
2148 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
2149 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
2150 return;
2151 }
2152
2153 bfa_trc(port->fcs, cthdr->reason_code);
2154 bfa_trc(port->fcs, cthdr->exp_code);
2155 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
2156}
2157
2158static void
2159bfa_fcs_lport_fdmi_timeout(void *arg)
2160{
2161 struct bfa_fcs_lport_fdmi_s *fdmi = (struct bfa_fcs_lport_fdmi_s *) arg;
2162
2163 bfa_sm_send_event(fdmi, FDMISM_EVENT_TIMEOUT);
2164}
2165
2166void
2167bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
2168 struct bfa_fcs_fdmi_hba_attr_s *hba_attr)
2169{
2170 struct bfa_fcs_lport_s *port = fdmi->ms->port;
2171 struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info;
2172
2173 bfa_os_memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s));
2174
2175 bfa_ioc_get_adapter_manufacturer(&port->fcs->bfa->ioc,
2176 hba_attr->manufacturer);
2177 bfa_ioc_get_adapter_serial_num(&port->fcs->bfa->ioc,
2178 hba_attr->serial_num);
2179 bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc,
2180 hba_attr->model);
2181 bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc,
2182 hba_attr->model_desc);
2183 bfa_ioc_get_pci_chip_rev(&port->fcs->bfa->ioc,
2184 hba_attr->hw_version);
2185 bfa_ioc_get_adapter_optrom_ver(&port->fcs->bfa->ioc,
2186 hba_attr->option_rom_ver);
2187 bfa_ioc_get_adapter_fw_ver(&port->fcs->bfa->ioc,
2188 hba_attr->fw_version);
2189
2190 strncpy(hba_attr->driver_version, (char *)driver_info->version,
2191 sizeof(hba_attr->driver_version));
2192
2193 strncpy(hba_attr->os_name, driver_info->host_os_name,
2194 sizeof(hba_attr->os_name));
2195
2196 /*
2197 * If there is a patch level, append it
2198 * to the os name along with a separator
2199 */
2200 if (driver_info->host_os_patch[0] != '\0') {
2201 strncat(hba_attr->os_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
2202 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
2203 strncat(hba_attr->os_name, driver_info->host_os_patch,
2204 sizeof(driver_info->host_os_patch));
2205 }
2206
2207 hba_attr->max_ct_pyld = bfa_os_htonl(FC_MAX_PDUSZ);
2208}
2209
2210void
2211bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
2212 struct bfa_fcs_fdmi_port_attr_s *port_attr)
2213{
2214 struct bfa_fcs_lport_s *port = fdmi->ms->port;
2215 struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info;
2216 struct bfa_port_attr_s pport_attr;
2217
2218 bfa_os_memset(port_attr, 0, sizeof(struct bfa_fcs_fdmi_port_attr_s));
2219
2220 /*
2221 * get pport attributes from hal
2222 */
2223 bfa_fcport_get_attr(port->fcs->bfa, &pport_attr);
2224
2225 /*
2226 * get FC4 type Bitmask
2227 */
2228 fc_get_fc4type_bitmask(FC_TYPE_FCP, port_attr->supp_fc4_types);
2229
2230 /*
2231 * Supported Speeds
2232 */
2233 port_attr->supp_speed = bfa_os_htonl(BFA_FCS_FDMI_SUPORTED_SPEEDS);
2234
2235 /*
2236 * Current Speed
2237 */
2238 port_attr->curr_speed = bfa_os_htonl(pport_attr.speed);
2239
2240 /*
2241 * Max PDU Size.
2242 */
2243 port_attr->max_frm_size = bfa_os_htonl(FC_MAX_PDUSZ);
2244
2245 /*
2246 * OS device Name
2247 */
2248 strncpy(port_attr->os_device_name, (char *)driver_info->os_device_name,
2249 sizeof(port_attr->os_device_name));
2250
2251 /*
2252 * Host name
2253 */
2254 strncpy(port_attr->host_name, (char *)driver_info->host_machine_name,
2255 sizeof(port_attr->host_name));
2256
2257}
2258
2259
2260void
2261bfa_fcs_lport_fdmi_init(struct bfa_fcs_lport_ms_s *ms)
2262{
2263 struct bfa_fcs_lport_fdmi_s *fdmi = &ms->fdmi;
2264
2265 fdmi->ms = ms;
2266 if (ms->port->fcs->fdmi_enabled)
2267 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
2268 else
2269 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_disabled);
2270}
2271
2272void
2273bfa_fcs_lport_fdmi_offline(struct bfa_fcs_lport_ms_s *ms)
2274{
2275 struct bfa_fcs_lport_fdmi_s *fdmi = &ms->fdmi;
2276
2277 fdmi->ms = ms;
2278 bfa_sm_send_event(fdmi, FDMISM_EVENT_PORT_OFFLINE);
2279}
2280
2281void
2282bfa_fcs_lport_fdmi_online(struct bfa_fcs_lport_ms_s *ms)
2283{
2284 struct bfa_fcs_lport_fdmi_s *fdmi = &ms->fdmi;
2285
2286 fdmi->ms = ms;
2287 bfa_sm_send_event(fdmi, FDMISM_EVENT_PORT_ONLINE);
2288}
2289
2290#define BFA_FCS_MS_CMD_MAX_RETRIES 2
2291
2292/*
2293 * forward declarations
2294 */
2295static void bfa_fcs_lport_ms_send_plogi(void *ms_cbarg,
2296 struct bfa_fcxp_s *fcxp_alloced);
2297static void bfa_fcs_lport_ms_timeout(void *arg);
2298static void bfa_fcs_lport_ms_plogi_response(void *fcsarg,
2299 struct bfa_fcxp_s *fcxp,
2300 void *cbarg,
2301 bfa_status_t req_status,
2302 u32 rsp_len,
2303 u32 resid_len,
2304 struct fchs_s *rsp_fchs);
2305
2306static void bfa_fcs_lport_ms_send_gmal(void *ms_cbarg,
2307 struct bfa_fcxp_s *fcxp_alloced);
2308static void bfa_fcs_lport_ms_gmal_response(void *fcsarg,
2309 struct bfa_fcxp_s *fcxp,
2310 void *cbarg,
2311 bfa_status_t req_status,
2312 u32 rsp_len,
2313 u32 resid_len,
2314 struct fchs_s *rsp_fchs);
2315static void bfa_fcs_lport_ms_send_gfn(void *ms_cbarg,
2316 struct bfa_fcxp_s *fcxp_alloced);
2317static void bfa_fcs_lport_ms_gfn_response(void *fcsarg,
2318 struct bfa_fcxp_s *fcxp,
2319 void *cbarg,
2320 bfa_status_t req_status,
2321 u32 rsp_len,
2322 u32 resid_len,
2323 struct fchs_s *rsp_fchs);
2324/**
2325 * fcs_ms_sm FCS MS state machine
2326 */
2327
2328/**
2329 * MS State Machine events
2330 */
2331enum port_ms_event {
2332 MSSM_EVENT_PORT_ONLINE = 1,
2333 MSSM_EVENT_PORT_OFFLINE = 2,
2334 MSSM_EVENT_RSP_OK = 3,
2335 MSSM_EVENT_RSP_ERROR = 4,
2336 MSSM_EVENT_TIMEOUT = 5,
2337 MSSM_EVENT_FCXP_SENT = 6,
2338 MSSM_EVENT_PORT_FABRIC_RSCN = 7
2339};
2340
2341static void bfa_fcs_lport_ms_sm_offline(struct bfa_fcs_lport_ms_s *ms,
2342 enum port_ms_event event);
2343static void bfa_fcs_lport_ms_sm_plogi_sending(struct bfa_fcs_lport_ms_s *ms,
2344 enum port_ms_event event);
2345static void bfa_fcs_lport_ms_sm_plogi(struct bfa_fcs_lport_ms_s *ms,
2346 enum port_ms_event event);
2347static void bfa_fcs_lport_ms_sm_plogi_retry(struct bfa_fcs_lport_ms_s *ms,
2348 enum port_ms_event event);
2349static void bfa_fcs_lport_ms_sm_gmal_sending(struct bfa_fcs_lport_ms_s *ms,
2350 enum port_ms_event event);
2351static void bfa_fcs_lport_ms_sm_gmal(struct bfa_fcs_lport_ms_s *ms,
2352 enum port_ms_event event);
2353static void bfa_fcs_lport_ms_sm_gmal_retry(struct bfa_fcs_lport_ms_s *ms,
2354 enum port_ms_event event);
2355static void bfa_fcs_lport_ms_sm_gfn_sending(struct bfa_fcs_lport_ms_s *ms,
2356 enum port_ms_event event);
2357static void bfa_fcs_lport_ms_sm_gfn(struct bfa_fcs_lport_ms_s *ms,
2358 enum port_ms_event event);
2359static void bfa_fcs_lport_ms_sm_gfn_retry(struct bfa_fcs_lport_ms_s *ms,
2360 enum port_ms_event event);
2361static void bfa_fcs_lport_ms_sm_online(struct bfa_fcs_lport_ms_s *ms,
2362 enum port_ms_event event);
2363/**
2364 * Start in offline state - awaiting NS to send start.
2365 */
2366static void
2367bfa_fcs_lport_ms_sm_offline(struct bfa_fcs_lport_ms_s *ms,
2368 enum port_ms_event event)
2369{
2370 bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
2371 bfa_trc(ms->port->fcs, event);
2372
2373 switch (event) {
2374 case MSSM_EVENT_PORT_ONLINE:
2375 bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_plogi_sending);
2376 bfa_fcs_lport_ms_send_plogi(ms, NULL);
2377 break;
2378
2379 case MSSM_EVENT_PORT_OFFLINE:
2380 break;
2381
2382 default:
2383 bfa_sm_fault(ms->port->fcs, event);
2384 }
2385}
2386
2387static void
2388bfa_fcs_lport_ms_sm_plogi_sending(struct bfa_fcs_lport_ms_s *ms,
2389 enum port_ms_event event)
2390{
2391 bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
2392 bfa_trc(ms->port->fcs, event);
2393
2394 switch (event) {
2395 case MSSM_EVENT_FCXP_SENT:
2396 bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_plogi);
2397 break;
2398
2399 case MSSM_EVENT_PORT_OFFLINE:
2400 bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
2401 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
2402 &ms->fcxp_wqe);
2403 break;
2404
2405 default:
2406 bfa_sm_fault(ms->port->fcs, event);
2407 }
2408}
2409
2410static void
2411bfa_fcs_lport_ms_sm_plogi(struct bfa_fcs_lport_ms_s *ms,
2412 enum port_ms_event event)
2413{
2414 bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
2415 bfa_trc(ms->port->fcs, event);
2416
2417 switch (event) {
2418 case MSSM_EVENT_RSP_ERROR:
2419 /*
2420 * Start timer for a delayed retry
2421 */
2422 bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_plogi_retry);
2423 ms->port->stats.ms_retries++;
2424 bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
2425 &ms->timer, bfa_fcs_lport_ms_timeout, ms,
2426 BFA_FCS_RETRY_TIMEOUT);
2427 break;
2428
2429 case MSSM_EVENT_RSP_OK:
2430 /*
2431 * since plogi is done, now invoke MS related sub-modules
2432 */
2433 bfa_fcs_lport_fdmi_online(ms);
2434
2435 /**
2436 * if this is a Vport, go to online state.
2437 */
2438 if (ms->port->vport) {
2439 bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_online);
2440 break;
2441 }
2442
2443 /*
2444 * For a base port we need to get the
2445 * switch's IP address.
2446 */
2447 bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gmal_sending);
2448 bfa_fcs_lport_ms_send_gmal(ms, NULL);
2449 break;
2450
2451 case MSSM_EVENT_PORT_OFFLINE:
2452 bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
2453 bfa_fcxp_discard(ms->fcxp);
2454 break;
2455
2456 default:
2457 bfa_sm_fault(ms->port->fcs, event);
2458 }
2459}
2460
2461static void
2462bfa_fcs_lport_ms_sm_plogi_retry(struct bfa_fcs_lport_ms_s *ms,
2463 enum port_ms_event event)
2464{
2465 bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
2466 bfa_trc(ms->port->fcs, event);
2467
2468 switch (event) {
2469 case MSSM_EVENT_TIMEOUT:
2470 /*
2471 * Retry Timer Expired. Re-send
2472 */
2473 bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_plogi_sending);
2474 bfa_fcs_lport_ms_send_plogi(ms, NULL);
2475 break;
2476
2477 case MSSM_EVENT_PORT_OFFLINE:
2478 bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
2479 bfa_timer_stop(&ms->timer);
2480 break;
2481
2482 default:
2483 bfa_sm_fault(ms->port->fcs, event);
2484 }
2485}
2486
2487static void
2488bfa_fcs_lport_ms_sm_online(struct bfa_fcs_lport_ms_s *ms,
2489 enum port_ms_event event)
2490{
2491 bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
2492 bfa_trc(ms->port->fcs, event);
2493
2494 switch (event) {
2495 case MSSM_EVENT_PORT_OFFLINE:
2496 bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
2497 break;
2498
2499 case MSSM_EVENT_PORT_FABRIC_RSCN:
2500 bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_sending);
2501 ms->retry_cnt = 0;
2502 bfa_fcs_lport_ms_send_gfn(ms, NULL);
2503 break;
2504
2505 default:
2506 bfa_sm_fault(ms->port->fcs, event);
2507 }
2508}
2509
2510static void
2511bfa_fcs_lport_ms_sm_gmal_sending(struct bfa_fcs_lport_ms_s *ms,
2512 enum port_ms_event event)
2513{
2514 bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
2515 bfa_trc(ms->port->fcs, event);
2516
2517 switch (event) {
2518 case MSSM_EVENT_FCXP_SENT:
2519 bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gmal);
2520 break;
2521
2522 case MSSM_EVENT_PORT_OFFLINE:
2523 bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
2524 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
2525 &ms->fcxp_wqe);
2526 break;
2527
2528 default:
2529 bfa_sm_fault(ms->port->fcs, event);
2530 }
2531}
2532
2533static void
2534bfa_fcs_lport_ms_sm_gmal(struct bfa_fcs_lport_ms_s *ms,
2535 enum port_ms_event event)
2536{
2537 bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
2538 bfa_trc(ms->port->fcs, event);
2539
2540 switch (event) {
2541 case MSSM_EVENT_RSP_ERROR:
2542 /*
2543 * Start timer for a delayed retry
2544 */
2545 if (ms->retry_cnt++ < BFA_FCS_MS_CMD_MAX_RETRIES) {
2546 bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gmal_retry);
2547 ms->port->stats.ms_retries++;
2548 bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
2549 &ms->timer, bfa_fcs_lport_ms_timeout, ms,
2550 BFA_FCS_RETRY_TIMEOUT);
2551 } else {
2552 bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_sending);
2553 bfa_fcs_lport_ms_send_gfn(ms, NULL);
2554 ms->retry_cnt = 0;
2555 }
2556 break;
2557
2558 case MSSM_EVENT_RSP_OK:
2559 bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_sending);
2560 bfa_fcs_lport_ms_send_gfn(ms, NULL);
2561 break;
2562
2563 case MSSM_EVENT_PORT_OFFLINE:
2564 bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
2565 bfa_fcxp_discard(ms->fcxp);
2566 break;
2567
2568 default:
2569 bfa_sm_fault(ms->port->fcs, event);
2570 }
2571}
2572
2573static void
2574bfa_fcs_lport_ms_sm_gmal_retry(struct bfa_fcs_lport_ms_s *ms,
2575 enum port_ms_event event)
2576{
2577 bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
2578 bfa_trc(ms->port->fcs, event);
2579
2580 switch (event) {
2581 case MSSM_EVENT_TIMEOUT:
2582 /*
2583 * Retry Timer Expired. Re-send
2584 */
2585 bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gmal_sending);
2586 bfa_fcs_lport_ms_send_gmal(ms, NULL);
2587 break;
2588
2589 case MSSM_EVENT_PORT_OFFLINE:
2590 bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
2591 bfa_timer_stop(&ms->timer);
2592 break;
2593
2594 default:
2595 bfa_sm_fault(ms->port->fcs, event);
2596 }
2597}
2598/**
2599 * ms_pvt MS local functions
2600 */
2601
2602static void
2603bfa_fcs_lport_ms_send_gmal(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
2604{
2605 struct bfa_fcs_lport_ms_s *ms = ms_cbarg;
2606 bfa_fcs_lport_t *port = ms->port;
2607 struct fchs_s fchs;
2608 int len;
2609 struct bfa_fcxp_s *fcxp;
2610
2611 bfa_trc(port->fcs, port->pid);
2612
2613 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
2614 if (!fcxp) {
2615 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
2616 bfa_fcs_lport_ms_send_gmal, ms);
2617 return;
2618 }
2619 ms->fcxp = fcxp;
2620
2621 len = fc_gmal_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
2622 bfa_fcs_lport_get_fcid(port),
2623 bfa_lps_get_peer_nwwn(port->fabric->lps));
2624
2625 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
2626 FC_CLASS_3, len, &fchs,
2627 bfa_fcs_lport_ms_gmal_response, (void *)ms,
2628 FC_MAX_PDUSZ, FC_FCCT_TOV);
2629
2630 bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT);
2631}
2632
/*
 * GMAL response handler.
 *
 * On a CT accept, walks the returned management address list looking
 * for the first "http://" entry, strips a trailing '/' from it, and
 * copies the address into the fabric's IP-address field.  Any
 * transport failure, CT reject or empty list raises RSP_ERROR to the
 * MS state machine; otherwise RSP_OK is raised (even when no http
 * entry was found).
 */
static void
bfa_fcs_lport_ms_gmal_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
				void *cbarg, bfa_status_t req_status,
				u32 rsp_len, u32 resid_len,
				struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_lport_ms_s *ms = (struct bfa_fcs_lport_ms_s *) cbarg;
	bfa_fcs_lport_t *port = ms->port;
	struct ct_hdr_s *cthdr = NULL;
	struct fcgs_gmal_resp_s *gmal_resp;
	struct fcgs_gmal_entry_s *gmal_entry;
	u32 num_entries;
	u8 *rsp_str;

	bfa_trc(port->fcs, req_status);
	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		bfa_trc(port->fcs, req_status);
		bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
		return;
	}

	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);

	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
		gmal_resp = (struct fcgs_gmal_resp_s *)(cthdr + 1);

		/* an accepted response with zero entries is still an error */
		num_entries = bfa_os_ntohl(gmal_resp->ms_len);
		if (num_entries == 0) {
			bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
			return;
		}
		/*
		 * The response could contain multiple Entries.
		 * Entries for SNMP interface, etc.
		 * We look for the entry with a telnet prefix.
		 * First "http://" entry refers to IP addr
		 *
		 * NOTE(review): ++gmal_entry steps by
		 * sizeof(struct fcgs_gmal_entry_s), i.e. assumes
		 * fixed-size entries -- confirm against the struct
		 * definition and the FC-GS MA list format.
		 */

		gmal_entry = (struct fcgs_gmal_entry_s *)gmal_resp->ms_ma;
		while (num_entries > 0) {
			if (strncmp(gmal_entry->prefix,
				CT_GMAL_RESP_PREFIX_HTTP,
				sizeof(gmal_entry->prefix)) == 0) {

				/*
				 * if the IP address is terminating with a '/',
				 * remove it.
				 * Byte 0 consists of the length of the string.
				 *
				 * NOTE(review): assumes gmal_entry->len >= 1;
				 * len == 0 would index rsp_str[-1] -- verify
				 * the fabric never returns a zero length.
				 */
				rsp_str = &(gmal_entry->prefix[0]);
				if (rsp_str[gmal_entry->len-1] == '/')
					rsp_str[gmal_entry->len-1] = 0;

				/* copy IP Address to fabric */
				strncpy(bfa_fcs_lport_get_fabric_ipaddr(port),
					gmal_entry->ip_addr,
					BFA_FCS_FABRIC_IPADDR_SZ);
				break;
			} else {
				--num_entries;
				++gmal_entry;
			}
		}

		bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK);
		return;
	}

	bfa_trc(port->fcs, cthdr->reason_code);
	bfa_trc(port->fcs, cthdr->exp_code);
	bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
}
2711
2712static void
2713bfa_fcs_lport_ms_sm_gfn_sending(struct bfa_fcs_lport_ms_s *ms,
2714 enum port_ms_event event)
2715{
2716 bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
2717 bfa_trc(ms->port->fcs, event);
2718
2719 switch (event) {
2720 case MSSM_EVENT_FCXP_SENT:
2721 bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn);
2722 break;
2723
2724 case MSSM_EVENT_PORT_OFFLINE:
2725 bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
2726 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
2727 &ms->fcxp_wqe);
2728 break;
2729
2730 default:
2731 bfa_sm_fault(ms->port->fcs, event);
2732 }
2733}
2734
2735static void
2736bfa_fcs_lport_ms_sm_gfn(struct bfa_fcs_lport_ms_s *ms,
2737 enum port_ms_event event)
2738{
2739 bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
2740 bfa_trc(ms->port->fcs, event);
2741
2742 switch (event) {
2743 case MSSM_EVENT_RSP_ERROR:
2744 /*
2745 * Start timer for a delayed retry
2746 */
2747 if (ms->retry_cnt++ < BFA_FCS_MS_CMD_MAX_RETRIES) {
2748 bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_retry);
2749 ms->port->stats.ms_retries++;
2750 bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
2751 &ms->timer, bfa_fcs_lport_ms_timeout, ms,
2752 BFA_FCS_RETRY_TIMEOUT);
2753 } else {
2754 bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_online);
2755 ms->retry_cnt = 0;
2756 }
2757 break;
2758
2759 case MSSM_EVENT_RSP_OK:
2760 bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_online);
2761 break;
2762
2763 case MSSM_EVENT_PORT_OFFLINE:
2764 bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
2765 bfa_fcxp_discard(ms->fcxp);
2766 break;
2767
2768 default:
2769 bfa_sm_fault(ms->port->fcs, event);
2770 }
2771}
2772
2773static void
2774bfa_fcs_lport_ms_sm_gfn_retry(struct bfa_fcs_lport_ms_s *ms,
2775 enum port_ms_event event)
2776{
2777 bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
2778 bfa_trc(ms->port->fcs, event);
2779
2780 switch (event) {
2781 case MSSM_EVENT_TIMEOUT:
2782 /*
2783 * Retry Timer Expired. Re-send
2784 */
2785 bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_sending);
2786 bfa_fcs_lport_ms_send_gfn(ms, NULL);
2787 break;
2788
2789 case MSSM_EVENT_PORT_OFFLINE:
2790 bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
2791 bfa_timer_stop(&ms->timer);
2792 break;
2793
2794 default:
2795 bfa_sm_fault(ms->port->fcs, event);
2796 }
2797}
2798/**
2799 * ms_pvt MS local functions
2800 */
2801
/**
 * Send a GFN (Get Fabric Name) request to the management server.
 * If no fcxp is available, park on the wait queue; this function is
 * re-entered with a non-NULL fcxp_alloced once one frees up.
 */
static void
bfa_fcs_lport_ms_send_gfn(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_lport_ms_s *ms = ms_cbarg;
	bfa_fcs_lport_t *port = ms->port;
	struct fchs_s fchs;
	int len;
	struct bfa_fcxp_s *fcxp;

	bfa_trc(port->fcs, port->pid);

	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		/* no fcxp free - queue and retry via this same function */
		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
					bfa_fcs_lport_ms_send_gfn, ms);
		return;
	}
	ms->fcxp = fcxp;

	/* build the GFN CT request for the fabric's peer node name */
	len = fc_gfn_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
			     bfa_fcs_lport_get_fcid(port),
				 bfa_lps_get_peer_nwwn(port->fabric->lps));

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
		      FC_CLASS_3, len, &fchs,
		      bfa_fcs_lport_ms_gfn_response, (void *)ms,
		      FC_MAX_PDUSZ, FC_FCCT_TOV);

	bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT);
}
2832
/**
 * Handle the GFN (Get Fabric Name) response.  On a CT accept, compare
 * the returned fabric name with the cached one and update the fabric
 * if it has changed.
 */
static void
bfa_fcs_lport_ms_gfn_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
			void *cbarg, bfa_status_t req_status, u32 rsp_len,
			u32 resid_len, struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_lport_ms_s *ms = (struct bfa_fcs_lport_ms_s *) cbarg;
	bfa_fcs_lport_t *port = ms->port;
	struct ct_hdr_s *cthdr = NULL;
	wwn_t	       *gfn_resp;

	bfa_trc(port->fcs, req_status);
	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		bfa_trc(port->fcs, req_status);
		bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
		return;
	}

	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);

	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
		/* fabric name follows immediately after the CT header */
		gfn_resp = (wwn_t *)(cthdr + 1);
		/* check if it has actually changed */
		if ((memcmp((void *)&bfa_fcs_lport_get_fabric_name(port),
				gfn_resp, sizeof(wwn_t)) != 0)) {
			bfa_fcs_fabric_set_fabric_name(port->fabric, *gfn_resp);
		}
		bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK);
		return;
	}

	/* CT reject: trace the reason and report an error response */
	bfa_trc(port->fcs, cthdr->reason_code);
	bfa_trc(port->fcs, cthdr->exp_code);
	bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
}
2873
2874/**
2875 * ms_pvt MS local functions
2876 */
2877
/**
 * Send a PLOGI to the management server well-known address
 * (FC_MGMT_SERVER).  If no fcxp is available, park on the wait queue;
 * this function is re-entered once one frees up.
 */
static void
bfa_fcs_lport_ms_send_plogi(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_lport_ms_s *ms = ms_cbarg;
	struct bfa_fcs_lport_s *port = ms->port;
	struct fchs_s fchs;
	int len;
	struct bfa_fcxp_s *fcxp;

	bfa_trc(port->fcs, port->pid);

	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		port->stats.ms_plogi_alloc_wait++;
		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
					bfa_fcs_lport_ms_send_plogi, ms);
		return;
	}
	ms->fcxp = fcxp;

	/* build the PLOGI addressed to the management server */
	len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
			     bfa_os_hton3b(FC_MGMT_SERVER),
			     bfa_fcs_lport_get_fcid(port), 0,
			     port->port_cfg.pwwn, port->port_cfg.nwwn,
			     bfa_fcport_get_maxfrsize(port->fcs->bfa));

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
		      FC_CLASS_3, len, &fchs,
		      bfa_fcs_lport_ms_plogi_response, (void *)ms,
		      FC_MAX_PDUSZ, FC_ELS_TOV);

	port->stats.ms_plogi_sent++;
	bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT);
}
2912
/**
 * Handle the PLOGI response from the management server.  An ACC with
 * a full login payload drives RSP_OK; a short ACC, an LS_RJT, or an
 * unknown ELS code all drive RSP_ERROR.
 */
static void
bfa_fcs_lport_ms_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
			void *cbarg, bfa_status_t req_status,
			u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_lport_ms_s *ms = (struct bfa_fcs_lport_ms_s *) cbarg;
	struct bfa_fcs_lport_s *port = ms->port;
	struct fc_els_cmd_s *els_cmd;
	struct fc_ls_rjt_s *ls_rjt;

	bfa_trc(port->fcs, req_status);
	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		port->stats.ms_plogi_rsp_err++;
		bfa_trc(port->fcs, req_status);
		bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
		return;
	}

	els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);

	switch (els_cmd->els_code) {

	case FC_ELS_ACC:
		/* an ACC shorter than a full PLOGI payload is invalid */
		if (rsp_len < sizeof(struct fc_logi_s)) {
			bfa_trc(port->fcs, rsp_len);
			port->stats.ms_plogi_acc_err++;
			bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
			break;
		}
		port->stats.ms_plogi_accepts++;
		bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK);
		break;

	case FC_ELS_LS_RJT:
		ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);

		bfa_trc(port->fcs, ls_rjt->reason_code);
		bfa_trc(port->fcs, ls_rjt->reason_code_expl);

		port->stats.ms_rejects++;
		bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
		break;

	default:
		port->stats.ms_plogi_unknown_rsp++;
		bfa_trc(port->fcs, els_cmd->els_code);
		bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
	}
}
2967
2968static void
2969bfa_fcs_lport_ms_timeout(void *arg)
2970{
2971 struct bfa_fcs_lport_ms_s *ms = (struct bfa_fcs_lport_ms_s *) arg;
2972
2973 ms->port->stats.ms_timeouts++;
2974 bfa_sm_send_event(ms, MSSM_EVENT_TIMEOUT);
2975}
2976
2977
/**
 * Initialize the management-server interface of a logical port and
 * the sub-modules (FDMI) layered on top of it.
 */
void
bfa_fcs_lport_ms_init(struct bfa_fcs_lport_s *port)
{
	struct bfa_fcs_lport_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port);

	ms->port = port;
	bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);

	/*
	 * Invoke init routines of sub modules.
	 */
	bfa_fcs_lport_fdmi_init(ms);
}
2991
/**
 * Local port went offline: push the MS state machine offline and take
 * the FDMI sub-module offline with it.
 */
void
bfa_fcs_lport_ms_offline(struct bfa_fcs_lport_s *port)
{
	struct bfa_fcs_lport_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port);

	ms->port = port;
	bfa_sm_send_event(ms, MSSM_EVENT_PORT_OFFLINE);
	bfa_fcs_lport_fdmi_offline(ms);
}
3001
/**
 * Local port came online: kick the MS state machine out of offline.
 */
void
bfa_fcs_lport_ms_online(struct bfa_fcs_lport_s *port)
{
	struct bfa_fcs_lport_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port);

	ms->port = port;
	bfa_sm_send_event(ms, MSSM_EVENT_PORT_ONLINE);
}
/**
 * A fabric RSCN was received; forward it to the MS state machine,
 * but only when the state machine is currently online.
 */
void
bfa_fcs_lport_ms_fabric_rscn(struct bfa_fcs_lport_s *port)
{
	struct bfa_fcs_lport_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port);

	/* todo. Handle this only when in Online state */
	if (bfa_sm_cmp_state(ms, bfa_fcs_lport_ms_sm_online))
		bfa_sm_send_event(ms, MSSM_EVENT_PORT_FABRIC_RSCN);
}
3019
3020/**
3021 * @page ns_sm_info VPORT NS State Machine
3022 *
3023 * @section ns_sm_interactions VPORT NS State Machine Interactions
3024 *
3025 * @section ns_sm VPORT NS State Machine
3026 * img ns_sm.jpg
3027 */
3028
3029/*
3030 * forward declarations
3031 */
3032static void bfa_fcs_lport_ns_send_plogi(void *ns_cbarg,
3033 struct bfa_fcxp_s *fcxp_alloced);
3034static void bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg,
3035 struct bfa_fcxp_s *fcxp_alloced);
3036static void bfa_fcs_lport_ns_send_rft_id(void *ns_cbarg,
3037 struct bfa_fcxp_s *fcxp_alloced);
3038static void bfa_fcs_lport_ns_send_rff_id(void *ns_cbarg,
3039 struct bfa_fcxp_s *fcxp_alloced);
3040static void bfa_fcs_lport_ns_send_gid_ft(void *ns_cbarg,
3041 struct bfa_fcxp_s *fcxp_alloced);
3042static void bfa_fcs_lport_ns_timeout(void *arg);
3043static void bfa_fcs_lport_ns_plogi_response(void *fcsarg,
3044 struct bfa_fcxp_s *fcxp,
3045 void *cbarg,
3046 bfa_status_t req_status,
3047 u32 rsp_len,
3048 u32 resid_len,
3049 struct fchs_s *rsp_fchs);
3050static void bfa_fcs_lport_ns_rspn_id_response(void *fcsarg,
3051 struct bfa_fcxp_s *fcxp,
3052 void *cbarg,
3053 bfa_status_t req_status,
3054 u32 rsp_len,
3055 u32 resid_len,
3056 struct fchs_s *rsp_fchs);
3057static void bfa_fcs_lport_ns_rft_id_response(void *fcsarg,
3058 struct bfa_fcxp_s *fcxp,
3059 void *cbarg,
3060 bfa_status_t req_status,
3061 u32 rsp_len,
3062 u32 resid_len,
3063 struct fchs_s *rsp_fchs);
3064static void bfa_fcs_lport_ns_rff_id_response(void *fcsarg,
3065 struct bfa_fcxp_s *fcxp,
3066 void *cbarg,
3067 bfa_status_t req_status,
3068 u32 rsp_len,
3069 u32 resid_len,
3070 struct fchs_s *rsp_fchs);
3071static void bfa_fcs_lport_ns_gid_ft_response(void *fcsarg,
3072 struct bfa_fcxp_s *fcxp,
3073 void *cbarg,
3074 bfa_status_t req_status,
3075 u32 rsp_len,
3076 u32 resid_len,
3077 struct fchs_s *rsp_fchs);
3078static void bfa_fcs_lport_ns_process_gidft_pids(
3079 struct bfa_fcs_lport_s *port,
3080 u32 *pid_buf, u32 n_pids);
3081
3082static void bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port);
3083/**
3084 * fcs_ns_sm FCS nameserver interface state machine
3085 */
3086
3087/**
3088 * VPort NS State Machine events
3089 */
enum vport_ns_event {
	NSSM_EVENT_PORT_ONLINE = 1,	/* local port came online */
	NSSM_EVENT_PORT_OFFLINE = 2,	/* local port went offline */
	NSSM_EVENT_PLOGI_SENT = 3,	/* PLOGI handed to the fcxp layer */
	NSSM_EVENT_RSP_OK = 4,		/* last NS request was accepted */
	NSSM_EVENT_RSP_ERROR = 5,	/* last NS request failed/rejected */
	NSSM_EVENT_TIMEOUT = 6,		/* retry timer expired */
	NSSM_EVENT_NS_QUERY = 7,	/* request to re-run the GID_FT query */
	NSSM_EVENT_RSPNID_SENT = 8,	/* RSPN_ID handed to the fcxp layer */
	NSSM_EVENT_RFTID_SENT = 9,	/* RFT_ID handed to the fcxp layer */
	NSSM_EVENT_RFFID_SENT = 10,	/* RFF_ID handed to the fcxp layer */
	NSSM_EVENT_GIDFT_SENT = 11,	/* GID_FT handed to the fcxp layer */
};
3103
3104static void bfa_fcs_lport_ns_sm_offline(struct bfa_fcs_lport_ns_s *ns,
3105 enum vport_ns_event event);
3106static void bfa_fcs_lport_ns_sm_plogi_sending(struct bfa_fcs_lport_ns_s *ns,
3107 enum vport_ns_event event);
3108static void bfa_fcs_lport_ns_sm_plogi(struct bfa_fcs_lport_ns_s *ns,
3109 enum vport_ns_event event);
3110static void bfa_fcs_lport_ns_sm_plogi_retry(struct bfa_fcs_lport_ns_s *ns,
3111 enum vport_ns_event event);
3112static void bfa_fcs_lport_ns_sm_sending_rspn_id(
3113 struct bfa_fcs_lport_ns_s *ns,
3114 enum vport_ns_event event);
3115static void bfa_fcs_lport_ns_sm_rspn_id(struct bfa_fcs_lport_ns_s *ns,
3116 enum vport_ns_event event);
3117static void bfa_fcs_lport_ns_sm_rspn_id_retry(struct bfa_fcs_lport_ns_s *ns,
3118 enum vport_ns_event event);
3119static void bfa_fcs_lport_ns_sm_sending_rft_id(
3120 struct bfa_fcs_lport_ns_s *ns,
3121 enum vport_ns_event event);
3122static void bfa_fcs_lport_ns_sm_rft_id_retry(struct bfa_fcs_lport_ns_s *ns,
3123 enum vport_ns_event event);
3124static void bfa_fcs_lport_ns_sm_rft_id(struct bfa_fcs_lport_ns_s *ns,
3125 enum vport_ns_event event);
3126static void bfa_fcs_lport_ns_sm_sending_rff_id(
3127 struct bfa_fcs_lport_ns_s *ns,
3128 enum vport_ns_event event);
3129static void bfa_fcs_lport_ns_sm_rff_id_retry(struct bfa_fcs_lport_ns_s *ns,
3130 enum vport_ns_event event);
3131static void bfa_fcs_lport_ns_sm_rff_id(struct bfa_fcs_lport_ns_s *ns,
3132 enum vport_ns_event event);
3133static void bfa_fcs_lport_ns_sm_sending_gid_ft(
3134 struct bfa_fcs_lport_ns_s *ns,
3135 enum vport_ns_event event);
3136static void bfa_fcs_lport_ns_sm_gid_ft(struct bfa_fcs_lport_ns_s *ns,
3137 enum vport_ns_event event);
3138static void bfa_fcs_lport_ns_sm_gid_ft_retry(struct bfa_fcs_lport_ns_s *ns,
3139 enum vport_ns_event event);
3140static void bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns,
3141 enum vport_ns_event event);
3142/**
3143 * Start in offline state - awaiting linkup
3144 */
3145static void
3146bfa_fcs_lport_ns_sm_offline(struct bfa_fcs_lport_ns_s *ns,
3147 enum vport_ns_event event)
3148{
3149 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
3150 bfa_trc(ns->port->fcs, event);
3151
3152 switch (event) {
3153 case NSSM_EVENT_PORT_ONLINE:
3154 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_plogi_sending);
3155 bfa_fcs_lport_ns_send_plogi(ns, NULL);
3156 break;
3157
3158 case NSSM_EVENT_PORT_OFFLINE:
3159 break;
3160
3161 default:
3162 bfa_sm_fault(ns->port->fcs, event);
3163 }
3164}
3165
3166static void
3167bfa_fcs_lport_ns_sm_plogi_sending(struct bfa_fcs_lport_ns_s *ns,
3168 enum vport_ns_event event)
3169{
3170 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
3171 bfa_trc(ns->port->fcs, event);
3172
3173 switch (event) {
3174 case NSSM_EVENT_PLOGI_SENT:
3175 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_plogi);
3176 break;
3177
3178 case NSSM_EVENT_PORT_OFFLINE:
3179 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
3180 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
3181 &ns->fcxp_wqe);
3182 break;
3183
3184 default:
3185 bfa_sm_fault(ns->port->fcs, event);
3186 }
3187}
3188
3189static void
3190bfa_fcs_lport_ns_sm_plogi(struct bfa_fcs_lport_ns_s *ns,
3191 enum vport_ns_event event)
3192{
3193 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
3194 bfa_trc(ns->port->fcs, event);
3195
3196 switch (event) {
3197 case NSSM_EVENT_RSP_ERROR:
3198 /*
3199 * Start timer for a delayed retry
3200 */
3201 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_plogi_retry);
3202 ns->port->stats.ns_retries++;
3203 bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
3204 &ns->timer, bfa_fcs_lport_ns_timeout, ns,
3205 BFA_FCS_RETRY_TIMEOUT);
3206 break;
3207
3208 case NSSM_EVENT_RSP_OK:
3209 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rspn_id);
3210 bfa_fcs_lport_ns_send_rspn_id(ns, NULL);
3211 break;
3212
3213 case NSSM_EVENT_PORT_OFFLINE:
3214 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
3215 bfa_fcxp_discard(ns->fcxp);
3216 break;
3217
3218 default:
3219 bfa_sm_fault(ns->port->fcs, event);
3220 }
3221}
3222
3223static void
3224bfa_fcs_lport_ns_sm_plogi_retry(struct bfa_fcs_lport_ns_s *ns,
3225 enum vport_ns_event event)
3226{
3227 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
3228 bfa_trc(ns->port->fcs, event);
3229
3230 switch (event) {
3231 case NSSM_EVENT_TIMEOUT:
3232 /*
3233 * Retry Timer Expired. Re-send
3234 */
3235 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_plogi_sending);
3236 bfa_fcs_lport_ns_send_plogi(ns, NULL);
3237 break;
3238
3239 case NSSM_EVENT_PORT_OFFLINE:
3240 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
3241 bfa_timer_stop(&ns->timer);
3242 break;
3243
3244 default:
3245 bfa_sm_fault(ns->port->fcs, event);
3246 }
3247}
3248
3249static void
3250bfa_fcs_lport_ns_sm_sending_rspn_id(struct bfa_fcs_lport_ns_s *ns,
3251 enum vport_ns_event event)
3252{
3253 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
3254 bfa_trc(ns->port->fcs, event);
3255
3256 switch (event) {
3257 case NSSM_EVENT_RSPNID_SENT:
3258 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rspn_id);
3259 break;
3260
3261 case NSSM_EVENT_PORT_OFFLINE:
3262 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
3263 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
3264 &ns->fcxp_wqe);
3265 break;
3266
3267 default:
3268 bfa_sm_fault(ns->port->fcs, event);
3269 }
3270}
3271
3272static void
3273bfa_fcs_lport_ns_sm_rspn_id(struct bfa_fcs_lport_ns_s *ns,
3274 enum vport_ns_event event)
3275{
3276 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
3277 bfa_trc(ns->port->fcs, event);
3278
3279 switch (event) {
3280 case NSSM_EVENT_RSP_ERROR:
3281 /*
3282 * Start timer for a delayed retry
3283 */
3284 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rspn_id_retry);
3285 ns->port->stats.ns_retries++;
3286 bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
3287 &ns->timer, bfa_fcs_lport_ns_timeout, ns,
3288 BFA_FCS_RETRY_TIMEOUT);
3289 break;
3290
3291 case NSSM_EVENT_RSP_OK:
3292 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rft_id);
3293 bfa_fcs_lport_ns_send_rft_id(ns, NULL);
3294 break;
3295
3296 case NSSM_EVENT_PORT_OFFLINE:
3297 bfa_fcxp_discard(ns->fcxp);
3298 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
3299 break;
3300
3301 default:
3302 bfa_sm_fault(ns->port->fcs, event);
3303 }
3304}
3305
3306static void
3307bfa_fcs_lport_ns_sm_rspn_id_retry(struct bfa_fcs_lport_ns_s *ns,
3308 enum vport_ns_event event)
3309{
3310 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
3311 bfa_trc(ns->port->fcs, event);
3312
3313 switch (event) {
3314 case NSSM_EVENT_TIMEOUT:
3315 /*
3316 * Retry Timer Expired. Re-send
3317 */
3318 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rspn_id);
3319 bfa_fcs_lport_ns_send_rspn_id(ns, NULL);
3320 break;
3321
3322 case NSSM_EVENT_PORT_OFFLINE:
3323 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
3324 bfa_timer_stop(&ns->timer);
3325 break;
3326
3327 default:
3328 bfa_sm_fault(ns->port->fcs, event);
3329 }
3330}
3331
3332static void
3333bfa_fcs_lport_ns_sm_sending_rft_id(struct bfa_fcs_lport_ns_s *ns,
3334 enum vport_ns_event event)
3335{
3336 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
3337 bfa_trc(ns->port->fcs, event);
3338
3339 switch (event) {
3340 case NSSM_EVENT_RFTID_SENT:
3341 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rft_id);
3342 break;
3343
3344 case NSSM_EVENT_PORT_OFFLINE:
3345 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
3346 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
3347 &ns->fcxp_wqe);
3348 break;
3349
3350 default:
3351 bfa_sm_fault(ns->port->fcs, event);
3352 }
3353}
3354
3355static void
3356bfa_fcs_lport_ns_sm_rft_id(struct bfa_fcs_lport_ns_s *ns,
3357 enum vport_ns_event event)
3358{
3359 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
3360 bfa_trc(ns->port->fcs, event);
3361
3362 switch (event) {
3363 case NSSM_EVENT_RSP_OK:
3364 /* Now move to register FC4 Features */
3365 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rff_id);
3366 bfa_fcs_lport_ns_send_rff_id(ns, NULL);
3367 break;
3368
3369 case NSSM_EVENT_RSP_ERROR:
3370 /*
3371 * Start timer for a delayed retry
3372 */
3373 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rft_id_retry);
3374 ns->port->stats.ns_retries++;
3375 bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
3376 &ns->timer, bfa_fcs_lport_ns_timeout, ns,
3377 BFA_FCS_RETRY_TIMEOUT);
3378 break;
3379
3380 case NSSM_EVENT_PORT_OFFLINE:
3381 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
3382 bfa_fcxp_discard(ns->fcxp);
3383 break;
3384
3385 default:
3386 bfa_sm_fault(ns->port->fcs, event);
3387 }
3388}
3389
3390static void
3391bfa_fcs_lport_ns_sm_rft_id_retry(struct bfa_fcs_lport_ns_s *ns,
3392 enum vport_ns_event event)
3393{
3394 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
3395 bfa_trc(ns->port->fcs, event);
3396
3397 switch (event) {
3398 case NSSM_EVENT_TIMEOUT:
3399 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rft_id);
3400 bfa_fcs_lport_ns_send_rft_id(ns, NULL);
3401 break;
3402
3403 case NSSM_EVENT_PORT_OFFLINE:
3404 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
3405 bfa_timer_stop(&ns->timer);
3406 break;
3407
3408 default:
3409 bfa_sm_fault(ns->port->fcs, event);
3410 }
3411}
3412
3413static void
3414bfa_fcs_lport_ns_sm_sending_rff_id(struct bfa_fcs_lport_ns_s *ns,
3415 enum vport_ns_event event)
3416{
3417 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
3418 bfa_trc(ns->port->fcs, event);
3419
3420 switch (event) {
3421 case NSSM_EVENT_RFFID_SENT:
3422 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rff_id);
3423 break;
3424
3425 case NSSM_EVENT_PORT_OFFLINE:
3426 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
3427 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
3428 &ns->fcxp_wqe);
3429 break;
3430
3431 default:
3432 bfa_sm_fault(ns->port->fcs, event);
3433 }
3434}
3435
/**
 * Waiting for the RFF_ID response.  On success, either retrieve boot
 * targets directly (min-cfg mode), or start rport discovery via
 * GID_FT (initiator mode) and bring the management server online.
 */
static void
bfa_fcs_lport_ns_sm_rff_id(struct bfa_fcs_lport_ns_s *ns,
			enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_RSP_OK:

		/*
		 * If min cfg mode is enabled, we donot initiate rport
		 * discovery with the fabric. Instead, we will retrieve the
		 * boot targets from HAL/FW.
		 */
		if (__fcs_min_cfg(ns->port->fcs)) {
			bfa_fcs_lport_ns_boot_target_disc(ns->port);
			bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_online);
			return;
		}

		/*
		 * If the port role is Initiator Mode issue NS query.
		 * If it is Target Mode, skip this and go to online.
		 */
		if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) {
			bfa_sm_set_state(ns,
				bfa_fcs_lport_ns_sm_sending_gid_ft);
			bfa_fcs_lport_ns_send_gid_ft(ns, NULL);
		}
		/*
		 * kick off mgmt srvr state machine
		 */
		bfa_fcs_lport_ms_online(ns->port);
		break;

	case NSSM_EVENT_RSP_ERROR:
		/*
		 * Start timer for a delayed retry
		 */
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rff_id_retry);
		ns->port->stats.ns_retries++;
		bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
				&ns->timer, bfa_fcs_lport_ns_timeout, ns,
				BFA_FCS_RETRY_TIMEOUT);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
		bfa_fcxp_discard(ns->fcxp);
		break;

	default:
		bfa_sm_fault(ns->port->fcs, event);
	}
}
3492
3493static void
3494bfa_fcs_lport_ns_sm_rff_id_retry(struct bfa_fcs_lport_ns_s *ns,
3495 enum vport_ns_event event)
3496{
3497 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
3498 bfa_trc(ns->port->fcs, event);
3499
3500 switch (event) {
3501 case NSSM_EVENT_TIMEOUT:
3502 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rff_id);
3503 bfa_fcs_lport_ns_send_rff_id(ns, NULL);
3504 break;
3505
3506 case NSSM_EVENT_PORT_OFFLINE:
3507 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
3508 bfa_timer_stop(&ns->timer);
3509 break;
3510
3511 default:
3512 bfa_sm_fault(ns->port->fcs, event);
3513 }
3514}
3515static void
3516bfa_fcs_lport_ns_sm_sending_gid_ft(struct bfa_fcs_lport_ns_s *ns,
3517 enum vport_ns_event event)
3518{
3519 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
3520 bfa_trc(ns->port->fcs, event);
3521
3522 switch (event) {
3523 case NSSM_EVENT_GIDFT_SENT:
3524 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_gid_ft);
3525 break;
3526
3527 case NSSM_EVENT_PORT_OFFLINE:
3528 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
3529 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
3530 &ns->fcxp_wqe);
3531 break;
3532
3533 default:
3534 bfa_sm_fault(ns->port->fcs, event);
3535 }
3536}
3537
3538static void
3539bfa_fcs_lport_ns_sm_gid_ft(struct bfa_fcs_lport_ns_s *ns,
3540 enum vport_ns_event event)
3541{
3542 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
3543 bfa_trc(ns->port->fcs, event);
3544
3545 switch (event) {
3546 case NSSM_EVENT_RSP_OK:
3547 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_online);
3548 break;
3549
3550 case NSSM_EVENT_RSP_ERROR:
3551 /*
3552 * TBD: for certain reject codes, we don't need to retry
3553 */
3554 /*
3555 * Start timer for a delayed retry
3556 */
3557 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_gid_ft_retry);
3558 ns->port->stats.ns_retries++;
3559 bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
3560 &ns->timer, bfa_fcs_lport_ns_timeout, ns,
3561 BFA_FCS_RETRY_TIMEOUT);
3562 break;
3563
3564 case NSSM_EVENT_PORT_OFFLINE:
3565 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
3566 bfa_fcxp_discard(ns->fcxp);
3567 break;
3568
3569 case NSSM_EVENT_NS_QUERY:
3570 break;
3571
3572 default:
3573 bfa_sm_fault(ns->port->fcs, event);
3574 }
3575}
3576
3577static void
3578bfa_fcs_lport_ns_sm_gid_ft_retry(struct bfa_fcs_lport_ns_s *ns,
3579 enum vport_ns_event event)
3580{
3581 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
3582 bfa_trc(ns->port->fcs, event);
3583
3584 switch (event) {
3585 case NSSM_EVENT_TIMEOUT:
3586 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_gid_ft);
3587 bfa_fcs_lport_ns_send_gid_ft(ns, NULL);
3588 break;
3589
3590 case NSSM_EVENT_PORT_OFFLINE:
3591 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
3592 bfa_timer_stop(&ns->timer);
3593 break;
3594
3595 default:
3596 bfa_sm_fault(ns->port->fcs, event);
3597 }
3598}
3599
3600static void
3601bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns,
3602 enum vport_ns_event event)
3603{
3604 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
3605 bfa_trc(ns->port->fcs, event);
3606
3607 switch (event) {
3608 case NSSM_EVENT_PORT_OFFLINE:
3609 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
3610 break;
3611
3612 case NSSM_EVENT_NS_QUERY:
3613 /*
3614 * If the port role is Initiator Mode issue NS query.
3615 * If it is Target Mode, skip this and go to online.
3616 */
3617 if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) {
3618 bfa_sm_set_state(ns,
3619 bfa_fcs_lport_ns_sm_sending_gid_ft);
3620 bfa_fcs_lport_ns_send_gid_ft(ns, NULL);
3621 };
3622 break;
3623
3624 default:
3625 bfa_sm_fault(ns->port->fcs, event);
3626 }
3627}
3628
3629
3630
3631/**
3632 * ns_pvt Nameserver local functions
3633 */
3634
3635static void
3636bfa_fcs_lport_ns_send_plogi(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
3637{
3638 struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
3639 struct bfa_fcs_lport_s *port = ns->port;
3640 struct fchs_s fchs;
3641 int len;
3642 struct bfa_fcxp_s *fcxp;
3643
3644 bfa_trc(port->fcs, port->pid);
3645
3646fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
3647 if (!fcxp) {
3648 port->stats.ns_plogi_alloc_wait++;
3649 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
3650 bfa_fcs_lport_ns_send_plogi, ns);
3651 return;
3652 }
3653 ns->fcxp = fcxp;
3654
3655 len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
3656 bfa_os_hton3b(FC_NAME_SERVER),
3657 bfa_fcs_lport_get_fcid(port), 0,
3658 port->port_cfg.pwwn, port->port_cfg.nwwn,
3659 bfa_fcport_get_maxfrsize(port->fcs->bfa));
3660
3661 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
3662 FC_CLASS_3, len, &fchs,
3663 bfa_fcs_lport_ns_plogi_response, (void *)ns,
3664 FC_MAX_PDUSZ, FC_ELS_TOV);
3665 port->stats.ns_plogi_sent++;
3666
3667 bfa_sm_send_event(ns, NSSM_EVENT_PLOGI_SENT);
3668}
3669
/**
 * Handle the PLOGI response from the name server.  An ACC with a full
 * login payload drives RSP_OK; a short ACC, an LS_RJT, or an unknown
 * ELS code all drive RSP_ERROR.
 */
static void
bfa_fcs_lport_ns_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
			void *cbarg, bfa_status_t req_status, u32 rsp_len,
			u32 resid_len, struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
	struct bfa_fcs_lport_s *port = ns->port;
	/* struct fc_logi_s *plogi_resp; */
	struct fc_els_cmd_s *els_cmd;
	struct fc_ls_rjt_s *ls_rjt;

	bfa_trc(port->fcs, req_status);
	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		bfa_trc(port->fcs, req_status);
		port->stats.ns_plogi_rsp_err++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
		return;
	}

	els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);

	switch (els_cmd->els_code) {

	case FC_ELS_ACC:
		/* an ACC shorter than a full PLOGI payload is invalid */
		if (rsp_len < sizeof(struct fc_logi_s)) {
			bfa_trc(port->fcs, rsp_len);
			port->stats.ns_plogi_acc_err++;
			bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
			break;
		}
		port->stats.ns_plogi_accepts++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
		break;

	case FC_ELS_LS_RJT:
		ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);

		bfa_trc(port->fcs, ls_rjt->reason_code);
		bfa_trc(port->fcs, ls_rjt->reason_code_expl);

		port->stats.ns_rejects++;

		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
		break;

	default:
		port->stats.ns_plogi_unknown_rsp++;
		bfa_trc(port->fcs, els_cmd->els_code);
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
	}
}
3726
3727/**
3728 * Register the symbolic port name.
3729 */
/**
 * Register the symbolic port name.
 *
 * For a vport the registered name is the base port's symbolic name
 * with the vport's own symbolic name appended; for the base port its
 * own symbolic name is registered as-is.
 */
static void
bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
	struct bfa_fcs_lport_s *port = ns->port;
	struct fchs_s fchs;
	int len;
	struct bfa_fcxp_s *fcxp;
	u8 symbl[256];
	u8 *psymbl = &symbl[0];

	bfa_os_memset(symbl, 0, sizeof(symbl));

	bfa_trc(port->fcs, port->port_cfg.pwwn);

	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		/* no fcxp free - queue and retry via this same function */
		port->stats.ns_rspnid_alloc_wait++;
		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
					bfa_fcs_lport_ns_send_rspn_id, ns);
		return;
	}
	ns->fcxp = fcxp;

	/*
	 * for V-Port, form a Port Symbolic Name
	 */
	if (port->vport) {
		/**
		 * For Vports, we append the vport's port symbolic name
		 * to that of the base port.
		 */

		/*
		 * NOTE(review): strncpy with strlen(src) as the bound does
		 * not limit by the 256-byte destination; presumably the
		 * combined symbolic names always fit in symbl[] - confirm
		 * the maximum symbolic-name length.
		 */
		strncpy((char *)psymbl,
			(char *) &
			(bfa_fcs_lport_get_psym_name
			 (bfa_fcs_get_base_port(port->fcs))),
			strlen((char *) &
			       bfa_fcs_lport_get_psym_name(bfa_fcs_get_base_port
							  (port->fcs))));

		/* Ensure we have a null terminating string. */
		((char *)psymbl)[strlen((char *) &
			bfa_fcs_lport_get_psym_name(bfa_fcs_get_base_port
						   (port->fcs)))] = 0;
		strncat((char *)psymbl,
			(char *) &(bfa_fcs_lport_get_psym_name(port)),
			strlen((char *) &bfa_fcs_lport_get_psym_name(port)));
	} else {
		/* base port: register its own symbolic name directly */
		psymbl = (u8 *) &(bfa_fcs_lport_get_psym_name(port));
	}

	len = fc_rspnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
			      bfa_fcs_lport_get_fcid(port), 0, psymbl);

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
		      FC_CLASS_3, len, &fchs,
		      bfa_fcs_lport_ns_rspn_id_response, (void *)ns,
		      FC_MAX_PDUSZ, FC_FCCT_TOV);

	port->stats.ns_rspnid_sent++;

	bfa_sm_send_event(ns, NSSM_EVENT_RSPNID_SENT);
}
3794
/**
 * Handle the RSPN_ID response.  A CT accept drives RSP_OK; a transport
 * failure or CT reject drives RSP_ERROR.
 */
static void
bfa_fcs_lport_ns_rspn_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
				void *cbarg, bfa_status_t req_status,
				u32 rsp_len, u32 resid_len,
				struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
	struct bfa_fcs_lport_s *port = ns->port;
	struct ct_hdr_s *cthdr = NULL;

	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		bfa_trc(port->fcs, req_status);
		port->stats.ns_rspnid_rsp_err++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
		return;
	}

	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);

	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
		port->stats.ns_rspnid_accepts++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
		return;
	}

	/* CT reject: trace the reason and report an error response */
	port->stats.ns_rspnid_rejects++;
	bfa_trc(port->fcs, cthdr->reason_code);
	bfa_trc(port->fcs, cthdr->exp_code);
	bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
}
3831
/**
 * Register FC4-Types with the name server (RFT_ID).
 *
 * Called directly by the NS state machine, or re-entered from the fcxp
 * wait queue with a pre-allocated fcxp in @fcxp_alloced.
 */
static void
bfa_fcs_lport_ns_send_rft_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
	struct bfa_fcs_lport_s *port = ns->port;
	struct fchs_s fchs;
	int len;
	struct bfa_fcxp_s *fcxp;

	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/* Reuse the fcxp handed back by the wait queue, else allocate one. */
	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		/* No fcxp free: park on the wait queue and retry later. */
		port->stats.ns_rftid_alloc_wait++;
		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
					bfa_fcs_lport_ns_send_rft_id, ns);
		return;
	}
	ns->fcxp = fcxp;

	/* The registered FC4 types are taken from the port's configured roles */
	len = fc_rftid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
		     bfa_fcs_lport_get_fcid(port), 0, port->port_cfg.roles);

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
		      FC_CLASS_3, len, &fchs,
		      bfa_fcs_lport_ns_rft_id_response, (void *)ns,
		      FC_MAX_PDUSZ, FC_FCCT_TOV);

	port->stats.ns_rftid_sent++;
	bfa_sm_send_event(ns, NSSM_EVENT_RFTID_SENT);
}
3866
3867static void
3868bfa_fcs_lport_ns_rft_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
3869 void *cbarg, bfa_status_t req_status,
3870 u32 rsp_len, u32 resid_len,
3871 struct fchs_s *rsp_fchs)
3872{
3873 struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
3874 struct bfa_fcs_lport_s *port = ns->port;
3875 struct ct_hdr_s *cthdr = NULL;
3876
3877 bfa_trc(port->fcs, port->port_cfg.pwwn);
3878
3879 /*
3880 * Sanity Checks
3881 */
3882 if (req_status != BFA_STATUS_OK) {
3883 bfa_trc(port->fcs, req_status);
3884 port->stats.ns_rftid_rsp_err++;
3885 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
3886 return;
3887 }
3888
3889 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
3890 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
3891
3892 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
3893 port->stats.ns_rftid_accepts++;
3894 bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
3895 return;
3896 }
3897
3898 port->stats.ns_rftid_rejects++;
3899 bfa_trc(port->fcs, cthdr->reason_code);
3900 bfa_trc(port->fcs, cthdr->exp_code);
3901 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
3902}
3903
/**
 * Register FC4-Features : Should be done after RFT_ID
 *
 * Registers FCP initiator features (RFF_ID) for the port when it is
 * configured in initiator mode.
 */
static void
bfa_fcs_lport_ns_send_rff_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
	struct bfa_fcs_lport_s *port = ns->port;
	struct fchs_s fchs;
	int len;
	struct bfa_fcxp_s *fcxp;
	u8 fc4_ftrs = 0;

	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/* Reuse the fcxp handed back by the wait queue, else allocate one. */
	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		/* No fcxp free: park on the wait queue and retry later. */
		port->stats.ns_rffid_alloc_wait++;
		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
					bfa_fcs_lport_ns_send_rff_id, ns);
		return;
	}
	ns->fcxp = fcxp;

	/* Advertise initiator feature bits only for initiator-mode ports */
	if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port))
		fc4_ftrs = FC_GS_FCP_FC4_FEATURE_INITIATOR;

	len = fc_rffid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
			     bfa_fcs_lport_get_fcid(port), 0,
				 FC_TYPE_FCP, fc4_ftrs);

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
		      FC_CLASS_3, len, &fchs,
		      bfa_fcs_lport_ns_rff_id_response, (void *)ns,
		      FC_MAX_PDUSZ, FC_FCCT_TOV);

	port->stats.ns_rffid_sent++;
	bfa_sm_send_event(ns, NSSM_EVENT_RFFID_SENT);
}
3943
3944static void
3945bfa_fcs_lport_ns_rff_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
3946 void *cbarg, bfa_status_t req_status,
3947 u32 rsp_len, u32 resid_len,
3948 struct fchs_s *rsp_fchs)
3949{
3950 struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
3951 struct bfa_fcs_lport_s *port = ns->port;
3952 struct ct_hdr_s *cthdr = NULL;
3953
3954 bfa_trc(port->fcs, port->port_cfg.pwwn);
3955
3956 /*
3957 * Sanity Checks
3958 */
3959 if (req_status != BFA_STATUS_OK) {
3960 bfa_trc(port->fcs, req_status);
3961 port->stats.ns_rffid_rsp_err++;
3962 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
3963 return;
3964 }
3965
3966 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
3967 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
3968
3969 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
3970 port->stats.ns_rffid_accepts++;
3971 bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
3972 return;
3973 }
3974
3975 port->stats.ns_rffid_rejects++;
3976 bfa_trc(port->fcs, cthdr->reason_code);
3977 bfa_trc(port->fcs, cthdr->exp_code);
3978
3979 if (cthdr->reason_code == CT_RSN_NOT_SUPP) {
3980 /* if this command is not supported, we don't retry */
3981 bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
3982 } else
3983 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
3984}
/**
 * Query Fabric for FC4-Types Devices.
 *
 * TBD : Need to use a local (FCS private) response buffer, since the response
 * can be larger than 2K.
 */
static void
bfa_fcs_lport_ns_send_gid_ft(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
	struct bfa_fcs_lport_s *port = ns->port;
	struct fchs_s fchs;
	int len;
	struct bfa_fcxp_s *fcxp;

	bfa_trc(port->fcs, port->pid);

	/* Reuse the fcxp handed back by the wait queue, else allocate one. */
	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		/* No fcxp free: park on the wait queue and retry later. */
		port->stats.ns_gidft_alloc_wait++;
		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
					bfa_fcs_lport_ns_send_gid_ft, ns);
		return;
	}
	ns->fcxp = fcxp;

	/*
	 * This query is only initiated for FCP initiator mode.
	 */
	len = fc_gid_ft_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
			      ns->port->pid, FC_TYPE_FCP);

	/* GID_FT responses can be large: use the maximum response size. */
	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
		      FC_CLASS_3, len, &fchs,
		      bfa_fcs_lport_ns_gid_ft_response, (void *)ns,
		      bfa_fcxp_get_maxrsp(port->fcs->bfa), FC_FCCT_TOV);

	port->stats.ns_gidft_sent++;

	bfa_sm_send_event(ns, NSSM_EVENT_GIDFT_SENT);
}
4026
/*
 * Handle the GID_FT response: on accept, walk the returned PID list and
 * create/update rports; on reject, decide whether to retry.
 */
static void
bfa_fcs_lport_ns_gid_ft_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
				void *cbarg, bfa_status_t req_status,
				u32 rsp_len, u32 resid_len,
				struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
	struct bfa_fcs_lport_s *port = ns->port;
	struct ct_hdr_s *cthdr = NULL;
	u32 n_pids;

	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		bfa_trc(port->fcs, req_status);
		port->stats.ns_gidft_rsp_err++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
		return;
	}

	if (resid_len != 0) {
		/*
		 * TBD : we will need to allocate a larger buffer & retry the
		 * command
		 *
		 * NOTE(review): this path returns without posting any SM
		 * event — verify the state machine recovers (e.g. via a
		 * later NS query) when the response is truncated.
		 */
		bfa_trc(port->fcs, rsp_len);
		bfa_trc(port->fcs, resid_len);
		return;
	}

	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);

	switch (cthdr->cmd_rsp_code) {

	case CT_RSP_ACCEPT:

		port->stats.ns_gidft_accepts++;
		/* PID entries follow immediately after the CT header. */
		n_pids = (fc_get_ctresp_pyld_len(rsp_len) / sizeof(u32));
		bfa_trc(port->fcs, n_pids);
		bfa_fcs_lport_ns_process_gidft_pids(port,
						    (u32 *) (cthdr + 1),
						    n_pids);
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
		break;

	case CT_RSP_REJECT:

		/*
		 * Check the reason code & explanation.
		 * There may not have been any FC4 devices in the fabric
		 */
		port->stats.ns_gidft_rejects++;
		bfa_trc(port->fcs, cthdr->reason_code);
		bfa_trc(port->fcs, cthdr->exp_code);

		if ((cthdr->reason_code == CT_RSN_UNABLE_TO_PERF)
		    && (cthdr->exp_code == CT_NS_EXP_FT_NOT_REG)) {
			/* No FC4-type devices registered: not an error. */
			bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
		} else {
			/*
			 * for all other errors, retry
			 */
			bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
		}
		break;

	default:
		port->stats.ns_gidft_unknown_rsp++;
		bfa_trc(port->fcs, cthdr->cmd_rsp_code);
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
	}
}
4104
4105/**
4106 * This routine will be called by bfa_timer on timer timeouts.
4107 *
4108 * param[in] port - pointer to bfa_fcs_lport_t.
4109 *
4110 * return
4111 * void
4112 *
4113 * Special Considerations:
4114 *
4115 * note
4116 */
4117static void
4118bfa_fcs_lport_ns_timeout(void *arg)
4119{
4120 struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) arg;
4121
4122 ns->port->stats.ns_timeouts++;
4123 bfa_sm_send_event(ns, NSSM_EVENT_TIMEOUT);
4124}
4125
/*
 * Process the PID list in GID_FT response
 *
 * For each PID: skip our own port, create an rport for unknown PIDs, and
 * notify existing rports via the SCN path. Stops at the entry carrying
 * the last-entry control bit.
 */
static void
bfa_fcs_lport_ns_process_gidft_pids(struct bfa_fcs_lport_s *port, u32 *pid_buf,
				    u32 n_pids)
{
	struct fcgs_gidft_resp_s *gidft_entry;
	struct bfa_fcs_rport_s *rport;
	u32 ii;

	for (ii = 0; ii < n_pids; ii++) {
		gidft_entry = (struct fcgs_gidft_resp_s *) &pid_buf[ii];

		/* Our own PID needs no rport. */
		if (gidft_entry->pid == port->pid)
			continue;

		/*
		 * Check if this rport already exists
		 */
		rport = bfa_fcs_lport_get_rport_by_pid(port, gidft_entry->pid);
		if (rport == NULL) {
			/*
			 * this is a new device. create rport
			 */
			rport = bfa_fcs_rport_create(port, gidft_entry->pid);
		} else {
			/*
			 * this rport already exists
			 */
			bfa_fcs_rport_scn(rport);
		}

		bfa_trc(port->fcs, gidft_entry->pid);

		/*
		 * if the last entry bit is set, bail out.
		 */
		if (gidft_entry->last)
			return;
	}
}
4168
4169/**
4170 * fcs_ns_public FCS nameserver public interfaces
4171 */
4172
4173/*
4174 * Functions called by port/fab.
4175 * These will send relevant Events to the ns state machine.
4176 */
4177void
4178bfa_fcs_lport_ns_init(struct bfa_fcs_lport_s *port)
4179{
4180 struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
4181
4182 ns->port = port;
4183 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
4184}
4185
4186void
4187bfa_fcs_lport_ns_offline(struct bfa_fcs_lport_s *port)
4188{
4189 struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
4190
4191 ns->port = port;
4192 bfa_sm_send_event(ns, NSSM_EVENT_PORT_OFFLINE);
4193}
4194
4195void
4196bfa_fcs_lport_ns_online(struct bfa_fcs_lport_s *port)
4197{
4198 struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
4199
4200 ns->port = port;
4201 bfa_sm_send_event(ns, NSSM_EVENT_PORT_ONLINE);
4202}
4203
4204void
4205bfa_fcs_lport_ns_query(struct bfa_fcs_lport_s *port)
4206{
4207 struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
4208
4209 bfa_trc(port->fcs, port->pid);
4210 bfa_sm_send_event(ns, NSSM_EVENT_NS_QUERY);
4211}
4212
4213void
4214bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port)
4215{
4216
4217 struct bfa_fcs_rport_s *rport;
4218 u8 nwwns;
4219 wwn_t wwns[BFA_PREBOOT_BOOTLUN_MAX];
4220 int ii;
4221
4222 bfa_iocfc_get_bootwwns(port->fcs->bfa, &nwwns, wwns);
4223
4224 for (ii = 0 ; ii < nwwns; ++ii) {
4225 rport = bfa_fcs_rport_create_by_wwn(port, wwns[ii]);
4226 bfa_assert(rport);
4227 }
4228}
4229
4230/**
4231 * FCS SCN
4232 */
4233
4234#define FC_QOS_RSCN_EVENT 0x0c
4235#define FC_FABRIC_NAME_RSCN_EVENT 0x0d
4236
4237/*
4238 * forward declarations
4239 */
4240static void bfa_fcs_lport_scn_send_scr(void *scn_cbarg,
4241 struct bfa_fcxp_s *fcxp_alloced);
4242static void bfa_fcs_lport_scn_scr_response(void *fcsarg,
4243 struct bfa_fcxp_s *fcxp,
4244 void *cbarg,
4245 bfa_status_t req_status,
4246 u32 rsp_len,
4247 u32 resid_len,
4248 struct fchs_s *rsp_fchs);
4249static void bfa_fcs_lport_scn_send_ls_acc(struct bfa_fcs_lport_s *port,
4250 struct fchs_s *rx_fchs);
4251static void bfa_fcs_lport_scn_timeout(void *arg);
4252
4253/**
4254 * fcs_scm_sm FCS SCN state machine
4255 */
4256
/**
 * VPort SCN State Machine events
 */
enum port_scn_event {
	SCNSM_EVENT_PORT_ONLINE = 1,	/* port is online */
	SCNSM_EVENT_PORT_OFFLINE = 2,	/* port is offline */
	SCNSM_EVENT_RSP_OK = 3,		/* SCR accepted by fabric */
	SCNSM_EVENT_RSP_ERROR = 4,	/* SCR failed/rejected */
	SCNSM_EVENT_TIMEOUT = 5,	/* retry timer expired */
	SCNSM_EVENT_SCR_SENT = 6,	/* SCR request posted */
};
4268
4269static void bfa_fcs_lport_scn_sm_offline(struct bfa_fcs_lport_scn_s *scn,
4270 enum port_scn_event event);
4271static void bfa_fcs_lport_scn_sm_sending_scr(
4272 struct bfa_fcs_lport_scn_s *scn,
4273 enum port_scn_event event);
4274static void bfa_fcs_lport_scn_sm_scr(struct bfa_fcs_lport_scn_s *scn,
4275 enum port_scn_event event);
4276static void bfa_fcs_lport_scn_sm_scr_retry(struct bfa_fcs_lport_scn_s *scn,
4277 enum port_scn_event event);
4278static void bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn,
4279 enum port_scn_event event);
4280
/**
 * Starting state - awaiting link up.
 *
 * PORT_ONLINE starts the SCR registration; PORT_OFFLINE is ignored.
 */
static void
bfa_fcs_lport_scn_sm_offline(struct bfa_fcs_lport_scn_s *scn,
			enum port_scn_event event)
{
	switch (event) {
	case SCNSM_EVENT_PORT_ONLINE:
		bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_sending_scr);
		bfa_fcs_lport_scn_send_scr(scn, NULL);
		break;

	case SCNSM_EVENT_PORT_OFFLINE:
		break;

	default:
		bfa_sm_fault(scn->port->fcs, event);
	}
}
4301
/*
 * Waiting for an fcxp to become available for the SCR request.
 * Going offline here cancels the pending fcxp-wait-queue entry.
 */
static void
bfa_fcs_lport_scn_sm_sending_scr(struct bfa_fcs_lport_scn_s *scn,
				enum port_scn_event event)
{
	switch (event) {
	case SCNSM_EVENT_SCR_SENT:
		bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_scr);
		break;

	case SCNSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline);
		bfa_fcxp_walloc_cancel(scn->port->fcs->bfa, &scn->fcxp_wqe);
		break;

	default:
		bfa_sm_fault(scn->port->fcs, event);
	}
}
4320
/*
 * SCR sent; awaiting the fabric's response. Errors arm a retry timer,
 * and going offline discards the outstanding fcxp.
 */
static void
bfa_fcs_lport_scn_sm_scr(struct bfa_fcs_lport_scn_s *scn,
			enum port_scn_event event)
{
	struct bfa_fcs_lport_s *port = scn->port;

	switch (event) {
	case SCNSM_EVENT_RSP_OK:
		bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_online);
		break;

	case SCNSM_EVENT_RSP_ERROR:
		bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_scr_retry);
		bfa_timer_start(port->fcs->bfa, &scn->timer,
				    bfa_fcs_lport_scn_timeout, scn,
				    BFA_FCS_RETRY_TIMEOUT);
		break;

	case SCNSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline);
		bfa_fcxp_discard(scn->fcxp);
		break;

	default:
		bfa_sm_fault(port->fcs, event);
	}
}
4348
/*
 * Retry-delay state: when the timer fires, resend the SCR; going
 * offline stops the timer.
 */
static void
bfa_fcs_lport_scn_sm_scr_retry(struct bfa_fcs_lport_scn_s *scn,
				enum port_scn_event event)
{
	switch (event) {
	case SCNSM_EVENT_TIMEOUT:
		bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_sending_scr);
		bfa_fcs_lport_scn_send_scr(scn, NULL);
		break;

	case SCNSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline);
		bfa_timer_stop(&scn->timer);
		break;

	default:
		bfa_sm_fault(scn->port->fcs, event);
	}
}
4368
4369static void
4370bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn,
4371 enum port_scn_event event)
4372{
4373 switch (event) {
4374 case SCNSM_EVENT_PORT_OFFLINE:
4375 bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline);
4376 break;
4377
4378 default:
4379 bfa_sm_fault(scn->port->fcs, event);
4380 }
4381}
4382
4383
4384
4385/**
4386 * fcs_scn_private FCS SCN private functions
4387 */
4388
/**
 * This routine will be called to send a SCR command.
 *
 * Registers for state-change notifications with the fabric controller;
 * re-entered from the fcxp wait queue with a pre-allocated fcxp.
 */
static void
bfa_fcs_lport_scn_send_scr(void *scn_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_lport_scn_s *scn = scn_cbarg;
	struct bfa_fcs_lport_s *port = scn->port;
	struct fchs_s fchs;
	int len;
	struct bfa_fcxp_s *fcxp;

	bfa_trc(port->fcs, port->pid);
	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/* Reuse the fcxp handed back by the wait queue, else allocate one. */
	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &scn->fcxp_wqe,
					bfa_fcs_lport_scn_send_scr, scn);
		return;
	}
	scn->fcxp = fcxp;

	/* Handle VU registrations for Base port only */
	if ((!port->vport) && bfa_ioc_get_fcmode(&port->fcs->bfa->ioc)) {
		len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
				bfa_lps_is_brcd_fabric(port->fabric->lps),
							port->pid, 0);
	} else {
	    len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
				    BFA_FALSE,
				    port->pid, 0);
	}

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
			  FC_CLASS_3, len, &fchs,
			  bfa_fcs_lport_scn_scr_response,
			  (void *)scn, FC_MAX_PDUSZ, FC_ELS_TOV);

	bfa_sm_send_event(scn, SCNSM_EVENT_SCR_SENT);
}
4430
4431static void
4432bfa_fcs_lport_scn_scr_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
4433 void *cbarg, bfa_status_t req_status, u32 rsp_len,
4434 u32 resid_len, struct fchs_s *rsp_fchs)
4435{
4436 struct bfa_fcs_lport_scn_s *scn = (struct bfa_fcs_lport_scn_s *) cbarg;
4437 struct bfa_fcs_lport_s *port = scn->port;
4438 struct fc_els_cmd_s *els_cmd;
4439 struct fc_ls_rjt_s *ls_rjt;
4440
4441 bfa_trc(port->fcs, port->port_cfg.pwwn);
4442
4443 /*
4444 * Sanity Checks
4445 */
4446 if (req_status != BFA_STATUS_OK) {
4447 bfa_trc(port->fcs, req_status);
4448 bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR);
4449 return;
4450 }
4451
4452 els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);
4453
4454 switch (els_cmd->els_code) {
4455
4456 case FC_ELS_ACC:
4457 bfa_sm_send_event(scn, SCNSM_EVENT_RSP_OK);
4458 break;
4459
4460 case FC_ELS_LS_RJT:
4461
4462 ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);
4463
4464 bfa_trc(port->fcs, ls_rjt->reason_code);
4465 bfa_trc(port->fcs, ls_rjt->reason_code_expl);
4466
4467 bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR);
4468 break;
4469
4470 default:
4471 bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR);
4472 }
4473}
4474
4475/*
4476 * Send a LS Accept
4477 */
4478static void
4479bfa_fcs_lport_scn_send_ls_acc(struct bfa_fcs_lport_s *port,
4480 struct fchs_s *rx_fchs)
4481{
4482 struct fchs_s fchs;
4483 struct bfa_fcxp_s *fcxp;
4484 struct bfa_rport_s *bfa_rport = NULL;
4485 int len;
4486
4487 bfa_trc(port->fcs, rx_fchs->s_id);
4488
4489 fcxp = bfa_fcs_fcxp_alloc(port->fcs);
4490 if (!fcxp)
4491 return;
4492
4493 len = fc_ls_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
4494 rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
4495 rx_fchs->ox_id);
4496
4497 bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
4498 BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
4499 FC_MAX_PDUSZ, 0);
4500}
4501
4502/**
4503 * This routine will be called by bfa_timer on timer timeouts.
4504 *
4505 * param[in] vport - pointer to bfa_fcs_lport_t.
4506 * param[out] vport_status - pointer to return vport status in
4507 *
4508 * return
4509 * void
4510 *
4511 * Special Considerations:
4512 *
4513 * note
4514 */
4515static void
4516bfa_fcs_lport_scn_timeout(void *arg)
4517{
4518 struct bfa_fcs_lport_scn_s *scn = (struct bfa_fcs_lport_scn_s *) arg;
4519
4520 bfa_sm_send_event(scn, SCNSM_EVENT_TIMEOUT);
4521}
4522
4523
4524
4525/**
4526 * fcs_scn_public FCS state change notification public interfaces
4527 */
4528
4529/*
4530 * Functions called by port/fab
4531 */
4532void
4533bfa_fcs_lport_scn_init(struct bfa_fcs_lport_s *port)
4534{
4535 struct bfa_fcs_lport_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port);
4536
4537 scn->port = port;
4538 bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline);
4539}
4540
4541void
4542bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *port)
4543{
4544 struct bfa_fcs_lport_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port);
4545
4546 scn->port = port;
4547 bfa_sm_send_event(scn, SCNSM_EVENT_PORT_OFFLINE);
4548}
4549
4550void
4551bfa_fcs_lport_scn_online(struct bfa_fcs_lport_s *port)
4552{
4553 struct bfa_fcs_lport_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port);
4554
4555 scn->port = port;
4556 bfa_sm_send_event(scn, SCNSM_EVENT_PORT_ONLINE);
4557}
4558
/*
 * Handle a single-port-ID RSCN: create an rport for an unknown PID
 * (unless running in min-cfg mode), or notify the existing rport.
 */
static void
bfa_fcs_lport_scn_portid_rscn(struct bfa_fcs_lport_s *port, u32 rpid)
{
	struct bfa_fcs_rport_s *rport;

	bfa_trc(port->fcs, rpid);

	/**
	 * If this is an unknown device, then it just came online.
	 * Otherwise let rport handle the RSCN event.
	 */
	rport = bfa_fcs_lport_get_rport_by_pid(port, rpid);
	if (rport == NULL) {
		/*
		 * If min cfg mode is enabled, we donot need to
		 * discover any new rports.
		 */
		if (!__fcs_min_cfg(port->fcs))
			rport = bfa_fcs_rport_create(port, rpid);
	} else
		bfa_fcs_rport_scn(rport);
}
4581
/**
 * rscn format based PID comparison
 *
 * __c0/__c1 are byte pointers into two 24-bit FC port IDs; how many
 * leading bytes must match depends on the RSCN address format:
 * fabric = match everything, domain = first byte, area = first two.
 */
#define __fc_pid_match(__c0, __c1, __fmt)		\
	(((__fmt) == FC_RSCN_FORMAT_FABRIC) ||		\
	 (((__fmt) == FC_RSCN_FORMAT_DOMAIN) &&		\
	  ((__c0)[0] == (__c1)[0])) ||				\
	 (((__fmt) == FC_RSCN_FORMAT_AREA) &&		\
	  ((__c0)[0] == (__c1)[0]) &&				\
	  ((__c0)[1] == (__c1)[1])))
4592
/*
 * Handle an area/domain/fabric-format RSCN: notify every rport whose
 * PID falls inside the affected address range.
 */
static void
bfa_fcs_lport_scn_multiport_rscn(struct bfa_fcs_lport_s *port,
				enum fc_rscn_format format,
				u32 rscn_pid)
{
	struct bfa_fcs_rport_s *rport;
	struct list_head        *qe, *qe_next;
	u8        *c0, *c1;

	bfa_trc(port->fcs, format);
	bfa_trc(port->fcs, rscn_pid);

	c0 = (u8 *) &rscn_pid;

	/* Safe iteration: bfa_fcs_rport_scn() may remove the rport. */
	list_for_each_safe(qe, qe_next, &port->rport_q) {
		rport = (struct bfa_fcs_rport_s *) qe;
		c1 = (u8 *) &rport->pid;
		if (__fc_pid_match(c0, c1, format))
			bfa_fcs_rport_scn(rport);
	}
}
4614
4615
/*
 * Process a received RSCN: accept it (LS_ACC), dispatch each payload
 * entry by address format, and trigger a name-server re-query when any
 * area/domain/fabric-scope entry was seen.
 */
void
bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port,
			struct fchs_s *fchs, u32 len)
{
	struct fc_rscn_pl_s *rscn = (struct fc_rscn_pl_s *) (fchs + 1);
	int num_entries;
	u32 rscn_pid;
	bfa_boolean_t nsquery = BFA_FALSE, found;
	int i = 0, j;

	/* Payload length covers the 4-byte header plus the entry array. */
	num_entries =
		(bfa_os_ntohs(rscn->payldlen) -
		 sizeof(u32)) / sizeof(rscn->event[0]);

	bfa_trc(port->fcs, num_entries);

	port->stats.num_rscn++;

	bfa_fcs_lport_scn_send_ls_acc(port, fchs);

	for (i = 0; i < num_entries; i++) {
		rscn_pid = rscn->event[i].portid;

		bfa_trc(port->fcs, rscn->event[i].format);
		bfa_trc(port->fcs, rscn_pid);

		/* check for duplicate entries in the list */
		found = BFA_FALSE;
		for (j = 0; j < i; j++) {
			if (rscn->event[j].portid == rscn_pid) {
				found = BFA_TRUE;
				break;
			}
		}

		/* if found in down the list, pid has been already processed */
		if (found) {
			bfa_trc(port->fcs, rscn_pid);
			continue;
		}

		switch (rscn->event[i].format) {
		case FC_RSCN_FORMAT_PORTID:
			if (rscn->event[i].qualifier == FC_QOS_RSCN_EVENT) {
				/*
				 * Ignore this event.
				 * f/w would have processed it
				 */
				bfa_trc(port->fcs, rscn_pid);
			} else {
				port->stats.num_portid_rscn++;
				bfa_fcs_lport_scn_portid_rscn(port, rscn_pid);
			}
		break;

		case FC_RSCN_FORMAT_FABRIC:
			if (rscn->event[i].qualifier ==
					FC_FABRIC_NAME_RSCN_EVENT) {
				bfa_fcs_lport_ms_fabric_rscn(port);
				break;
			}
			/* !!!!!!!!! Fall Through !!!!!!!!!!!!! */

		case FC_RSCN_FORMAT_AREA:
		case FC_RSCN_FORMAT_DOMAIN:
			nsquery = BFA_TRUE;
			bfa_fcs_lport_scn_multiport_rscn(port,
							rscn->event[i].format,
							rscn_pid);
			break;


		default:
			bfa_assert(0);
			nsquery = BFA_TRUE;
		}
	}

	/**
	 * If any of area, domain or fabric RSCN is received, do a fresh discovery
	 * to find new devices.
	 */
	if (nsquery)
		bfa_fcs_lport_ns_query(port);
}
4701
4702/**
4703 * BFA FCS port
4704 */
4705/**
4706 * fcs_port_api BFA FCS port API
4707 */
/* Return the fabric's base (physical) logical port. */
struct bfa_fcs_lport_s *
bfa_fcs_get_base_port(struct bfa_fcs_s *fcs)
{
	return &fcs->fabric.bport;
}
4713
/*
 * Look up an rport's pwwn either by pwwn (@bwwn true, match @wwn) or by
 * positional @index, skipping well-known-address rports (PID > 0xFFF000).
 *
 * NOTE(review): if the loop exhausts without a match, `rport` still
 * points at the last examined entry and its pwwn is returned — verify
 * callers rely on this "closest so far" behavior rather than expecting 0.
 */
wwn_t
bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn, int index,
		int nrports, bfa_boolean_t bwwn)
{
	struct list_head	*qh, *qe;
	struct bfa_fcs_rport_s *rport = NULL;
	int	i;
	struct bfa_fcs_s	*fcs;

	if (port == NULL || nrports == 0)
		return (wwn_t) 0;

	fcs = port->fcs;
	bfa_trc(fcs, (u32) nrports);

	i = 0;
	qh = &port->rport_q;
	qe = bfa_q_first(qh);

	while ((qe != qh) && (i < nrports)) {
		rport = (struct bfa_fcs_rport_s *) qe;
		/* Skip well-known-address rports; they don't count. */
		if (bfa_os_ntoh3b(rport->pid) > 0xFFF000) {
			qe = bfa_q_next(qe);
			bfa_trc(fcs, (u32) rport->pwwn);
			bfa_trc(fcs, rport->pid);
			bfa_trc(fcs, i);
			continue;
		}

		if (bwwn) {
			if (!memcmp(&wwn, &rport->pwwn, 8))
				break;
		} else {
			if (i == index)
				break;
		}

		i++;
		qe = bfa_q_next(qe);
	}

	bfa_trc(fcs, i);
	if (rport)
		return rport->pwwn;
	else
		return (wwn_t) 0;
}
4761
4762void
4763bfa_fcs_lport_get_rports(struct bfa_fcs_lport_s *port,
4764 wwn_t rport_wwns[], int *nrports)
4765{
4766 struct list_head *qh, *qe;
4767 struct bfa_fcs_rport_s *rport = NULL;
4768 int i;
4769 struct bfa_fcs_s *fcs;
4770
4771 if (port == NULL || rport_wwns == NULL || *nrports == 0)
4772 return;
4773
4774 fcs = port->fcs;
4775 bfa_trc(fcs, (u32) *nrports);
4776
4777 i = 0;
4778 qh = &port->rport_q;
4779 qe = bfa_q_first(qh);
4780
4781 while ((qe != qh) && (i < *nrports)) {
4782 rport = (struct bfa_fcs_rport_s *) qe;
4783 if (bfa_os_ntoh3b(rport->pid) > 0xFFF000) {
4784 qe = bfa_q_next(qe);
4785 bfa_trc(fcs, (u32) rport->pwwn);
4786 bfa_trc(fcs, rport->pid);
4787 bfa_trc(fcs, i);
4788 continue;
4789 }
4790
4791 rport_wwns[i] = rport->pwwn;
4792
4793 i++;
4794 qe = bfa_q_next(qe);
4795 }
4796
4797 bfa_trc(fcs, i);
4798 *nrports = i;
4799}
4800
4801/*
4802 * Iterate's through all the rport's in the given port to
4803 * determine the maximum operating speed.
4804 *
4805 * !!!! To be used in TRL Functionality only !!!!
4806 */
4807bfa_port_speed_t
4808bfa_fcs_lport_get_rport_max_speed(bfa_fcs_lport_t *port)
4809{
4810 struct list_head *qh, *qe;
4811 struct bfa_fcs_rport_s *rport = NULL;
4812 struct bfa_fcs_s *fcs;
4813 bfa_port_speed_t max_speed = 0;
4814 struct bfa_port_attr_s port_attr;
4815 bfa_port_speed_t port_speed, rport_speed;
4816 bfa_boolean_t trl_enabled = bfa_fcport_is_ratelim(port->fcs->bfa);
4817
4818
4819 if (port == NULL)
4820 return 0;
4821
4822 fcs = port->fcs;
4823
4824 /* Get Physical port's current speed */
4825 bfa_fcport_get_attr(port->fcs->bfa, &port_attr);
4826 port_speed = port_attr.speed;
4827 bfa_trc(fcs, port_speed);
4828
4829 qh = &port->rport_q;
4830 qe = bfa_q_first(qh);
4831
4832 while (qe != qh) {
4833 rport = (struct bfa_fcs_rport_s *) qe;
4834 if ((bfa_os_ntoh3b(rport->pid) > 0xFFF000) ||
4835 (bfa_fcs_rport_get_state(rport) ==
4836 BFA_RPORT_OFFLINE)) {
4837 qe = bfa_q_next(qe);
4838 continue;
4839 }
4840
4841 rport_speed = rport->rpf.rpsc_speed;
4842 if ((trl_enabled) && (rport_speed ==
4843 BFA_PORT_SPEED_UNKNOWN)) {
4844 /* Use default ratelim speed setting */
4845 rport_speed =
4846 bfa_fcport_get_ratelim_speed(port->fcs->bfa);
4847 }
929 4848
4849 if ((rport_speed == BFA_PORT_SPEED_8GBPS) ||
4850 (rport_speed > port_speed)) {
4851 max_speed = rport_speed;
4852 break;
4853 } else if (rport_speed > max_speed) {
4854 max_speed = rport_speed;
4855 }
4856
4857 qe = bfa_q_next(qe);
4858 }
4859
4860 bfa_trc(fcs, max_speed);
4861 return max_speed;
4862}
4863
4864struct bfa_fcs_lport_s *
4865bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t lpwwn)
4866{
4867 struct bfa_fcs_vport_s *vport;
4868 bfa_fcs_vf_t *vf;
4869
4870 bfa_assert(fcs != NULL);
4871
4872 vf = bfa_fcs_vf_lookup(fcs, vf_id);
4873 if (vf == NULL) {
4874 bfa_trc(fcs, vf_id);
4875 return NULL;
4876 }
4877
4878 if (!lpwwn || (vf->bport.port_cfg.pwwn == lpwwn))
4879 return &vf->bport;
4880
4881 vport = bfa_fcs_fabric_vport_lookup(vf, lpwwn);
4882 if (vport)
4883 return &vport->lport;
4884
4885 return NULL;
4886}
4887
4888/*
4889 * API corresponding to NPIV_VPORT_GETINFO.
4890 */
4891void
4892bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port,
4893 struct bfa_lport_info_s *port_info)
4894{
4895
4896 bfa_trc(port->fcs, port->fabric->fabric_name);
4897
4898 if (port->vport == NULL) {
4899 /*
4900 * This is a Physical port
4901 */
4902 port_info->port_type = BFA_LPORT_TYPE_PHYSICAL;
4903
4904 /*
4905 * @todo : need to fix the state & reason
4906 */
4907 port_info->port_state = 0;
4908 port_info->offline_reason = 0;
4909
4910 port_info->port_wwn = bfa_fcs_lport_get_pwwn(port);
4911 port_info->node_wwn = bfa_fcs_lport_get_nwwn(port);
4912
4913 port_info->max_vports_supp =
4914 bfa_lps_get_max_vport(port->fcs->bfa);
4915 port_info->num_vports_inuse =
4916 bfa_fcs_fabric_vport_count(port->fabric);
4917 port_info->max_rports_supp = BFA_FCS_MAX_RPORTS_SUPP;
4918 port_info->num_rports_inuse = port->num_rports;
930 } else { 4919 } else {
931 port_attr->port_type = BFA_PPORT_TYPE_UNKNOWN; 4920 /*
932 port_attr->state = BFA_PORT_UNINIT; 4921 * This is a virtual port
4922 */
4923 port_info->port_type = BFA_LPORT_TYPE_VIRTUAL;
4924
4925 /*
4926 * @todo : need to fix the state & reason
4927 */
4928 port_info->port_state = 0;
4929 port_info->offline_reason = 0;
4930
4931 port_info->port_wwn = bfa_fcs_lport_get_pwwn(port);
4932 port_info->node_wwn = bfa_fcs_lport_get_nwwn(port);
4933 }
4934}
4935
/* Snapshot the port's statistics counters into the caller's buffer. */
void
bfa_fcs_lport_get_stats(struct bfa_fcs_lport_s *fcs_port,
	struct bfa_lport_stats_s *port_stats)
{
	*port_stats = fcs_port->stats;
}
4942
/* Reset all of the port's statistics counters to zero. */
void
bfa_fcs_lport_clear_stats(struct bfa_fcs_lport_s *fcs_port)
{
	bfa_os_memset(&fcs_port->stats, 0, sizeof(struct bfa_lport_stats_s));
}
4948
4949/**
4950 * FCS virtual port state machine
4951 */
4952
/* Convenience accessors into a vport's embedded lport. */
#define __vport_fcs(__vp)       ((__vp)->lport.fcs)
#define __vport_pwwn(__vp)      ((__vp)->lport.port_cfg.pwwn)
#define __vport_nwwn(__vp)      ((__vp)->lport.port_cfg.nwwn)
#define __vport_bfa(__vp)       ((__vp)->lport.fcs->bfa)
#define __vport_fcid(__vp)      ((__vp)->lport.pid)
#define __vport_fabric(__vp)    ((__vp)->lport.fabric)
#define __vport_vfid(__vp)      ((__vp)->lport.fabric->vf_id)

/* Maximum FDISC attempts before the vport is put in the error state. */
#define BFA_FCS_VPORT_MAX_RETRIES  5
4962/*
4963 * Forward declarations
4964 */
4965static void bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport);
4966static void bfa_fcs_vport_timeout(void *vport_arg);
4967static void bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport);
4968static void bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport);
4969
4970/**
4971 * fcs_vport_sm FCS virtual port state machine
4972 */
4973
4974/**
4975 * VPort State Machine events
4976 */
/* Events driving the FCS vport state machine. */
enum bfa_fcs_vport_event {
	BFA_FCS_VPORT_SM_CREATE = 1,	/*  vport create event */
	BFA_FCS_VPORT_SM_DELETE = 2,	/*  vport delete event */
	BFA_FCS_VPORT_SM_START = 3,	/*  vport start request */
	BFA_FCS_VPORT_SM_STOP = 4,	/*  stop: unsupported */
	BFA_FCS_VPORT_SM_ONLINE = 5,	/*  fabric online */
	BFA_FCS_VPORT_SM_OFFLINE = 6,	/*  fabric offline event */
	BFA_FCS_VPORT_SM_FRMSENT = 7,	/*  fdisc/logo sent events */
	BFA_FCS_VPORT_SM_RSP_OK = 8,	/*  good response */
	BFA_FCS_VPORT_SM_RSP_ERROR = 9,	/*  error/bad response */
	BFA_FCS_VPORT_SM_TIMEOUT = 10,	/*  delay timer event */
	BFA_FCS_VPORT_SM_DELCOMP = 11,	/*  lport delete completion */
	BFA_FCS_VPORT_SM_RSP_DUP_WWN = 12,	/*  Dup wnn error*/
	BFA_FCS_VPORT_SM_RSP_FAILED = 13,	/*  non-retryable failure */
};
4992
4993static void bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport,
4994 enum bfa_fcs_vport_event event);
4995static void bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
4996 enum bfa_fcs_vport_event event);
4997static void bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport,
4998 enum bfa_fcs_vport_event event);
4999static void bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
5000 enum bfa_fcs_vport_event event);
5001static void bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
5002 enum bfa_fcs_vport_event event);
5003static void bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
5004 enum bfa_fcs_vport_event event);
5005static void bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport,
5006 enum bfa_fcs_vport_event event);
5007static void bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport,
5008 enum bfa_fcs_vport_event event);
5009static void bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
5010 enum bfa_fcs_vport_event event);
5011static void bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport,
5012 enum bfa_fcs_vport_event event);
5013
5014static struct bfa_sm_table_s vport_sm_table[] = {
5015 {BFA_SM(bfa_fcs_vport_sm_uninit), BFA_FCS_VPORT_UNINIT},
5016 {BFA_SM(bfa_fcs_vport_sm_created), BFA_FCS_VPORT_CREATED},
5017 {BFA_SM(bfa_fcs_vport_sm_offline), BFA_FCS_VPORT_OFFLINE},
5018 {BFA_SM(bfa_fcs_vport_sm_fdisc), BFA_FCS_VPORT_FDISC},
5019 {BFA_SM(bfa_fcs_vport_sm_fdisc_retry), BFA_FCS_VPORT_FDISC_RETRY},
5020 {BFA_SM(bfa_fcs_vport_sm_online), BFA_FCS_VPORT_ONLINE},
5021 {BFA_SM(bfa_fcs_vport_sm_deleting), BFA_FCS_VPORT_DELETING},
5022 {BFA_SM(bfa_fcs_vport_sm_cleanup), BFA_FCS_VPORT_CLEANUP},
5023 {BFA_SM(bfa_fcs_vport_sm_logo), BFA_FCS_VPORT_LOGO},
5024 {BFA_SM(bfa_fcs_vport_sm_error), BFA_FCS_VPORT_ERROR}
5025};
5026
5027/**
5028 * Beginning state.
5029 */
5030static void
5031bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport,
5032 enum bfa_fcs_vport_event event)
5033{
5034 bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
5035 bfa_trc(__vport_fcs(vport), event);
5036
5037 switch (event) {
5038 case BFA_FCS_VPORT_SM_CREATE:
5039 bfa_sm_set_state(vport, bfa_fcs_vport_sm_created);
5040 bfa_fcs_fabric_addvport(__vport_fabric(vport), vport);
5041 break;
5042
5043 default:
5044 bfa_sm_fault(__vport_fcs(vport), event);
5045 }
5046}
5047
5048/**
5049 * Created state - a start event is required to start up the state machine.
5050 */
5051static void
5052bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
5053 enum bfa_fcs_vport_event event)
5054{
5055 bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
5056 bfa_trc(__vport_fcs(vport), event);
5057
5058 switch (event) {
5059 case BFA_FCS_VPORT_SM_START:
5060 if (bfa_fcs_fabric_is_online(__vport_fabric(vport))
5061 && bfa_fcs_fabric_npiv_capable(__vport_fabric(vport))) {
5062 bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
5063 bfa_fcs_vport_do_fdisc(vport);
5064 } else {
5065 /**
5066 * Fabric is offline or not NPIV capable, stay in
5067 * offline state.
5068 */
5069 vport->vport_stats.fab_no_npiv++;
5070 bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
5071 }
5072 break;
5073
5074 case BFA_FCS_VPORT_SM_DELETE:
5075 bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
5076 bfa_fcs_lport_delete(&vport->lport);
5077 break;
5078
5079 case BFA_FCS_VPORT_SM_ONLINE:
5080 case BFA_FCS_VPORT_SM_OFFLINE:
5081 /**
5082 * Ignore ONLINE/OFFLINE events from fabric
5083 * till vport is started.
5084 */
5085 break;
5086
5087 default:
5088 bfa_sm_fault(__vport_fcs(vport), event);
5089 }
5090}
5091
5092/**
5093 * Offline state - awaiting ONLINE event from fabric SM.
5094 */
5095static void
5096bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport,
5097 enum bfa_fcs_vport_event event)
5098{
5099 bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
5100 bfa_trc(__vport_fcs(vport), event);
5101
5102 switch (event) {
5103 case BFA_FCS_VPORT_SM_DELETE:
5104 bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
5105 bfa_fcs_lport_delete(&vport->lport);
5106 break;
5107
5108 case BFA_FCS_VPORT_SM_ONLINE:
5109 bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
5110 vport->fdisc_retries = 0;
5111 bfa_fcs_vport_do_fdisc(vport);
5112 break;
5113
5114 case BFA_FCS_VPORT_SM_OFFLINE:
5115 /*
5116 * This can happen if the vport couldn't be initialzied
5117 * due the fact that the npiv was not enabled on the switch.
5118 * In that case we will put the vport in offline state.
5119 * However, the link can go down and cause the this event to
5120 * be sent when we are already offline. Ignore it.
5121 */
5122 break;
5123
5124 default:
5125 bfa_sm_fault(__vport_fcs(vport), event);
5126 }
5127}
5128
5129
5130/**
5131 * FDISC is sent and awaiting reply from fabric.
5132 */
5133static void
5134bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
5135 enum bfa_fcs_vport_event event)
5136{
5137 bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
5138 bfa_trc(__vport_fcs(vport), event);
5139
5140 switch (event) {
5141 case BFA_FCS_VPORT_SM_DELETE:
5142 bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
5143 bfa_lps_discard(vport->lps);
5144 bfa_fcs_lport_delete(&vport->lport);
5145 break;
5146
5147 case BFA_FCS_VPORT_SM_OFFLINE:
5148 bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
5149 bfa_lps_discard(vport->lps);
5150 break;
5151
5152 case BFA_FCS_VPORT_SM_RSP_OK:
5153 bfa_sm_set_state(vport, bfa_fcs_vport_sm_online);
5154 bfa_fcs_lport_online(&vport->lport);
5155 break;
5156
5157 case BFA_FCS_VPORT_SM_RSP_ERROR:
5158 bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc_retry);
5159 bfa_timer_start(__vport_bfa(vport), &vport->timer,
5160 bfa_fcs_vport_timeout, vport,
5161 BFA_FCS_RETRY_TIMEOUT);
5162 break;
5163
5164 case BFA_FCS_VPORT_SM_RSP_FAILED:
5165 bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
5166 break;
5167
5168 case BFA_FCS_VPORT_SM_RSP_DUP_WWN:
5169 bfa_sm_set_state(vport, bfa_fcs_vport_sm_error);
5170 break;
5171
5172 default:
5173 bfa_sm_fault(__vport_fcs(vport), event);
5174 }
5175}
5176
5177/**
5178 * FDISC attempt failed - a timer is active to retry FDISC.
5179 */
5180static void
5181bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
5182 enum bfa_fcs_vport_event event)
5183{
5184 bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
5185 bfa_trc(__vport_fcs(vport), event);
5186
5187 switch (event) {
5188 case BFA_FCS_VPORT_SM_DELETE:
5189 bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
5190 bfa_timer_stop(&vport->timer);
5191 bfa_fcs_lport_delete(&vport->lport);
5192 break;
5193
5194 case BFA_FCS_VPORT_SM_OFFLINE:
5195 bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
5196 bfa_timer_stop(&vport->timer);
5197 break;
5198
5199 case BFA_FCS_VPORT_SM_TIMEOUT:
5200 bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
5201 vport->vport_stats.fdisc_retries++;
5202 vport->fdisc_retries++;
5203 bfa_fcs_vport_do_fdisc(vport);
5204 break;
5205
5206 default:
5207 bfa_sm_fault(__vport_fcs(vport), event);
5208 }
5209}
5210
5211/**
5212 * Vport is online (FDISC is complete).
5213 */
5214static void
5215bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
5216 enum bfa_fcs_vport_event event)
5217{
5218 bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
5219 bfa_trc(__vport_fcs(vport), event);
5220
5221 switch (event) {
5222 case BFA_FCS_VPORT_SM_DELETE:
5223 bfa_sm_set_state(vport, bfa_fcs_vport_sm_deleting);
5224 bfa_fcs_lport_delete(&vport->lport);
5225 break;
5226
5227 case BFA_FCS_VPORT_SM_OFFLINE:
5228 bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
5229 bfa_lps_discard(vport->lps);
5230 bfa_fcs_lport_offline(&vport->lport);
5231 break;
5232
5233 default:
5234 bfa_sm_fault(__vport_fcs(vport), event);
5235 }
5236}
5237
5238/**
5239 * Vport is being deleted - awaiting lport delete completion to send
5240 * LOGO to fabric.
5241 */
5242static void
5243bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport,
5244 enum bfa_fcs_vport_event event)
5245{
5246 bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
5247 bfa_trc(__vport_fcs(vport), event);
5248
5249 switch (event) {
5250 case BFA_FCS_VPORT_SM_DELETE:
5251 break;
5252
5253 case BFA_FCS_VPORT_SM_DELCOMP:
5254 bfa_sm_set_state(vport, bfa_fcs_vport_sm_logo);
5255 bfa_fcs_vport_do_logo(vport);
5256 break;
5257
5258 case BFA_FCS_VPORT_SM_OFFLINE:
5259 bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
5260 break;
5261
5262 default:
5263 bfa_sm_fault(__vport_fcs(vport), event);
5264 }
5265}
5266
5267/**
5268 * Error State.
5269 * This state will be set when the Vport Creation fails due
5270 * to errors like Dup WWN. In this state only operation allowed
5271 * is a Vport Delete.
5272 */
5273static void
5274bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport,
5275 enum bfa_fcs_vport_event event)
5276{
5277 bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
5278 bfa_trc(__vport_fcs(vport), event);
5279
5280 switch (event) {
5281 case BFA_FCS_VPORT_SM_DELETE:
5282 bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
5283 bfa_fcs_lport_delete(&vport->lport);
5284 break;
5285
5286 default:
5287 bfa_trc(__vport_fcs(vport), event);
5288 }
5289}
5290
5291/**
5292 * Lport cleanup is in progress since vport is being deleted. Fabric is
5293 * offline, so no LOGO is needed to complete vport deletion.
5294 */
5295static void
5296bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport,
5297 enum bfa_fcs_vport_event event)
5298{
5299 bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
5300 bfa_trc(__vport_fcs(vport), event);
5301
5302 switch (event) {
5303 case BFA_FCS_VPORT_SM_DELCOMP:
5304 bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);
5305 bfa_fcs_vport_free(vport);
5306 break;
5307
5308 case BFA_FCS_VPORT_SM_DELETE:
5309 break;
5310
5311 default:
5312 bfa_sm_fault(__vport_fcs(vport), event);
5313 }
5314}
5315
5316/**
5317 * LOGO is sent to fabric. Vport delete is in progress. Lport delete cleanup
5318 * is done.
5319 */
5320static void
5321bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
5322 enum bfa_fcs_vport_event event)
5323{
5324 bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
5325 bfa_trc(__vport_fcs(vport), event);
5326
5327 switch (event) {
5328 case BFA_FCS_VPORT_SM_OFFLINE:
5329 bfa_lps_discard(vport->lps);
5330 /*
5331 * !!! fall through !!!
5332 */
5333
5334 case BFA_FCS_VPORT_SM_RSP_OK:
5335 case BFA_FCS_VPORT_SM_RSP_ERROR:
5336 bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);
5337 bfa_fcs_vport_free(vport);
5338 break;
5339
5340 case BFA_FCS_VPORT_SM_DELETE:
5341 break;
5342
5343 default:
5344 bfa_sm_fault(__vport_fcs(vport), event);
5345 }
5346}
5347
5348
5349
5350/**
5351 * fcs_vport_private FCS virtual port private functions
5352 */
5353/**
5354 * This routine will be called to send a FDISC command.
5355 */
5356static void
5357bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport)
5358{
5359 bfa_lps_fdisc(vport->lps, vport,
5360 bfa_fcport_get_maxfrsize(__vport_bfa(vport)),
5361 __vport_pwwn(vport), __vport_nwwn(vport));
5362 vport->vport_stats.fdisc_sent++;
5363}
5364
5365static void
5366bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
5367{
5368 u8 lsrjt_rsn = bfa_lps_get_lsrjt_rsn(vport->lps);
5369 u8 lsrjt_expl = bfa_lps_get_lsrjt_expl(vport->lps);
5370
5371 bfa_trc(__vport_fcs(vport), lsrjt_rsn);
5372 bfa_trc(__vport_fcs(vport), lsrjt_expl);
5373
5374 /* For certain reason codes, we don't want to retry. */
5375 switch (bfa_lps_get_lsrjt_expl(vport->lps)) {
5376 case FC_LS_RJT_EXP_INV_PORT_NAME: /* by brocade */
5377 case FC_LS_RJT_EXP_INVALID_NPORT_ID: /* by Cisco */
5378 if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
5379 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
5380 else
5381 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_DUP_WWN);
5382 break;
5383
5384 case FC_LS_RJT_EXP_INSUFF_RES:
5385 /*
5386 * This means max logins per port/switch setting on the
5387 * switch was exceeded.
5388 */
5389 if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
5390 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
5391 else
5392 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED);
5393 break;
5394
5395 default:
5396 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
933 } 5397 }
5398}
5399
5400/**
5401 * Called to send a logout to the fabric. Used when a V-Port is
5402 * deleted/stopped.
5403 */
5404static void
5405bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport)
5406{
5407 bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
5408
5409 vport->vport_stats.logo_sent++;
5410 bfa_lps_fdisclogo(vport->lps);
5411}
5412
5413
5414/**
5415 * This routine will be called by bfa_timer on timer timeouts.
5416 *
5417 * param[in] vport - pointer to bfa_fcs_vport_t.
5418 * param[out] vport_status - pointer to return vport status in
5419 *
5420 * return
5421 * void
5422 *
5423 * Special Considerations:
5424 *
5425 * note
5426 */
5427static void
5428bfa_fcs_vport_timeout(void *vport_arg)
5429{
5430 struct bfa_fcs_vport_s *vport = (struct bfa_fcs_vport_s *) vport_arg;
5431
5432 vport->vport_stats.fdisc_timeouts++;
5433 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_TIMEOUT);
5434}
5435
5436static void
5437bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport)
5438{
5439 struct bfad_vport_s *vport_drv =
5440 (struct bfad_vport_s *)vport->vport_drv;
5441
5442 bfa_fcs_fabric_delvport(__vport_fabric(vport), vport);
5443
5444 if (vport_drv->comp_del)
5445 complete(vport_drv->comp_del);
5446
5447 bfa_lps_delete(vport->lps);
5448}
5449
934 5450
5451
5452/**
5453 * fcs_vport_public FCS virtual port public interfaces
5454 */
5455
5456/**
5457 * Online notification from fabric SM.
5458 */
5459void
5460bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport)
5461{
5462 vport->vport_stats.fab_online++;
5463 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE);
5464}
5465
5466/**
5467 * Offline notification from fabric SM.
5468 */
5469void
5470bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport)
5471{
5472 vport->vport_stats.fab_offline++;
5473 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE);
5474}
5475
5476/**
5477 * Cleanup notification from fabric SM on link timer expiry.
5478 */
5479void
5480bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport)
5481{
5482 vport->vport_stats.fab_cleanup++;
5483}
5484/**
5485 * delete notification from fabric SM. To be invoked from within FCS.
5486 */
5487void
5488bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport)
5489{
5490 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE);
935} 5491}
936 5492
5493/**
5494 * Delete completion callback from associated lport
5495 */
5496void
5497bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport)
5498{
5499 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELCOMP);
5500}
937 5501
5502
5503
5504/**
5505 * fcs_vport_api Virtual port API
5506 */
5507
5508/**
5509 * Use this function to instantiate a new FCS vport object. This
5510 * function will not trigger any HW initialization process (which will be
5511 * done in vport_start() call)
5512 *
5513 * param[in] vport - pointer to bfa_fcs_vport_t. This space
5514 * needs to be allocated by the driver.
5515 * param[in] fcs - FCS instance
5516 * param[in] vport_cfg - vport configuration
5517 * param[in] vf_id - VF_ID if vport is created within a VF.
5518 * FC_VF_ID_NULL to specify base fabric.
5519 * param[in] vport_drv - Opaque handle back to the driver's vport
5520 * structure
5521 *
5522 * retval BFA_STATUS_OK - on success.
5523 * retval BFA_STATUS_FAILED - on failure.
5524 */
5525bfa_status_t
5526bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
5527 u16 vf_id, struct bfa_lport_cfg_s *vport_cfg,
5528 struct bfad_vport_s *vport_drv)
5529{
5530 if (vport_cfg->pwwn == 0)
5531 return BFA_STATUS_INVALID_WWN;
5532
5533 if (bfa_fcs_lport_get_pwwn(&fcs->fabric.bport) == vport_cfg->pwwn)
5534 return BFA_STATUS_VPORT_WWN_BP;
5535
5536 if (bfa_fcs_vport_lookup(fcs, vf_id, vport_cfg->pwwn) != NULL)
5537 return BFA_STATUS_VPORT_EXISTS;
5538
5539 if (bfa_fcs_fabric_vport_count(&fcs->fabric) ==
5540 bfa_lps_get_max_vport(fcs->bfa))
5541 return BFA_STATUS_VPORT_MAX;
5542
5543 vport->lps = bfa_lps_alloc(fcs->bfa);
5544 if (!vport->lps)
5545 return BFA_STATUS_VPORT_MAX;
5546
5547 vport->vport_drv = vport_drv;
5548 vport_cfg->preboot_vp = BFA_FALSE;
5549
5550 bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);
5551 bfa_fcs_lport_attach(&vport->lport, fcs, vf_id, vport);
5552 bfa_fcs_lport_init(&vport->lport, vport_cfg);
5553 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_CREATE);
5554
5555 return BFA_STATUS_OK;
5556}
5557
5558/**
5559 * Use this function to instantiate a new FCS PBC vport object. This
5560 * function will not trigger any HW initialization process (which will be
5561 * done in vport_start() call)
5562 *
5563 * param[in] vport - pointer to bfa_fcs_vport_t. This space
5564 * needs to be allocated by the driver.
5565 * param[in] fcs - FCS instance
5566 * param[in] vport_cfg - vport configuration
5567 * param[in] vf_id - VF_ID if vport is created within a VF.
5568 * FC_VF_ID_NULL to specify base fabric.
5569 * param[in] vport_drv - Opaque handle back to the driver's vport
5570 * structure
5571 *
5572 * retval BFA_STATUS_OK - on success.
5573 * retval BFA_STATUS_FAILED - on failure.
5574 */
5575bfa_status_t
5576bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
5577 u16 vf_id, struct bfa_lport_cfg_s *vport_cfg,
5578 struct bfad_vport_s *vport_drv)
5579{
5580 bfa_status_t rc;
5581
5582 rc = bfa_fcs_vport_create(vport, fcs, vf_id, vport_cfg, vport_drv);
5583 vport->lport.port_cfg.preboot_vp = BFA_TRUE;
5584
5585 return rc;
5586}
5587
5588/**
5589 * Use this function to findout if this is a pbc vport or not.
5590 *
5591 * @param[in] vport - pointer to bfa_fcs_vport_t.
5592 *
5593 * @returns None
5594 */
5595bfa_boolean_t
5596bfa_fcs_is_pbc_vport(struct bfa_fcs_vport_s *vport)
5597{
5598
5599 if (vport && (vport->lport.port_cfg.preboot_vp == BFA_TRUE))
5600 return BFA_TRUE;
5601 else
5602 return BFA_FALSE;
5603
5604}
5605
5606/**
5607 * Use this function initialize the vport.
5608 *
5609 * @param[in] vport - pointer to bfa_fcs_vport_t.
5610 *
5611 * @returns None
5612 */
5613bfa_status_t
5614bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport)
5615{
5616 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_START);
5617
5618 return BFA_STATUS_OK;
5619}
5620
5621/**
5622 * Use this function quiese the vport object. This function will return
5623 * immediately, when the vport is actually stopped, the
5624 * bfa_drv_vport_stop_cb() will be called.
5625 *
5626 * param[in] vport - pointer to bfa_fcs_vport_t.
5627 *
5628 * return None
5629 */
5630bfa_status_t
5631bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport)
5632{
5633 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_STOP);
5634
5635 return BFA_STATUS_OK;
5636}
5637
5638/**
5639 * Use this function to delete a vport object. Fabric object should
5640 * be stopped before this function call.
5641 *
5642 * !!!!!!! Donot invoke this from within FCS !!!!!!!
5643 *
5644 * param[in] vport - pointer to bfa_fcs_vport_t.
5645 *
5646 * return None
5647 */
5648bfa_status_t
5649bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport)
5650{
5651
5652 if (vport->lport.port_cfg.preboot_vp)
5653 return BFA_STATUS_PBC;
5654
5655 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE);
5656
5657 return BFA_STATUS_OK;
5658}
5659
5660/**
5661 * Use this function to get vport's current status info.
5662 *
5663 * param[in] vport pointer to bfa_fcs_vport_t.
5664 * param[out] attr pointer to return vport attributes
5665 *
5666 * return None
5667 */
5668void
5669bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport,
5670 struct bfa_vport_attr_s *attr)
5671{
5672 if (vport == NULL || attr == NULL)
5673 return;
5674
5675 bfa_os_memset(attr, 0, sizeof(struct bfa_vport_attr_s));
5676
5677 bfa_fcs_lport_get_attr(&vport->lport, &attr->port_attr);
5678 attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm);
5679}
5680
5681/**
5682 * Use this function to get vport's statistics.
5683 *
5684 * param[in] vport pointer to bfa_fcs_vport_t.
5685 * param[out] stats pointer to return vport statistics in
5686 *
5687 * return None
5688 */
5689void
5690bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport,
5691 struct bfa_vport_stats_s *stats)
5692{
5693 *stats = vport->vport_stats;
5694}
5695
5696/**
5697 * Use this function to clear vport's statistics.
5698 *
5699 * param[in] vport pointer to bfa_fcs_vport_t.
5700 *
5701 * return None
5702 */
5703void
5704bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport)
5705{
5706 bfa_os_memset(&vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
5707}
5708
5709/**
5710 * Lookup a virtual port. Excludes base port from lookup.
5711 */
5712struct bfa_fcs_vport_s *
5713bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t vpwwn)
5714{
5715 struct bfa_fcs_vport_s *vport;
5716 struct bfa_fcs_fabric_s *fabric;
5717
5718 bfa_trc(fcs, vf_id);
5719 bfa_trc(fcs, vpwwn);
5720
5721 fabric = bfa_fcs_vf_lookup(fcs, vf_id);
5722 if (!fabric) {
5723 bfa_trc(fcs, vf_id);
5724 return NULL;
5725 }
5726
5727 vport = bfa_fcs_fabric_vport_lookup(fabric, vpwwn);
5728 return vport;
5729}
5730
5731/**
5732 * FDISC Response
5733 */
5734void
5735bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status)
5736{
5737 struct bfa_fcs_vport_s *vport = uarg;
5738
5739 bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
5740 bfa_trc(__vport_fcs(vport), status);
5741
5742 switch (status) {
5743 case BFA_STATUS_OK:
5744 /*
5745 * Initialiaze the V-Port fields
5746 */
5747 __vport_fcid(vport) = bfa_lps_get_pid(vport->lps);
5748 vport->vport_stats.fdisc_accepts++;
5749 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK);
5750 break;
5751
5752 case BFA_STATUS_INVALID_MAC:
5753 /* Only for CNA */
5754 vport->vport_stats.fdisc_acc_bad++;
5755 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
5756
5757 break;
5758
5759 case BFA_STATUS_EPROTOCOL:
5760 switch (bfa_lps_get_extstatus(vport->lps)) {
5761 case BFA_EPROTO_BAD_ACCEPT:
5762 vport->vport_stats.fdisc_acc_bad++;
5763 break;
5764
5765 case BFA_EPROTO_UNKNOWN_RSP:
5766 vport->vport_stats.fdisc_unknown_rsp++;
5767 break;
5768
5769 default:
5770 break;
5771 }
5772
5773 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
5774 break;
5775
5776 case BFA_STATUS_FABRIC_RJT:
5777 vport->vport_stats.fdisc_rejects++;
5778 bfa_fcs_vport_fdisc_rejected(vport);
5779 break;
5780
5781 default:
5782 vport->vport_stats.fdisc_rsp_err++;
5783 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
5784 }
5785}
5786
5787/**
5788 * LOGO response
5789 */
5790void
5791bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg)
5792{
5793 struct bfa_fcs_vport_s *vport = uarg;
5794 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK);
5795}
5796
5797/**
5798 * Received clear virtual link
5799 */
5800void
5801bfa_cb_lps_cvl_event(void *bfad, void *uarg)
5802{
5803 struct bfa_fcs_vport_s *vport = uarg;
5804
5805 /* Send an Offline followed by an ONLINE */
5806 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE);
5807 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE);
5808}
diff --git a/drivers/scsi/bfa/bfa_fcs_port.c b/drivers/scsi/bfa/bfa_fcs_port.c
deleted file mode 100644
index 3c27788cd527..000000000000
--- a/drivers/scsi/bfa/bfa_fcs_port.c
+++ /dev/null
@@ -1,61 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcs_pport.c BFA FCS PPORT ( physical port)
20 */
21
22#include <fcs/bfa_fcs.h>
23#include <bfa_svc.h>
24#include <fcs/bfa_fcs_fabric.h>
25#include "fcs_trcmod.h"
26#include "fcs.h"
27#include "fcs_fabric.h"
28#include "fcs_port.h"
29
30BFA_TRC_FILE(FCS, PPORT);
31
32static void
33bfa_fcs_pport_event_handler(void *cbarg, bfa_pport_event_t event)
34{
35 struct bfa_fcs_s *fcs = cbarg;
36
37 bfa_trc(fcs, event);
38
39 switch (event) {
40 case BFA_PPORT_LINKUP:
41 bfa_fcs_fabric_link_up(&fcs->fabric);
42 break;
43
44 case BFA_PPORT_LINKDOWN:
45 bfa_fcs_fabric_link_down(&fcs->fabric);
46 break;
47
48 case BFA_PPORT_TRUNK_LINKDOWN:
49 bfa_assert(0);
50 break;
51
52 default:
53 bfa_assert(0);
54 }
55}
56
57void
58bfa_fcs_pport_attach(struct bfa_fcs_s *fcs)
59{
60 bfa_fcport_event_register(fcs->bfa, bfa_fcs_pport_event_handler, fcs);
61}
diff --git a/drivers/scsi/bfa/rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index 9b4c2c9a644b..635f0cd88714 100644
--- a/drivers/scsi/bfa/rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -19,151 +19,133 @@
19 * rport.c Remote port implementation. 19 * rport.c Remote port implementation.
20 */ 20 */
21 21
22#include <linux/slab.h> 22#include "bfa_fcs.h"
23#include <bfa.h> 23#include "bfa_fcbuild.h"
24#include <bfa_svc.h> 24#include "bfad_drv.h"
25#include "fcbuild.h"
26#include "fcs_vport.h"
27#include "fcs_lport.h"
28#include "fcs_rport.h"
29#include "fcs_fcpim.h"
30#include "fcs_fcptm.h"
31#include "fcs_trcmod.h"
32#include "fcs_fcxp.h"
33#include "fcs.h"
34#include <fcb/bfa_fcb_rport.h>
35#include <aen/bfa_aen_rport.h>
36 25
37BFA_TRC_FILE(FCS, RPORT); 26BFA_TRC_FILE(FCS, RPORT);
38 27
39/* In millisecs */ 28static u32
40static u32 bfa_fcs_rport_del_timeout = 29bfa_fcs_rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT * 1000;
41 BFA_FCS_RPORT_DEF_DEL_TIMEOUT * 1000; 30 /* In millisecs */
42
43/* 31/*
44 * forward declarations 32 * forward declarations
45 */ 33 */
46static struct bfa_fcs_rport_s *bfa_fcs_rport_alloc(struct bfa_fcs_port_s *port, 34static struct bfa_fcs_rport_s *bfa_fcs_rport_alloc(
47 wwn_t pwwn, u32 rpid); 35 struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid);
48static void bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport); 36static void bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport);
49static void bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport); 37static void bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport);
50static void bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport); 38static void bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport);
51static void bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport); 39static void bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport);
52static void bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, 40static void bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport,
53 struct fc_logi_s *plogi); 41 struct fc_logi_s *plogi);
54static void bfa_fcs_rport_fc4_pause(struct bfa_fcs_rport_s *rport); 42static void bfa_fcs_rport_timeout(void *arg);
55static void bfa_fcs_rport_fc4_resume(struct bfa_fcs_rport_s *rport); 43static void bfa_fcs_rport_send_plogi(void *rport_cbarg,
56static void bfa_fcs_rport_timeout(void *arg);
57static void bfa_fcs_rport_send_plogi(void *rport_cbarg,
58 struct bfa_fcxp_s *fcxp_alloced); 44 struct bfa_fcxp_s *fcxp_alloced);
59static void bfa_fcs_rport_send_plogiacc(void *rport_cbarg, 45static void bfa_fcs_rport_send_plogiacc(void *rport_cbarg,
60 struct bfa_fcxp_s *fcxp_alloced); 46 struct bfa_fcxp_s *fcxp_alloced);
61static void bfa_fcs_rport_plogi_response(void *fcsarg, 47static void bfa_fcs_rport_plogi_response(void *fcsarg,
62 struct bfa_fcxp_s *fcxp, 48 struct bfa_fcxp_s *fcxp, void *cbarg,
63 void *cbarg, 49 bfa_status_t req_status, u32 rsp_len,
64 bfa_status_t req_status, 50 u32 resid_len, struct fchs_s *rsp_fchs);
65 u32 rsp_len, 51static void bfa_fcs_rport_send_adisc(void *rport_cbarg,
66 u32 resid_len,
67 struct fchs_s *rsp_fchs);
68static void bfa_fcs_rport_send_adisc(void *rport_cbarg,
69 struct bfa_fcxp_s *fcxp_alloced); 52 struct bfa_fcxp_s *fcxp_alloced);
70static void bfa_fcs_rport_adisc_response(void *fcsarg, 53static void bfa_fcs_rport_adisc_response(void *fcsarg,
71 struct bfa_fcxp_s *fcxp, 54 struct bfa_fcxp_s *fcxp, void *cbarg,
72 void *cbarg, 55 bfa_status_t req_status, u32 rsp_len,
73 bfa_status_t req_status, 56 u32 resid_len, struct fchs_s *rsp_fchs);
74 u32 rsp_len, 57static void bfa_fcs_rport_send_nsdisc(void *rport_cbarg,
75 u32 resid_len,
76 struct fchs_s *rsp_fchs);
77static void bfa_fcs_rport_send_gidpn(void *rport_cbarg,
78 struct bfa_fcxp_s *fcxp_alloced); 58 struct bfa_fcxp_s *fcxp_alloced);
79static void bfa_fcs_rport_gidpn_response(void *fcsarg, 59static void bfa_fcs_rport_gidpn_response(void *fcsarg,
80 struct bfa_fcxp_s *fcxp, 60 struct bfa_fcxp_s *fcxp, void *cbarg,
81 void *cbarg, 61 bfa_status_t req_status, u32 rsp_len,
82 bfa_status_t req_status, 62 u32 resid_len, struct fchs_s *rsp_fchs);
83 u32 rsp_len, 63static void bfa_fcs_rport_gpnid_response(void *fcsarg,
84 u32 resid_len, 64 struct bfa_fcxp_s *fcxp, void *cbarg,
85 struct fchs_s *rsp_fchs); 65 bfa_status_t req_status, u32 rsp_len,
86static void bfa_fcs_rport_send_logo(void *rport_cbarg, 66 u32 resid_len, struct fchs_s *rsp_fchs);
67static void bfa_fcs_rport_send_logo(void *rport_cbarg,
87 struct bfa_fcxp_s *fcxp_alloced); 68 struct bfa_fcxp_s *fcxp_alloced);
88static void bfa_fcs_rport_send_logo_acc(void *rport_cbarg); 69static void bfa_fcs_rport_send_logo_acc(void *rport_cbarg);
89static void bfa_fcs_rport_process_prli(struct bfa_fcs_rport_s *rport, 70static void bfa_fcs_rport_process_prli(struct bfa_fcs_rport_s *rport,
90 struct fchs_s *rx_fchs, u16 len); 71 struct fchs_s *rx_fchs, u16 len);
91static void bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, 72static void bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport,
92 struct fchs_s *rx_fchs, u8 reason_code, 73 struct fchs_s *rx_fchs, u8 reason_code,
93 u8 reason_code_expl); 74 u8 reason_code_expl);
94static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport, 75static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
95 struct fchs_s *rx_fchs, u16 len); 76 struct fchs_s *rx_fchs, u16 len);
96static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport); 77static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport);
97/** 78/**
98 * fcs_rport_sm FCS rport state machine events 79 * fcs_rport_sm FCS rport state machine events
99 */ 80 */
100 81
101enum rport_event { 82enum rport_event {
102 RPSM_EVENT_PLOGI_SEND = 1, /* new rport; start with PLOGI */ 83 RPSM_EVENT_PLOGI_SEND = 1, /* new rport; start with PLOGI */
103 RPSM_EVENT_PLOGI_RCVD = 2, /* Inbound PLOGI from remote port */ 84 RPSM_EVENT_PLOGI_RCVD = 2, /* Inbound PLOGI from remote port */
104 RPSM_EVENT_PLOGI_COMP = 3, /* PLOGI completed to rport */ 85 RPSM_EVENT_PLOGI_COMP = 3, /* PLOGI completed to rport */
105 RPSM_EVENT_LOGO_RCVD = 4, /* LOGO from remote device */ 86 RPSM_EVENT_LOGO_RCVD = 4, /* LOGO from remote device */
106 RPSM_EVENT_LOGO_IMP = 5, /* implicit logo for SLER */ 87 RPSM_EVENT_LOGO_IMP = 5, /* implicit logo for SLER */
107 RPSM_EVENT_FCXP_SENT = 6, /* Frame from has been sent */ 88 RPSM_EVENT_FCXP_SENT = 6, /* Frame from has been sent */
108 RPSM_EVENT_DELETE = 7, /* RPORT delete request */ 89 RPSM_EVENT_DELETE = 7, /* RPORT delete request */
109 RPSM_EVENT_SCN = 8, /* state change notification */ 90 RPSM_EVENT_SCN = 8, /* state change notification */
110 RPSM_EVENT_ACCEPTED = 9,/* Good response from remote device */ 91 RPSM_EVENT_ACCEPTED = 9, /* Good response from remote device */
111 RPSM_EVENT_FAILED = 10, /* Request to rport failed. */ 92 RPSM_EVENT_FAILED = 10, /* Request to rport failed. */
112 RPSM_EVENT_TIMEOUT = 11, /* Rport SM timeout event */ 93 RPSM_EVENT_TIMEOUT = 11, /* Rport SM timeout event */
113 RPSM_EVENT_HCB_ONLINE = 12, /* BFA rport online callback */ 94 RPSM_EVENT_HCB_ONLINE = 12, /* BFA rport online callback */
114 RPSM_EVENT_HCB_OFFLINE = 13, /* BFA rport offline callback */ 95 RPSM_EVENT_HCB_OFFLINE = 13, /* BFA rport offline callback */
115 RPSM_EVENT_FC4_OFFLINE = 14, /* FC-4 offline complete */ 96 RPSM_EVENT_FC4_OFFLINE = 14, /* FC-4 offline complete */
116 RPSM_EVENT_ADDRESS_CHANGE = 15, /* Rport's PID has changed */ 97 RPSM_EVENT_ADDRESS_CHANGE = 15, /* Rport's PID has changed */
117 RPSM_EVENT_ADDRESS_DISC = 16, /* Need to Discover rport's PID */ 98 RPSM_EVENT_ADDRESS_DISC = 16, /* Need to Discover rport's PID */
118 RPSM_EVENT_PRLO_RCVD = 17, /* PRLO from remote device */ 99 RPSM_EVENT_PRLO_RCVD = 17, /* PRLO from remote device */
100 RPSM_EVENT_PLOGI_RETRY = 18, /* Retry PLOGI continously */
119}; 101};
120 102
121static void bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, 103static void bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport,
122 enum rport_event event); 104 enum rport_event event);
123static void bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport, 105static void bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport,
124 enum rport_event event); 106 enum rport_event event);
125static void bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport, 107static void bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
126 enum rport_event event); 108 enum rport_event event);
127static void bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport, 109static void bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
128 enum rport_event event); 110 enum rport_event event);
129static void bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, 111static void bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport,
130 enum rport_event event);
131static void bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
132 enum rport_event event);
133static void bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport,
134 enum rport_event event); 112 enum rport_event event);
135static void bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport, 113static void bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
114 enum rport_event event);
115static void bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport,
116 enum rport_event event);
117static void bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
136 enum rport_event event); 118 enum rport_event event);
137static void bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, 119static void bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport,
138 enum rport_event event); 120 enum rport_event event);
139static void bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport, 121static void bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
140 enum rport_event event); 122 enum rport_event event);
141static void bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, 123static void bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport,
142 enum rport_event event); 124 enum rport_event event);
143static void bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport, 125static void bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
144 enum rport_event event); 126 enum rport_event event);
145static void bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport, 127static void bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
146 enum rport_event event); 128 enum rport_event event);
147static void bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport, 129static void bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
148 enum rport_event event); 130 enum rport_event event);
149static void bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport, 131static void bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
150 enum rport_event event); 132 enum rport_event event);
151static void bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport, 133static void bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
152 enum rport_event event); 134 enum rport_event event);
153static void bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport, 135static void bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
154 enum rport_event event); 136 enum rport_event event);
155static void bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport, 137static void bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
156 enum rport_event event); 138 enum rport_event event);
157static void bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, 139static void bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport,
158 enum rport_event event); 140 enum rport_event event);
159static void bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport, 141static void bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport,
142 enum rport_event event);
143static void bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
144 enum rport_event event);
145static void bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
146 enum rport_event event);
147static void bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
160 enum rport_event event); 148 enum rport_event event);
161static void bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
162 enum rport_event event);
163static void bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
164 enum rport_event event);
165static void bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
166 enum rport_event event);
167 149
168static struct bfa_sm_table_s rport_sm_table[] = { 150static struct bfa_sm_table_s rport_sm_table[] = {
169 {BFA_SM(bfa_fcs_rport_sm_uninit), BFA_RPORT_UNINIT}, 151 {BFA_SM(bfa_fcs_rport_sm_uninit), BFA_RPORT_UNINIT},
@@ -191,7 +173,7 @@ static struct bfa_sm_table_s rport_sm_table[] = {
191}; 173};
192 174
193/** 175/**
194 * Beginning state. 176 * Beginning state.
195 */ 177 */
196static void 178static void
197bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event) 179bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event)
@@ -221,20 +203,19 @@ bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event)
221 case RPSM_EVENT_ADDRESS_DISC: 203 case RPSM_EVENT_ADDRESS_DISC:
222 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); 204 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
223 rport->ns_retries = 0; 205 rport->ns_retries = 0;
224 bfa_fcs_rport_send_gidpn(rport, NULL); 206 bfa_fcs_rport_send_nsdisc(rport, NULL);
225 break; 207 break;
226
227 default: 208 default:
228 bfa_sm_fault(rport->fcs, event); 209 bfa_sm_fault(rport->fcs, event);
229 } 210 }
230} 211}
231 212
232/** 213/**
233 * PLOGI is being sent. 214 * PLOGI is being sent.
234 */ 215 */
235static void 216static void
236bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport, 217bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport,
237 enum rport_event event) 218 enum rport_event event)
238{ 219{
239 bfa_trc(rport->fcs, rport->pwwn); 220 bfa_trc(rport->fcs, rport->pwwn);
240 bfa_trc(rport->fcs, rport->pid); 221 bfa_trc(rport->fcs, rport->pid);
@@ -258,10 +239,12 @@ bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport,
258 break; 239 break;
259 240
260 case RPSM_EVENT_ADDRESS_CHANGE: 241 case RPSM_EVENT_ADDRESS_CHANGE:
242 case RPSM_EVENT_SCN:
243 /* query the NS */
261 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); 244 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
262 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); 245 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
263 rport->ns_retries = 0; 246 rport->ns_retries = 0;
264 bfa_fcs_rport_send_gidpn(rport, NULL); 247 bfa_fcs_rport_send_nsdisc(rport, NULL);
265 break; 248 break;
266 249
267 case RPSM_EVENT_LOGO_IMP: 250 case RPSM_EVENT_LOGO_IMP:
@@ -273,8 +256,6 @@ bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport,
273 bfa_fcs_rport_del_timeout); 256 bfa_fcs_rport_del_timeout);
274 break; 257 break;
275 258
276 case RPSM_EVENT_SCN:
277 break;
278 259
279 default: 260 default:
280 bfa_sm_fault(rport->fcs, event); 261 bfa_sm_fault(rport->fcs, event);
@@ -282,11 +263,11 @@ bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport,
282} 263}
283 264
284/** 265/**
285 * PLOGI is being sent. 266 * PLOGI is being sent.
286 */ 267 */
287static void 268static void
288bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport, 269bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
289 enum rport_event event) 270 enum rport_event event)
290{ 271{
291 bfa_trc(rport->fcs, rport->pwwn); 272 bfa_trc(rport->fcs, rport->pwwn);
292 bfa_trc(rport->fcs, rport->pid); 273 bfa_trc(rport->fcs, rport->pid);
@@ -304,6 +285,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
304 bfa_fcs_rport_free(rport); 285 bfa_fcs_rport_free(rport);
305 break; 286 break;
306 287
288 case RPSM_EVENT_PLOGI_RCVD:
307 case RPSM_EVENT_SCN: 289 case RPSM_EVENT_SCN:
308 /** 290 /**
309 * Ignore, SCN is possibly online notification. 291 * Ignore, SCN is possibly online notification.
@@ -314,7 +296,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
314 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); 296 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
315 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); 297 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
316 rport->ns_retries = 0; 298 rport->ns_retries = 0;
317 bfa_fcs_rport_send_gidpn(rport, NULL); 299 bfa_fcs_rport_send_nsdisc(rport, NULL);
318 break; 300 break;
319 301
320 case RPSM_EVENT_LOGO_IMP: 302 case RPSM_EVENT_LOGO_IMP:
@@ -338,7 +320,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
338} 320}
339 321
340/** 322/**
341 * PLOGI is sent. 323 * PLOGI is sent.
342 */ 324 */
343static void 325static void
344bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport, 326bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
@@ -349,24 +331,9 @@ bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
349 bfa_trc(rport->fcs, event); 331 bfa_trc(rport->fcs, event);
350 332
351 switch (event) { 333 switch (event) {
352 case RPSM_EVENT_SCN:
353 bfa_timer_stop(&rport->timer);
354 /*
355 * !! fall through !!
356 */
357
358 case RPSM_EVENT_TIMEOUT: 334 case RPSM_EVENT_TIMEOUT:
359 if (rport->plogi_retries < BFA_FCS_RPORT_MAX_RETRIES) { 335 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
360 rport->plogi_retries++; 336 bfa_fcs_rport_send_plogi(rport, NULL);
361 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
362 bfa_fcs_rport_send_plogi(rport, NULL);
363 } else {
364 rport->pid = 0;
365 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
366 bfa_timer_start(rport->fcs->bfa, &rport->timer,
367 bfa_fcs_rport_timeout, rport,
368 bfa_fcs_rport_del_timeout);
369 }
370 break; 337 break;
371 338
372 case RPSM_EVENT_DELETE: 339 case RPSM_EVENT_DELETE:
@@ -386,10 +353,11 @@ bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
386 break; 353 break;
387 354
388 case RPSM_EVENT_ADDRESS_CHANGE: 355 case RPSM_EVENT_ADDRESS_CHANGE:
356 case RPSM_EVENT_SCN:
389 bfa_timer_stop(&rport->timer); 357 bfa_timer_stop(&rport->timer);
390 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); 358 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
391 rport->ns_retries = 0; 359 rport->ns_retries = 0;
392 bfa_fcs_rport_send_gidpn(rport, NULL); 360 bfa_fcs_rport_send_nsdisc(rport, NULL);
393 break; 361 break;
394 362
395 case RPSM_EVENT_LOGO_IMP: 363 case RPSM_EVENT_LOGO_IMP:
@@ -413,7 +381,7 @@ bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
413} 381}
414 382
415/** 383/**
416 * PLOGI is sent. 384 * PLOGI is sent.
417 */ 385 */
418static void 386static void
419bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event) 387bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
@@ -443,10 +411,28 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
443 * !! fall through !! 411 * !! fall through !!
444 */ 412 */
445 case RPSM_EVENT_FAILED: 413 case RPSM_EVENT_FAILED:
414 if (rport->plogi_retries < BFA_FCS_RPORT_MAX_RETRIES) {
415 rport->plogi_retries++;
416 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_retry);
417 bfa_timer_start(rport->fcs->bfa, &rport->timer,
418 bfa_fcs_rport_timeout, rport,
419 BFA_FCS_RETRY_TIMEOUT);
420 } else {
421 bfa_stats(rport->port, rport_del_max_plogi_retry);
422 rport->pid = 0;
423 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
424 bfa_timer_start(rport->fcs->bfa, &rport->timer,
425 bfa_fcs_rport_timeout, rport,
426 bfa_fcs_rport_del_timeout);
427 }
428 break;
429
430 case RPSM_EVENT_PLOGI_RETRY:
431 rport->plogi_retries = 0;
446 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_retry); 432 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_retry);
447 bfa_timer_start(rport->fcs->bfa, &rport->timer, 433 bfa_timer_start(rport->fcs->bfa, &rport->timer,
448 bfa_fcs_rport_timeout, rport, 434 bfa_fcs_rport_timeout, rport,
449 BFA_FCS_RETRY_TIMEOUT); 435 (FC_RA_TOV * 1000));
450 break; 436 break;
451 437
452 case RPSM_EVENT_LOGO_IMP: 438 case RPSM_EVENT_LOGO_IMP:
@@ -459,10 +445,11 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
459 break; 445 break;
460 446
461 case RPSM_EVENT_ADDRESS_CHANGE: 447 case RPSM_EVENT_ADDRESS_CHANGE:
448 case RPSM_EVENT_SCN:
462 bfa_fcxp_discard(rport->fcxp); 449 bfa_fcxp_discard(rport->fcxp);
463 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); 450 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
464 rport->ns_retries = 0; 451 rport->ns_retries = 0;
465 bfa_fcs_rport_send_gidpn(rport, NULL); 452 bfa_fcs_rport_send_nsdisc(rport, NULL);
466 break; 453 break;
467 454
468 case RPSM_EVENT_PLOGI_RCVD: 455 case RPSM_EVENT_PLOGI_RCVD:
@@ -471,12 +458,6 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
471 bfa_fcs_rport_send_plogiacc(rport, NULL); 458 bfa_fcs_rport_send_plogiacc(rport, NULL);
472 break; 459 break;
473 460
474 case RPSM_EVENT_SCN:
475 /**
476 * Ignore SCN - wait for PLOGI response.
477 */
478 break;
479
480 case RPSM_EVENT_DELETE: 461 case RPSM_EVENT_DELETE:
481 bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); 462 bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
482 bfa_fcxp_discard(rport->fcxp); 463 bfa_fcxp_discard(rport->fcxp);
@@ -495,8 +476,8 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
495} 476}
496 477
497/** 478/**
498 * PLOGI is complete. Awaiting BFA rport online callback. FC-4s 479 * PLOGI is complete. Awaiting BFA rport online callback. FC-4s
499 * are offline. 480 * are offline.
500 */ 481 */
501static void 482static void
502bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport, 483bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
@@ -551,7 +532,7 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
551} 532}
552 533
553/** 534/**
554 * Rport is ONLINE. FC-4s active. 535 * Rport is ONLINE. FC-4s active.
555 */ 536 */
556static void 537static void
557bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event) 538bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event)
@@ -562,18 +543,11 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event)
562 543
563 switch (event) { 544 switch (event) {
564 case RPSM_EVENT_SCN: 545 case RPSM_EVENT_SCN:
565 /**
566 * Pause FC-4 activity till rport is authenticated.
567 * In switched fabrics, check presence of device in nameserver
568 * first.
569 */
570 bfa_fcs_rport_fc4_pause(rport);
571
572 if (bfa_fcs_fabric_is_switched(rport->port->fabric)) { 546 if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
573 bfa_sm_set_state(rport, 547 bfa_sm_set_state(rport,
574 bfa_fcs_rport_sm_nsquery_sending); 548 bfa_fcs_rport_sm_nsquery_sending);
575 rport->ns_retries = 0; 549 rport->ns_retries = 0;
576 bfa_fcs_rport_send_gidpn(rport, NULL); 550 bfa_fcs_rport_send_nsdisc(rport, NULL);
577 } else { 551 } else {
578 bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_sending); 552 bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_sending);
579 bfa_fcs_rport_send_adisc(rport, NULL); 553 bfa_fcs_rport_send_adisc(rport, NULL);
@@ -607,12 +581,12 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event)
607} 581}
608 582
609/** 583/**
610 * An SCN event is received in ONLINE state. NS query is being sent 584 * An SCN event is received in ONLINE state. NS query is being sent
611 * prior to ADISC authentication with rport. FC-4s are paused. 585 * prior to ADISC authentication with rport. FC-4s are paused.
612 */ 586 */
613static void 587static void
614bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport, 588bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
615 enum rport_event event) 589 enum rport_event event)
616{ 590{
617 bfa_trc(rport->fcs, rport->pwwn); 591 bfa_trc(rport->fcs, rport->pwwn);
618 bfa_trc(rport->fcs, rport->pid); 592 bfa_trc(rport->fcs, rport->pid);
@@ -665,8 +639,8 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
665} 639}
666 640
667/** 641/**
668 * An SCN event is received in ONLINE state. NS query is sent to rport. 642 * An SCN event is received in ONLINE state. NS query is sent to rport.
669 * FC-4s are paused. 643 * FC-4s are paused.
670 */ 644 */
671static void 645static void
672bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event) 646bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
@@ -686,7 +660,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
686 if (rport->ns_retries < BFA_FCS_RPORT_MAX_RETRIES) { 660 if (rport->ns_retries < BFA_FCS_RPORT_MAX_RETRIES) {
687 bfa_sm_set_state(rport, 661 bfa_sm_set_state(rport,
688 bfa_fcs_rport_sm_nsquery_sending); 662 bfa_fcs_rport_sm_nsquery_sending);
689 bfa_fcs_rport_send_gidpn(rport, NULL); 663 bfa_fcs_rport_send_nsdisc(rport, NULL);
690 } else { 664 } else {
691 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); 665 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
692 bfa_fcs_rport_offline_action(rport); 666 bfa_fcs_rport_offline_action(rport);
@@ -724,12 +698,12 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
724} 698}
725 699
726/** 700/**
727 * An SCN event is received in ONLINE state. ADISC is being sent for 701 * An SCN event is received in ONLINE state. ADISC is being sent for
728 * authenticating with rport. FC-4s are paused. 702 * authenticating with rport. FC-4s are paused.
729 */ 703 */
730static void 704static void
731bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport, 705bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
732 enum rport_event event) 706 enum rport_event event)
733{ 707{
734 bfa_trc(rport->fcs, rport->pwwn); 708 bfa_trc(rport->fcs, rport->pwwn);
735 bfa_trc(rport->fcs, rport->pid); 709 bfa_trc(rport->fcs, rport->pid);
@@ -775,8 +749,8 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
775} 749}
776 750
777/** 751/**
778 * An SCN event is received in ONLINE state. ADISC is to rport. 752 * An SCN event is received in ONLINE state. ADISC is to rport.
779 * FC-4s are paused. 753 * FC-4s are paused.
780 */ 754 */
781static void 755static void
782bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event) 756bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
@@ -788,7 +762,6 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
788 switch (event) { 762 switch (event) {
789 case RPSM_EVENT_ACCEPTED: 763 case RPSM_EVENT_ACCEPTED:
790 bfa_sm_set_state(rport, bfa_fcs_rport_sm_online); 764 bfa_sm_set_state(rport, bfa_fcs_rport_sm_online);
791 bfa_fcs_rport_fc4_resume(rport);
792 break; 765 break;
793 766
794 case RPSM_EVENT_PLOGI_RCVD: 767 case RPSM_EVENT_PLOGI_RCVD:
@@ -838,7 +811,7 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
838} 811}
839 812
840/** 813/**
841 * Rport has sent LOGO. Awaiting FC-4 offline completion callback. 814 * Rport has sent LOGO. Awaiting FC-4 offline completion callback.
842 */ 815 */
843static void 816static void
844bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport, 817bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
@@ -869,12 +842,12 @@ bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
869} 842}
870 843
871/** 844/**
872 * LOGO needs to be sent to rport. Awaiting FC-4 offline completion 845 * LOGO needs to be sent to rport. Awaiting FC-4 offline completion
873 * callback. 846 * callback.
874 */ 847 */
875static void 848static void
876bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport, 849bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
877 enum rport_event event) 850 enum rport_event event)
878{ 851{
879 bfa_trc(rport->fcs, rport->pwwn); 852 bfa_trc(rport->fcs, rport->pwwn);
880 bfa_trc(rport->fcs, rport->pid); 853 bfa_trc(rport->fcs, rport->pid);
@@ -892,7 +865,7 @@ bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
892} 865}
893 866
894/** 867/**
895 * Rport is going offline. Awaiting FC-4 offline completion callback. 868 * Rport is going offline. Awaiting FC-4 offline completion callback.
896 */ 869 */
897static void 870static void
898bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport, 871bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
@@ -929,12 +902,12 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
929} 902}
930 903
931/** 904/**
932 * Rport is offline. FC-4s are offline. Awaiting BFA rport offline 905 * Rport is offline. FC-4s are offline. Awaiting BFA rport offline
933 * callback. 906 * callback.
934 */ 907 */
935static void 908static void
936bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport, 909bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
937 enum rport_event event) 910 enum rport_event event)
938{ 911{
939 bfa_trc(rport->fcs, rport->pwwn); 912 bfa_trc(rport->fcs, rport->pwwn);
940 bfa_trc(rport->fcs, rport->pid); 913 bfa_trc(rport->fcs, rport->pid);
@@ -943,12 +916,12 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
943 switch (event) { 916 switch (event) {
944 case RPSM_EVENT_HCB_OFFLINE: 917 case RPSM_EVENT_HCB_OFFLINE:
945 case RPSM_EVENT_ADDRESS_CHANGE: 918 case RPSM_EVENT_ADDRESS_CHANGE:
946 if (bfa_fcs_port_is_online(rport->port)) { 919 if (bfa_fcs_lport_is_online(rport->port)) {
947 if (bfa_fcs_fabric_is_switched(rport->port->fabric)) { 920 if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
948 bfa_sm_set_state(rport, 921 bfa_sm_set_state(rport,
949 bfa_fcs_rport_sm_nsdisc_sending); 922 bfa_fcs_rport_sm_nsdisc_sending);
950 rport->ns_retries = 0; 923 rport->ns_retries = 0;
951 bfa_fcs_rport_send_gidpn(rport, NULL); 924 bfa_fcs_rport_send_nsdisc(rport, NULL);
952 } else { 925 } else {
953 bfa_sm_set_state(rport, 926 bfa_sm_set_state(rport,
954 bfa_fcs_rport_sm_plogi_sending); 927 bfa_fcs_rport_sm_plogi_sending);
@@ -983,8 +956,8 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
983} 956}
984 957
985/** 958/**
986 * Rport is offline. FC-4s are offline. Awaiting BFA rport offline 959 * Rport is offline. FC-4s are offline. Awaiting BFA rport offline
987 * callback to send LOGO accept. 960 * callback to send LOGO accept.
988 */ 961 */
989static void 962static void
990bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport, 963bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
@@ -1001,21 +974,21 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
1001 bfa_fcs_rport_send_prlo_acc(rport); 974 bfa_fcs_rport_send_prlo_acc(rport);
1002 if (rport->pid && (rport->prlo == BFA_FALSE)) 975 if (rport->pid && (rport->prlo == BFA_FALSE))
1003 bfa_fcs_rport_send_logo_acc(rport); 976 bfa_fcs_rport_send_logo_acc(rport);
1004
1005 /* 977 /*
1006 * If the lport is online and if the rport is not a well known 978 * If the lport is online and if the rport is not a well
1007 * address port, we try to re-discover the r-port. 979 * known address port,
980 * we try to re-discover the r-port.
1008 */ 981 */
1009 if (bfa_fcs_port_is_online(rport->port) 982 if (bfa_fcs_lport_is_online(rport->port) &&
1010 && (!BFA_FCS_PID_IS_WKA(rport->pid))) { 983 (!BFA_FCS_PID_IS_WKA(rport->pid))) {
1011 bfa_sm_set_state(rport, 984 bfa_sm_set_state(rport,
1012 bfa_fcs_rport_sm_nsdisc_sending); 985 bfa_fcs_rport_sm_nsdisc_sending);
1013 rport->ns_retries = 0; 986 rport->ns_retries = 0;
1014 bfa_fcs_rport_send_gidpn(rport, NULL); 987 bfa_fcs_rport_send_nsdisc(rport, NULL);
1015 } else { 988 } else {
1016 /* 989 /*
1017 * if it is not a well known address, reset the pid to 990 * if it is not a well known address, reset the
1018 * 991 * pid to 0.
1019 */ 992 */
1020 if (!BFA_FCS_PID_IS_WKA(rport->pid)) 993 if (!BFA_FCS_PID_IS_WKA(rport->pid))
1021 rport->pid = 0; 994 rport->pid = 0;
@@ -1047,12 +1020,13 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
1047} 1020}
1048 1021
1049/** 1022/**
1050 * Rport is being deleted. FC-4s are offline. Awaiting BFA rport offline 1023 * Rport is being deleted. FC-4s are offline.
1051 * callback to send LOGO. 1024 * Awaiting BFA rport offline
1025 * callback to send LOGO.
1052 */ 1026 */
1053static void 1027static void
1054bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport, 1028bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
1055 enum rport_event event) 1029 enum rport_event event)
1056{ 1030{
1057 bfa_trc(rport->fcs, rport->pwwn); 1031 bfa_trc(rport->fcs, rport->pwwn);
1058 bfa_trc(rport->fcs, rport->pid); 1032 bfa_trc(rport->fcs, rport->pid);
@@ -1075,11 +1049,11 @@ bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
1075} 1049}
1076 1050
1077/** 1051/**
1078 * Rport is being deleted. FC-4s are offline. LOGO is being sent. 1052 * Rport is being deleted. FC-4s are offline. LOGO is being sent.
1079 */ 1053 */
1080static void 1054static void
1081bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport, 1055bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
1082 enum rport_event event) 1056 enum rport_event event)
1083{ 1057{
1084 bfa_trc(rport->fcs, rport->pwwn); 1058 bfa_trc(rport->fcs, rport->pwwn);
1085 bfa_trc(rport->fcs, rport->pid); 1059 bfa_trc(rport->fcs, rport->pid);
@@ -1087,9 +1061,7 @@ bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
1087 1061
1088 switch (event) { 1062 switch (event) {
1089 case RPSM_EVENT_FCXP_SENT: 1063 case RPSM_EVENT_FCXP_SENT:
1090 /* 1064 /* Once LOGO is sent, we donot wait for the response */
1091 * Once LOGO is sent, we donot wait for the response
1092 */
1093 bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); 1065 bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
1094 bfa_fcs_rport_free(rport); 1066 bfa_fcs_rport_free(rport);
1095 break; 1067 break;
@@ -1111,8 +1083,8 @@ bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
1111} 1083}
1112 1084
1113/** 1085/**
1114 * Rport is offline. FC-4s are offline. BFA rport is offline. 1086 * Rport is offline. FC-4s are offline. BFA rport is offline.
1115 * Timer active to delete stale rport. 1087 * Timer active to delete stale rport.
1116 */ 1088 */
1117static void 1089static void
1118bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event) 1090bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event)
@@ -1132,7 +1104,7 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event)
1132 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); 1104 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
1133 bfa_timer_stop(&rport->timer); 1105 bfa_timer_stop(&rport->timer);
1134 rport->ns_retries = 0; 1106 rport->ns_retries = 0;
1135 bfa_fcs_rport_send_gidpn(rport, NULL); 1107 bfa_fcs_rport_send_nsdisc(rport, NULL);
1136 break; 1108 break;
1137 1109
1138 case RPSM_EVENT_DELETE: 1110 case RPSM_EVENT_DELETE:
@@ -1171,11 +1143,11 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event)
1171} 1143}
1172 1144
1173/** 1145/**
1174 * Rport address has changed. Nameserver discovery request is being sent. 1146 * Rport address has changed. Nameserver discovery request is being sent.
1175 */ 1147 */
1176static void 1148static void
1177bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport, 1149bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport,
1178 enum rport_event event) 1150 enum rport_event event)
1179{ 1151{
1180 bfa_trc(rport->fcs, rport->pwwn); 1152 bfa_trc(rport->fcs, rport->pwwn);
1181 bfa_trc(rport->fcs, rport->pid); 1153 bfa_trc(rport->fcs, rport->pid);
@@ -1205,7 +1177,7 @@ bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport,
1205 break; 1177 break;
1206 1178
1207 case RPSM_EVENT_ADDRESS_CHANGE: 1179 case RPSM_EVENT_ADDRESS_CHANGE:
1208 rport->ns_retries = 0; /* reset the retry count */ 1180 rport->ns_retries = 0; /* reset the retry count */
1209 break; 1181 break;
1210 1182
1211 case RPSM_EVENT_LOGO_IMP: 1183 case RPSM_EVENT_LOGO_IMP:
@@ -1228,11 +1200,11 @@ bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport,
1228} 1200}
1229 1201
1230/** 1202/**
1231 * Nameserver discovery failed. Waiting for timeout to retry. 1203 * Nameserver discovery failed. Waiting for timeout to retry.
1232 */ 1204 */
1233static void 1205static void
1234bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport, 1206bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
1235 enum rport_event event) 1207 enum rport_event event)
1236{ 1208{
1237 bfa_trc(rport->fcs, rport->pwwn); 1209 bfa_trc(rport->fcs, rport->pwwn);
1238 bfa_trc(rport->fcs, rport->pid); 1210 bfa_trc(rport->fcs, rport->pid);
@@ -1241,7 +1213,7 @@ bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
1241 switch (event) { 1213 switch (event) {
1242 case RPSM_EVENT_TIMEOUT: 1214 case RPSM_EVENT_TIMEOUT:
1243 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); 1215 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
1244 bfa_fcs_rport_send_gidpn(rport, NULL); 1216 bfa_fcs_rport_send_nsdisc(rport, NULL);
1245 break; 1217 break;
1246 1218
1247 case RPSM_EVENT_SCN: 1219 case RPSM_EVENT_SCN:
@@ -1249,7 +1221,7 @@ bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
1249 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); 1221 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
1250 bfa_timer_stop(&rport->timer); 1222 bfa_timer_stop(&rport->timer);
1251 rport->ns_retries = 0; 1223 rport->ns_retries = 0;
1252 bfa_fcs_rport_send_gidpn(rport, NULL); 1224 bfa_fcs_rport_send_nsdisc(rport, NULL);
1253 break; 1225 break;
1254 1226
1255 case RPSM_EVENT_DELETE: 1227 case RPSM_EVENT_DELETE:
@@ -1276,7 +1248,6 @@ bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
1276 case RPSM_EVENT_LOGO_RCVD: 1248 case RPSM_EVENT_LOGO_RCVD:
1277 bfa_fcs_rport_send_logo_acc(rport); 1249 bfa_fcs_rport_send_logo_acc(rport);
1278 break; 1250 break;
1279
1280 case RPSM_EVENT_PRLO_RCVD: 1251 case RPSM_EVENT_PRLO_RCVD:
1281 bfa_fcs_rport_send_prlo_acc(rport); 1252 bfa_fcs_rport_send_prlo_acc(rport);
1282 break; 1253 break;
@@ -1293,7 +1264,7 @@ bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
1293} 1264}
1294 1265
1295/** 1266/**
1296 * Rport address has changed. Nameserver discovery request is sent. 1267 * Rport address has changed. Nameserver discovery request is sent.
1297 */ 1268 */
1298static void 1269static void
1299bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport, 1270bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
@@ -1311,9 +1282,9 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
1311 bfa_fcs_rport_send_plogi(rport, NULL); 1282 bfa_fcs_rport_send_plogi(rport, NULL);
1312 } else { 1283 } else {
1313 bfa_sm_set_state(rport, 1284 bfa_sm_set_state(rport,
1314 bfa_fcs_rport_sm_nsdisc_sending); 1285 bfa_fcs_rport_sm_nsdisc_sending);
1315 rport->ns_retries = 0; 1286 rport->ns_retries = 0;
1316 bfa_fcs_rport_send_gidpn(rport, NULL); 1287 bfa_fcs_rport_send_nsdisc(rport, NULL);
1317 } 1288 }
1318 break; 1289 break;
1319 1290
@@ -1321,8 +1292,8 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
1321 rport->ns_retries++; 1292 rport->ns_retries++;
1322 if (rport->ns_retries < BFA_FCS_RPORT_MAX_RETRIES) { 1293 if (rport->ns_retries < BFA_FCS_RPORT_MAX_RETRIES) {
1323 bfa_sm_set_state(rport, 1294 bfa_sm_set_state(rport,
1324 bfa_fcs_rport_sm_nsdisc_sending); 1295 bfa_fcs_rport_sm_nsdisc_sending);
1325 bfa_fcs_rport_send_gidpn(rport, NULL); 1296 bfa_fcs_rport_send_nsdisc(rport, NULL);
1326 } else { 1297 } else {
1327 rport->pid = 0; 1298 rport->pid = 0;
1328 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); 1299 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
@@ -1353,10 +1324,10 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
1353 bfa_fcs_rport_del_timeout); 1324 bfa_fcs_rport_del_timeout);
1354 break; 1325 break;
1355 1326
1327
1356 case RPSM_EVENT_PRLO_RCVD: 1328 case RPSM_EVENT_PRLO_RCVD:
1357 bfa_fcs_rport_send_prlo_acc(rport); 1329 bfa_fcs_rport_send_prlo_acc(rport);
1358 break; 1330 break;
1359
1360 case RPSM_EVENT_SCN: 1331 case RPSM_EVENT_SCN:
1361 /** 1332 /**
1362 * ignore, wait for NS query response 1333 * ignore, wait for NS query response
@@ -1391,29 +1362,29 @@ static void
1391bfa_fcs_rport_send_plogi(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) 1362bfa_fcs_rport_send_plogi(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1392{ 1363{
1393 struct bfa_fcs_rport_s *rport = rport_cbarg; 1364 struct bfa_fcs_rport_s *rport = rport_cbarg;
1394 struct bfa_fcs_port_s *port = rport->port; 1365 struct bfa_fcs_lport_s *port = rport->port;
1395 struct fchs_s fchs; 1366 struct fchs_s fchs;
1396 int len; 1367 int len;
1397 struct bfa_fcxp_s *fcxp; 1368 struct bfa_fcxp_s *fcxp;
1398 1369
1399 bfa_trc(rport->fcs, rport->pwwn); 1370 bfa_trc(rport->fcs, rport->pwwn);
1400 1371
1401 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); 1372 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
1402 if (!fcxp) { 1373 if (!fcxp) {
1403 bfa_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, 1374 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
1404 bfa_fcs_rport_send_plogi, rport); 1375 bfa_fcs_rport_send_plogi, rport);
1405 return; 1376 return;
1406 } 1377 }
1407 rport->fcxp = fcxp; 1378 rport->fcxp = fcxp;
1408 1379
1409 len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, 1380 len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
1410 bfa_fcs_port_get_fcid(port), 0, 1381 bfa_fcs_lport_get_fcid(port), 0,
1411 port->port_cfg.pwwn, port->port_cfg.nwwn, 1382 port->port_cfg.pwwn, port->port_cfg.nwwn,
1412 bfa_fcport_get_maxfrsize(port->fcs->bfa)); 1383 bfa_fcport_get_maxfrsize(port->fcs->bfa));
1413 1384
1414 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 1385 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1415 FC_CLASS_3, len, &fchs, bfa_fcs_rport_plogi_response, 1386 FC_CLASS_3, len, &fchs, bfa_fcs_rport_plogi_response,
1416 (void *)rport, FC_MAX_PDUSZ, FC_ELS_TOV); 1387 (void *)rport, FC_MAX_PDUSZ, FC_ELS_TOV);
1417 1388
1418 rport->stats.plogis++; 1389 rport->stats.plogis++;
1419 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); 1390 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
@@ -1421,14 +1392,14 @@ bfa_fcs_rport_send_plogi(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1421 1392
1422static void 1393static void
1423bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, 1394bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1424 bfa_status_t req_status, u32 rsp_len, 1395 bfa_status_t req_status, u32 rsp_len,
1425 u32 resid_len, struct fchs_s *rsp_fchs) 1396 u32 resid_len, struct fchs_s *rsp_fchs)
1426{ 1397{
1427 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *)cbarg; 1398 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
1428 struct fc_logi_s *plogi_rsp; 1399 struct fc_logi_s *plogi_rsp;
1429 struct fc_ls_rjt_s *ls_rjt; 1400 struct fc_ls_rjt_s *ls_rjt;
1430 struct bfa_fcs_rport_s *twin; 1401 struct bfa_fcs_rport_s *twin;
1431 struct list_head *qe; 1402 struct list_head *qe;
1432 1403
1433 bfa_trc(rport->fcs, rport->pwwn); 1404 bfa_trc(rport->fcs, rport->pwwn);
1434 1405
@@ -1453,6 +1424,13 @@ bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1453 bfa_trc(rport->fcs, ls_rjt->reason_code); 1424 bfa_trc(rport->fcs, ls_rjt->reason_code);
1454 bfa_trc(rport->fcs, ls_rjt->reason_code_expl); 1425 bfa_trc(rport->fcs, ls_rjt->reason_code_expl);
1455 1426
1427 if ((ls_rjt->reason_code == FC_LS_RJT_RSN_UNABLE_TO_PERF_CMD) &&
1428 (ls_rjt->reason_code_expl == FC_LS_RJT_EXP_INSUFF_RES)) {
1429 rport->stats.rjt_insuff_res++;
1430 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RETRY);
1431 return;
1432 }
1433
1456 rport->stats.plogi_rejects++; 1434 rport->stats.plogi_rejects++;
1457 bfa_sm_send_event(rport, RPSM_EVENT_FAILED); 1435 bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
1458 return; 1436 return;
@@ -1463,22 +1441,22 @@ bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1463 * device with a new FC port address. 1441 * device with a new FC port address.
1464 */ 1442 */
1465 list_for_each(qe, &rport->port->rport_q) { 1443 list_for_each(qe, &rport->port->rport_q) {
1466 twin = (struct bfa_fcs_rport_s *)qe; 1444 twin = (struct bfa_fcs_rport_s *) qe;
1467 if (twin == rport) 1445 if (twin == rport)
1468 continue; 1446 continue;
1469 if (!rport->pwwn && (plogi_rsp->port_name == twin->pwwn)) { 1447 if (!rport->pwwn && (plogi_rsp->port_name == twin->pwwn)) {
1470 bfa_trc(rport->fcs, twin->pid); 1448 bfa_trc(rport->fcs, twin->pid);
1471 bfa_trc(rport->fcs, rport->pid); 1449 bfa_trc(rport->fcs, rport->pid);
1472 1450
1473 /* 1451 /* Update plogi stats in twin */
1474 * Update plogi stats in twin 1452 twin->stats.plogis += rport->stats.plogis;
1475 */ 1453 twin->stats.plogi_rejects +=
1476 twin->stats.plogis += rport->stats.plogis; 1454 rport->stats.plogi_rejects;
1477 twin->stats.plogi_rejects += rport->stats.plogi_rejects; 1455 twin->stats.plogi_timeouts +=
1478 twin->stats.plogi_timeouts += 1456 rport->stats.plogi_timeouts;
1479 rport->stats.plogi_timeouts; 1457 twin->stats.plogi_failed +=
1480 twin->stats.plogi_failed += rport->stats.plogi_failed; 1458 rport->stats.plogi_failed;
1481 twin->stats.plogi_rcvd += rport->stats.plogi_rcvd; 1459 twin->stats.plogi_rcvd += rport->stats.plogi_rcvd;
1482 twin->stats.plogi_accs++; 1460 twin->stats.plogi_accs++;
1483 1461
1484 bfa_fcs_rport_delete(rport); 1462 bfa_fcs_rport_delete(rport);
@@ -1502,9 +1480,9 @@ static void
1502bfa_fcs_rport_send_plogiacc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) 1480bfa_fcs_rport_send_plogiacc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1503{ 1481{
1504 struct bfa_fcs_rport_s *rport = rport_cbarg; 1482 struct bfa_fcs_rport_s *rport = rport_cbarg;
1505 struct bfa_fcs_port_s *port = rport->port; 1483 struct bfa_fcs_lport_s *port = rport->port;
1506 struct fchs_s fchs; 1484 struct fchs_s fchs;
1507 int len; 1485 int len;
1508 struct bfa_fcxp_s *fcxp; 1486 struct bfa_fcxp_s *fcxp;
1509 1487
1510 bfa_trc(rport->fcs, rport->pwwn); 1488 bfa_trc(rport->fcs, rport->pwwn);
@@ -1512,19 +1490,20 @@ bfa_fcs_rport_send_plogiacc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1512 1490
1513 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); 1491 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
1514 if (!fcxp) { 1492 if (!fcxp) {
1515 bfa_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, 1493 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
1516 bfa_fcs_rport_send_plogiacc, rport); 1494 bfa_fcs_rport_send_plogiacc, rport);
1517 return; 1495 return;
1518 } 1496 }
1519 rport->fcxp = fcxp; 1497 rport->fcxp = fcxp;
1520 1498
1521 len = fc_plogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, 1499 len = fc_plogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
1522 bfa_fcs_port_get_fcid(port), rport->reply_oxid, 1500 rport->pid, bfa_fcs_lport_get_fcid(port),
1523 port->port_cfg.pwwn, port->port_cfg.nwwn, 1501 rport->reply_oxid, port->port_cfg.pwwn,
1502 port->port_cfg.nwwn,
1524 bfa_fcport_get_maxfrsize(port->fcs->bfa)); 1503 bfa_fcport_get_maxfrsize(port->fcs->bfa));
1525 1504
1526 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 1505 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1527 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); 1506 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
1528 1507
1529 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); 1508 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
1530} 1509}
@@ -1533,28 +1512,28 @@ static void
1533bfa_fcs_rport_send_adisc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) 1512bfa_fcs_rport_send_adisc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1534{ 1513{
1535 struct bfa_fcs_rport_s *rport = rport_cbarg; 1514 struct bfa_fcs_rport_s *rport = rport_cbarg;
1536 struct bfa_fcs_port_s *port = rport->port; 1515 struct bfa_fcs_lport_s *port = rport->port;
1537 struct fchs_s fchs; 1516 struct fchs_s fchs;
1538 int len; 1517 int len;
1539 struct bfa_fcxp_s *fcxp; 1518 struct bfa_fcxp_s *fcxp;
1540 1519
1541 bfa_trc(rport->fcs, rport->pwwn); 1520 bfa_trc(rport->fcs, rport->pwwn);
1542 1521
1543 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); 1522 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
1544 if (!fcxp) { 1523 if (!fcxp) {
1545 bfa_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, 1524 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
1546 bfa_fcs_rport_send_adisc, rport); 1525 bfa_fcs_rport_send_adisc, rport);
1547 return; 1526 return;
1548 } 1527 }
1549 rport->fcxp = fcxp; 1528 rport->fcxp = fcxp;
1550 1529
1551 len = fc_adisc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, 1530 len = fc_adisc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
1552 bfa_fcs_port_get_fcid(port), 0, 1531 bfa_fcs_lport_get_fcid(port), 0,
1553 port->port_cfg.pwwn, port->port_cfg.nwwn); 1532 port->port_cfg.pwwn, port->port_cfg.nwwn);
1554 1533
1555 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 1534 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1556 FC_CLASS_3, len, &fchs, bfa_fcs_rport_adisc_response, 1535 FC_CLASS_3, len, &fchs, bfa_fcs_rport_adisc_response,
1557 rport, FC_MAX_PDUSZ, FC_ELS_TOV); 1536 rport, FC_MAX_PDUSZ, FC_ELS_TOV);
1558 1537
1559 rport->stats.adisc_sent++; 1538 rport->stats.adisc_sent++;
1560 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); 1539 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
@@ -1562,12 +1541,12 @@ bfa_fcs_rport_send_adisc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1562 1541
1563static void 1542static void
1564bfa_fcs_rport_adisc_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, 1543bfa_fcs_rport_adisc_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1565 bfa_status_t req_status, u32 rsp_len, 1544 bfa_status_t req_status, u32 rsp_len,
1566 u32 resid_len, struct fchs_s *rsp_fchs) 1545 u32 resid_len, struct fchs_s *rsp_fchs)
1567{ 1546{
1568 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *)cbarg; 1547 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
1569 void *pld = bfa_fcxp_get_rspbuf(fcxp); 1548 void *pld = bfa_fcxp_get_rspbuf(fcxp);
1570 struct fc_ls_rjt_s *ls_rjt; 1549 struct fc_ls_rjt_s *ls_rjt;
1571 1550
1572 if (req_status != BFA_STATUS_OK) { 1551 if (req_status != BFA_STATUS_OK) {
1573 bfa_trc(rport->fcs, req_status); 1552 bfa_trc(rport->fcs, req_status);
@@ -1577,7 +1556,7 @@ bfa_fcs_rport_adisc_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1577 } 1556 }
1578 1557
1579 if (fc_adisc_rsp_parse((struct fc_adisc_s *)pld, rsp_len, rport->pwwn, 1558 if (fc_adisc_rsp_parse((struct fc_adisc_s *)pld, rsp_len, rport->pwwn,
1580 rport->nwwn) == FC_PARSE_OK) { 1559 rport->nwwn) == FC_PARSE_OK) {
1581 rport->stats.adisc_accs++; 1560 rport->stats.adisc_accs++;
1582 bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED); 1561 bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED);
1583 return; 1562 return;
@@ -1592,44 +1571,52 @@ bfa_fcs_rport_adisc_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1592} 1571}
1593 1572
1594static void 1573static void
1595bfa_fcs_rport_send_gidpn(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) 1574bfa_fcs_rport_send_nsdisc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1596{ 1575{
1597 struct bfa_fcs_rport_s *rport = rport_cbarg; 1576 struct bfa_fcs_rport_s *rport = rport_cbarg;
1598 struct bfa_fcs_port_s *port = rport->port; 1577 struct bfa_fcs_lport_s *port = rport->port;
1599 struct fchs_s fchs; 1578 struct fchs_s fchs;
1600 struct bfa_fcxp_s *fcxp; 1579 struct bfa_fcxp_s *fcxp;
1601 int len; 1580 int len;
1581 bfa_cb_fcxp_send_t cbfn;
1602 1582
1603 bfa_trc(rport->fcs, rport->pid); 1583 bfa_trc(rport->fcs, rport->pid);
1604 1584
1605 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); 1585 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
1606 if (!fcxp) { 1586 if (!fcxp) {
1607 bfa_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, 1587 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
1608 bfa_fcs_rport_send_gidpn, rport); 1588 bfa_fcs_rport_send_nsdisc, rport);
1609 return; 1589 return;
1610 } 1590 }
1611 rport->fcxp = fcxp; 1591 rport->fcxp = fcxp;
1612 1592
1613 len = fc_gidpn_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), 1593 if (rport->pwwn) {
1614 bfa_fcs_port_get_fcid(port), 0, rport->pwwn); 1594 len = fc_gidpn_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
1595 bfa_fcs_lport_get_fcid(port), 0, rport->pwwn);
1596 cbfn = bfa_fcs_rport_gidpn_response;
1597 } else {
1598 len = fc_gpnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
1599 bfa_fcs_lport_get_fcid(port), 0, rport->pid);
1600 cbfn = bfa_fcs_rport_gpnid_response;
1601 }
1615 1602
1616 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 1603 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1617 FC_CLASS_3, len, &fchs, bfa_fcs_rport_gidpn_response, 1604 FC_CLASS_3, len, &fchs, cbfn,
1618 (void *)rport, FC_MAX_PDUSZ, FC_FCCT_TOV); 1605 (void *)rport, FC_MAX_PDUSZ, FC_FCCT_TOV);
1619 1606
1620 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); 1607 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
1621} 1608}
1622 1609
1623static void 1610static void
1624bfa_fcs_rport_gidpn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, 1611bfa_fcs_rport_gidpn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1625 bfa_status_t req_status, u32 rsp_len, 1612 bfa_status_t req_status, u32 rsp_len,
1626 u32 resid_len, struct fchs_s *rsp_fchs) 1613 u32 resid_len, struct fchs_s *rsp_fchs)
1627{ 1614{
1628 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *)cbarg; 1615 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
1629 struct bfa_fcs_rport_s *twin; 1616 struct ct_hdr_s *cthdr;
1630 struct list_head *qe;
1631 struct ct_hdr_s *cthdr;
1632 struct fcgs_gidpn_resp_s *gidpn_rsp; 1617 struct fcgs_gidpn_resp_s *gidpn_rsp;
1618 struct bfa_fcs_rport_s *twin;
1619 struct list_head *qe;
1633 1620
1634 bfa_trc(rport->fcs, rport->pwwn); 1621 bfa_trc(rport->fcs, rport->pwwn);
1635 1622
@@ -1637,25 +1624,21 @@ bfa_fcs_rport_gidpn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1637 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); 1624 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
1638 1625
1639 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { 1626 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
1640 /* 1627 /* Check if the pid is the same as before. */
1641 * Check if the pid is the same as before.
1642 */
1643 gidpn_rsp = (struct fcgs_gidpn_resp_s *) (cthdr + 1); 1628 gidpn_rsp = (struct fcgs_gidpn_resp_s *) (cthdr + 1);
1644 1629
1645 if (gidpn_rsp->dap == rport->pid) { 1630 if (gidpn_rsp->dap == rport->pid) {
1646 /* 1631 /* Device is online */
1647 * Device is online
1648 */
1649 bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED); 1632 bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED);
1650 } else { 1633 } else {
1651 /* 1634 /*
1652 * Device's PID has changed. We need to cleanup and 1635 * Device's PID has changed. We need to cleanup
1653 * re-login. If there is another device with the the 1636 * and re-login. If there is another device with
1654 * newly discovered pid, send an scn notice so that its 1637 * the the newly discovered pid, send an scn notice
1655 * new pid can be discovered. 1638 * so that its new pid can be discovered.
1656 */ 1639 */
1657 list_for_each(qe, &rport->port->rport_q) { 1640 list_for_each(qe, &rport->port->rport_q) {
1658 twin = (struct bfa_fcs_rport_s *)qe; 1641 twin = (struct bfa_fcs_rport_s *) qe;
1659 if (twin == rport) 1642 if (twin == rport)
1660 continue; 1643 continue;
1661 if (gidpn_rsp->dap == twin->pid) { 1644 if (gidpn_rsp->dap == twin->pid) {
@@ -1664,7 +1647,7 @@ bfa_fcs_rport_gidpn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1664 1647
1665 twin->pid = 0; 1648 twin->pid = 0;
1666 bfa_sm_send_event(twin, 1649 bfa_sm_send_event(twin,
1667 RPSM_EVENT_ADDRESS_CHANGE); 1650 RPSM_EVENT_ADDRESS_CHANGE);
1668 } 1651 }
1669 } 1652 }
1670 rport->pid = gidpn_rsp->dap; 1653 rport->pid = gidpn_rsp->dap;
@@ -1697,17 +1680,59 @@ bfa_fcs_rport_gidpn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1697 } 1680 }
1698} 1681}
1699 1682
1683static void
1684bfa_fcs_rport_gpnid_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1685 bfa_status_t req_status, u32 rsp_len,
1686 u32 resid_len, struct fchs_s *rsp_fchs)
1687{
1688 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
1689 struct ct_hdr_s *cthdr;
1690
1691 bfa_trc(rport->fcs, rport->pwwn);
1692
1693 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
1694 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
1695
1696 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
1697 bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED);
1698 return;
1699 }
1700
1701 /*
1702 * Reject Response
1703 */
1704 switch (cthdr->reason_code) {
1705 case CT_RSN_LOGICAL_BUSY:
1706 /*
1707 * Need to retry
1708 */
1709 bfa_sm_send_event(rport, RPSM_EVENT_TIMEOUT);
1710 break;
1711
1712 case CT_RSN_UNABLE_TO_PERF:
1713 /*
1714 * device doesn't exist : Start timer to cleanup this later.
1715 */
1716 bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
1717 break;
1718
1719 default:
1720 bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
1721 break;
1722 }
1723}
1724
1700/** 1725/**
1701 * Called to send a logout to the rport. 1726 * Called to send a logout to the rport.
1702 */ 1727 */
1703static void 1728static void
1704bfa_fcs_rport_send_logo(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) 1729bfa_fcs_rport_send_logo(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1705{ 1730{
1706 struct bfa_fcs_rport_s *rport = rport_cbarg; 1731 struct bfa_fcs_rport_s *rport = rport_cbarg;
1707 struct bfa_fcs_port_s *port; 1732 struct bfa_fcs_lport_s *port;
1708 struct fchs_s fchs; 1733 struct fchs_s fchs;
1709 struct bfa_fcxp_s *fcxp; 1734 struct bfa_fcxp_s *fcxp;
1710 u16 len; 1735 u16 len;
1711 1736
1712 bfa_trc(rport->fcs, rport->pid); 1737 bfa_trc(rport->fcs, rport->pid);
1713 1738
@@ -1715,19 +1740,19 @@ bfa_fcs_rport_send_logo(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1715 1740
1716 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); 1741 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
1717 if (!fcxp) { 1742 if (!fcxp) {
1718 bfa_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, 1743 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
1719 bfa_fcs_rport_send_logo, rport); 1744 bfa_fcs_rport_send_logo, rport);
1720 return; 1745 return;
1721 } 1746 }
1722 rport->fcxp = fcxp; 1747 rport->fcxp = fcxp;
1723 1748
1724 len = fc_logo_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, 1749 len = fc_logo_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
1725 bfa_fcs_port_get_fcid(port), 0, 1750 bfa_fcs_lport_get_fcid(port), 0,
1726 bfa_fcs_port_get_pwwn(port)); 1751 bfa_fcs_lport_get_pwwn(port));
1727 1752
1728 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 1753 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1729 FC_CLASS_3, len, &fchs, NULL, rport, FC_MAX_PDUSZ, 1754 FC_CLASS_3, len, &fchs, NULL,
1730 FC_ELS_TOV); 1755 rport, FC_MAX_PDUSZ, FC_ELS_TOV);
1731 1756
1732 rport->stats.logos++; 1757 rport->stats.logos++;
1733 bfa_fcxp_discard(rport->fcxp); 1758 bfa_fcxp_discard(rport->fcxp);
@@ -1735,16 +1760,16 @@ bfa_fcs_rport_send_logo(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1735} 1760}
1736 1761
1737/** 1762/**
1738 * Send ACC for a LOGO received. 1763 * Send ACC for a LOGO received.
1739 */ 1764 */
1740static void 1765static void
1741bfa_fcs_rport_send_logo_acc(void *rport_cbarg) 1766bfa_fcs_rport_send_logo_acc(void *rport_cbarg)
1742{ 1767{
1743 struct bfa_fcs_rport_s *rport = rport_cbarg; 1768 struct bfa_fcs_rport_s *rport = rport_cbarg;
1744 struct bfa_fcs_port_s *port; 1769 struct bfa_fcs_lport_s *port;
1745 struct fchs_s fchs; 1770 struct fchs_s fchs;
1746 struct bfa_fcxp_s *fcxp; 1771 struct bfa_fcxp_s *fcxp;
1747 u16 len; 1772 u16 len;
1748 1773
1749 bfa_trc(rport->fcs, rport->pid); 1774 bfa_trc(rport->fcs, rport->pid);
1750 1775
@@ -1755,32 +1780,35 @@ bfa_fcs_rport_send_logo_acc(void *rport_cbarg)
1755 return; 1780 return;
1756 1781
1757 rport->stats.logo_rcvd++; 1782 rport->stats.logo_rcvd++;
1758 len = fc_logo_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, 1783 len = fc_logo_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
1759 bfa_fcs_port_get_fcid(port), rport->reply_oxid); 1784 rport->pid, bfa_fcs_lport_get_fcid(port),
1785 rport->reply_oxid);
1760 1786
1761 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 1787 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1762 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); 1788 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
1763} 1789}
1764 1790
1765/** 1791/**
1766 * This routine will be called by bfa_timer on timer timeouts. 1792 * brief
1793 * This routine will be called by bfa_timer on timer timeouts.
1767 * 1794 *
1768 * param[in] rport - pointer to bfa_fcs_port_ns_t. 1795 * param[in] rport - pointer to bfa_fcs_lport_ns_t.
1769 * param[out] rport_status - pointer to return vport status in 1796 * param[out] rport_status - pointer to return vport status in
1770 * 1797 *
1771 * return 1798 * return
1772 * void 1799 * void
1773 * 1800 *
1774* Special Considerations: 1801 * Special Considerations:
1775 * 1802 *
1776 * note 1803 * note
1777 */ 1804 */
1778static void 1805static void
1779bfa_fcs_rport_timeout(void *arg) 1806bfa_fcs_rport_timeout(void *arg)
1780{ 1807{
1781 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *)arg; 1808 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) arg;
1782 1809
1783 rport->stats.plogi_timeouts++; 1810 rport->stats.plogi_timeouts++;
1811 bfa_stats(rport->port, rport_plogi_timeouts);
1784 bfa_sm_send_event(rport, RPSM_EVENT_TIMEOUT); 1812 bfa_sm_send_event(rport, RPSM_EVENT_TIMEOUT);
1785} 1813}
1786 1814
@@ -1789,50 +1817,45 @@ bfa_fcs_rport_process_prli(struct bfa_fcs_rport_s *rport,
1789 struct fchs_s *rx_fchs, u16 len) 1817 struct fchs_s *rx_fchs, u16 len)
1790{ 1818{
1791 struct bfa_fcxp_s *fcxp; 1819 struct bfa_fcxp_s *fcxp;
1792 struct fchs_s fchs; 1820 struct fchs_s fchs;
1793 struct bfa_fcs_port_s *port = rport->port; 1821 struct bfa_fcs_lport_s *port = rport->port;
1794 struct fc_prli_s *prli; 1822 struct fc_prli_s *prli;
1795 1823
1796 bfa_trc(port->fcs, rx_fchs->s_id); 1824 bfa_trc(port->fcs, rx_fchs->s_id);
1797 bfa_trc(port->fcs, rx_fchs->d_id); 1825 bfa_trc(port->fcs, rx_fchs->d_id);
1798 1826
1799 rport->stats.prli_rcvd++; 1827 rport->stats.prli_rcvd++;
1800 1828
1801 if (BFA_FCS_VPORT_IS_TARGET_MODE(port)) {
1802 /*
1803 * Target Mode : Let the fcptm handle it
1804 */
1805 bfa_fcs_tin_rx_prli(rport->tin, rx_fchs, len);
1806 return;
1807 }
1808
1809 /* 1829 /*
1810 * We are either in Initiator or ipfc Mode 1830 * We are in Initiator Mode
1811 */ 1831 */
1812 prli = (struct fc_prli_s *) (rx_fchs + 1); 1832 prli = (struct fc_prli_s *) (rx_fchs + 1);
1813 1833
1814 if (prli->parampage.servparams.initiator) { 1834 if (prli->parampage.servparams.target) {
1815 bfa_trc(rport->fcs, prli->parampage.type);
1816 rport->scsi_function = BFA_RPORT_INITIATOR;
1817 bfa_fcs_itnim_is_initiator(rport->itnim);
1818 } else {
1819 /* 1835 /*
1820 * @todo: PRLI from a target ? 1836 * PRLI from a target ?
1837 * Send the Acc.
1838 * PRLI sent by us will be used to transition the IT nexus,
1839 * once the response is received from the target.
1821 */ 1840 */
1822 bfa_trc(port->fcs, rx_fchs->s_id); 1841 bfa_trc(port->fcs, rx_fchs->s_id);
1823 rport->scsi_function = BFA_RPORT_TARGET; 1842 rport->scsi_function = BFA_RPORT_TARGET;
1843 } else {
1844 bfa_trc(rport->fcs, prli->parampage.type);
1845 rport->scsi_function = BFA_RPORT_INITIATOR;
1846 bfa_fcs_itnim_is_initiator(rport->itnim);
1824 } 1847 }
1825 1848
1826 fcxp = bfa_fcs_fcxp_alloc(port->fcs); 1849 fcxp = bfa_fcs_fcxp_alloc(port->fcs);
1827 if (!fcxp) 1850 if (!fcxp)
1828 return; 1851 return;
1829 1852
1830 len = fc_prli_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id, 1853 len = fc_prli_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
1831 bfa_fcs_port_get_fcid(port), rx_fchs->ox_id, 1854 rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
1832 port->port_cfg.roles); 1855 rx_fchs->ox_id, port->port_cfg.roles);
1833 1856
1834 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 1857 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1835 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); 1858 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
1836} 1859}
1837 1860
1838static void 1861static void
@@ -1840,10 +1863,10 @@ bfa_fcs_rport_process_rpsc(struct bfa_fcs_rport_s *rport,
1840 struct fchs_s *rx_fchs, u16 len) 1863 struct fchs_s *rx_fchs, u16 len)
1841{ 1864{
1842 struct bfa_fcxp_s *fcxp; 1865 struct bfa_fcxp_s *fcxp;
1843 struct fchs_s fchs; 1866 struct fchs_s fchs;
1844 struct bfa_fcs_port_s *port = rport->port; 1867 struct bfa_fcs_lport_s *port = rport->port;
1845 struct fc_rpsc_speed_info_s speeds; 1868 struct fc_rpsc_speed_info_s speeds;
1846 struct bfa_pport_attr_s pport_attr; 1869 struct bfa_port_attr_s pport_attr;
1847 1870
1848 bfa_trc(port->fcs, rx_fchs->s_id); 1871 bfa_trc(port->fcs, rx_fchs->s_id);
1849 bfa_trc(port->fcs, rx_fchs->d_id); 1872 bfa_trc(port->fcs, rx_fchs->d_id);
@@ -1864,12 +1887,12 @@ bfa_fcs_rport_process_rpsc(struct bfa_fcs_rport_s *rport,
1864 if (!fcxp) 1887 if (!fcxp)
1865 return; 1888 return;
1866 1889
1867 len = fc_rpsc_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id, 1890 len = fc_rpsc_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
1868 bfa_fcs_port_get_fcid(port), rx_fchs->ox_id, 1891 rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
1869 &speeds); 1892 rx_fchs->ox_id, &speeds);
1870 1893
1871 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 1894 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1872 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); 1895 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
1873} 1896}
1874 1897
1875static void 1898static void
@@ -1877,28 +1900,20 @@ bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
1877 struct fchs_s *rx_fchs, u16 len) 1900 struct fchs_s *rx_fchs, u16 len)
1878{ 1901{
1879 struct bfa_fcxp_s *fcxp; 1902 struct bfa_fcxp_s *fcxp;
1880 struct fchs_s fchs; 1903 struct fchs_s fchs;
1881 struct bfa_fcs_port_s *port = rport->port; 1904 struct bfa_fcs_lport_s *port = rport->port;
1882 struct fc_adisc_s *adisc; 1905 struct fc_adisc_s *adisc;
1883 1906
1884 bfa_trc(port->fcs, rx_fchs->s_id); 1907 bfa_trc(port->fcs, rx_fchs->s_id);
1885 bfa_trc(port->fcs, rx_fchs->d_id); 1908 bfa_trc(port->fcs, rx_fchs->d_id);
1886 1909
1887 rport->stats.adisc_rcvd++; 1910 rport->stats.adisc_rcvd++;
1888 1911
1889 if (BFA_FCS_VPORT_IS_TARGET_MODE(port)) {
1890 /*
1891 * @todo : Target Mode handling
1892 */
1893 bfa_trc(port->fcs, rx_fchs->d_id);
1894 bfa_assert(0);
1895 return;
1896 }
1897
1898 adisc = (struct fc_adisc_s *) (rx_fchs + 1); 1912 adisc = (struct fc_adisc_s *) (rx_fchs + 1);
1899 1913
1900 /* 1914 /*
1901 * Accept if the itnim for this rport is online. Else reject the ADISC 1915 * Accept if the itnim for this rport is online.
1916 * Else reject the ADISC.
1902 */ 1917 */
1903 if (bfa_fcs_itnim_get_online_state(rport->itnim) == BFA_STATUS_OK) { 1918 if (bfa_fcs_itnim_get_online_state(rport->itnim) == BFA_STATUS_OK) {
1904 1919
@@ -1907,27 +1922,25 @@ bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
1907 return; 1922 return;
1908 1923
1909 len = fc_adisc_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), 1924 len = fc_adisc_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
1910 rx_fchs->s_id, 1925 rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
1911 bfa_fcs_port_get_fcid(port), 1926 rx_fchs->ox_id, port->port_cfg.pwwn,
1912 rx_fchs->ox_id, port->port_cfg.pwwn, 1927 port->port_cfg.nwwn);
1913 port->port_cfg.nwwn);
1914 1928
1915 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, 1929 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag,
1916 BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, 1930 BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
1917 FC_MAX_PDUSZ, 0); 1931 FC_MAX_PDUSZ, 0);
1918 } else { 1932 } else {
1919 rport->stats.adisc_rejected++; 1933 rport->stats.adisc_rejected++;
1920 bfa_fcs_rport_send_ls_rjt(rport, rx_fchs, 1934 bfa_fcs_rport_send_ls_rjt(rport, rx_fchs,
1921 FC_LS_RJT_RSN_UNABLE_TO_PERF_CMD, 1935 FC_LS_RJT_RSN_UNABLE_TO_PERF_CMD,
1922 FC_LS_RJT_EXP_LOGIN_REQUIRED); 1936 FC_LS_RJT_EXP_LOGIN_REQUIRED);
1923 } 1937 }
1924
1925} 1938}
1926 1939
1927static void 1940static void
1928bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport) 1941bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport)
1929{ 1942{
1930 struct bfa_fcs_port_s *port = rport->port; 1943 struct bfa_fcs_lport_s *port = rport->port;
1931 struct bfa_rport_info_s rport_info; 1944 struct bfa_rport_info_s rport_info;
1932 1945
1933 rport_info.pid = rport->pid; 1946 rport_info.pid = rport->pid;
@@ -1941,38 +1954,18 @@ bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport)
1941 bfa_rport_online(rport->bfa_rport, &rport_info); 1954 bfa_rport_online(rport->bfa_rport, &rport_info);
1942} 1955}
1943 1956
1944static void
1945bfa_fcs_rport_fc4_pause(struct bfa_fcs_rport_s *rport)
1946{
1947 if (bfa_fcs_port_is_initiator(rport->port))
1948 bfa_fcs_itnim_pause(rport->itnim);
1949
1950 if (bfa_fcs_port_is_target(rport->port))
1951 bfa_fcs_tin_pause(rport->tin);
1952}
1953
1954static void
1955bfa_fcs_rport_fc4_resume(struct bfa_fcs_rport_s *rport)
1956{
1957 if (bfa_fcs_port_is_initiator(rport->port))
1958 bfa_fcs_itnim_resume(rport->itnim);
1959
1960 if (bfa_fcs_port_is_target(rport->port))
1961 bfa_fcs_tin_resume(rport->tin);
1962}
1963
1964static struct bfa_fcs_rport_s * 1957static struct bfa_fcs_rport_s *
1965bfa_fcs_rport_alloc(struct bfa_fcs_port_s *port, wwn_t pwwn, u32 rpid) 1958bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
1966{ 1959{
1967 struct bfa_fcs_s *fcs = port->fcs; 1960 struct bfa_fcs_s *fcs = port->fcs;
1968 struct bfa_fcs_rport_s *rport; 1961 struct bfa_fcs_rport_s *rport;
1969 struct bfad_rport_s *rport_drv; 1962 struct bfad_rport_s *rport_drv;
1970 1963
1971 /** 1964 /**
1972 * allocate rport 1965 * allocate rport
1973 */ 1966 */
1974 if (bfa_fcb_rport_alloc(fcs->bfad, &rport, &rport_drv) 1967 if (bfa_fcb_rport_alloc(fcs->bfad, &rport, &rport_drv)
1975 != BFA_STATUS_OK) { 1968 != BFA_STATUS_OK) {
1976 bfa_trc(fcs, rpid); 1969 bfa_trc(fcs, rpid);
1977 return NULL; 1970 return NULL;
1978 } 1971 }
@@ -1999,10 +1992,9 @@ bfa_fcs_rport_alloc(struct bfa_fcs_port_s *port, wwn_t pwwn, u32 rpid)
1999 /** 1992 /**
2000 * allocate FC-4s 1993 * allocate FC-4s
2001 */ 1994 */
2002 bfa_assert(bfa_fcs_port_is_initiator(port) ^ 1995 bfa_assert(bfa_fcs_lport_is_initiator(port));
2003 bfa_fcs_port_is_target(port));
2004 1996
2005 if (bfa_fcs_port_is_initiator(port)) { 1997 if (bfa_fcs_lport_is_initiator(port)) {
2006 rport->itnim = bfa_fcs_itnim_create(rport); 1998 rport->itnim = bfa_fcs_itnim_create(rport);
2007 if (!rport->itnim) { 1999 if (!rport->itnim) {
2008 bfa_trc(fcs, rpid); 2000 bfa_trc(fcs, rpid);
@@ -2012,23 +2004,11 @@ bfa_fcs_rport_alloc(struct bfa_fcs_port_s *port, wwn_t pwwn, u32 rpid)
2012 } 2004 }
2013 } 2005 }
2014 2006
2015 if (bfa_fcs_port_is_target(port)) { 2007 bfa_fcs_lport_add_rport(port, rport);
2016 rport->tin = bfa_fcs_tin_create(rport);
2017 if (!rport->tin) {
2018 bfa_trc(fcs, rpid);
2019 bfa_rport_delete(rport->bfa_rport);
2020 kfree(rport_drv);
2021 return NULL;
2022 }
2023 }
2024
2025 bfa_fcs_port_add_rport(port, rport);
2026 2008
2027 bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); 2009 bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
2028 2010
2029 /* 2011 /* Initialize the Rport Features(RPF) Sub Module */
2030 * Initialize the Rport Features(RPF) Sub Module
2031 */
2032 if (!BFA_FCS_PID_IS_WKA(rport->pid)) 2012 if (!BFA_FCS_PID_IS_WKA(rport->pid))
2033 bfa_fcs_rpf_init(rport); 2013 bfa_fcs_rpf_init(rport);
2034 2014
@@ -2039,121 +2019,78 @@ bfa_fcs_rport_alloc(struct bfa_fcs_port_s *port, wwn_t pwwn, u32 rpid)
2039static void 2019static void
2040bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport) 2020bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport)
2041{ 2021{
2042 struct bfa_fcs_port_s *port = rport->port; 2022 struct bfa_fcs_lport_s *port = rport->port;
2043 2023
2044 /** 2024 /**
2045 * - delete FC-4s 2025 * - delete FC-4s
2046 * - delete BFA rport 2026 * - delete BFA rport
2047 * - remove from queue of rports 2027 * - remove from queue of rports
2048 */ 2028 */
2049 if (bfa_fcs_port_is_initiator(port)) 2029 if (bfa_fcs_lport_is_initiator(port)) {
2050 bfa_fcs_itnim_delete(rport->itnim); 2030 bfa_fcs_itnim_delete(rport->itnim);
2051 2031 if (rport->pid != 0 && !BFA_FCS_PID_IS_WKA(rport->pid))
2052 if (bfa_fcs_port_is_target(port)) 2032 bfa_fcs_rpf_rport_offline(rport);
2053 bfa_fcs_tin_delete(rport->tin); 2033 }
2054 2034
2055 bfa_rport_delete(rport->bfa_rport); 2035 bfa_rport_delete(rport->bfa_rport);
2056 bfa_fcs_port_del_rport(port, rport); 2036 bfa_fcs_lport_del_rport(port, rport);
2057 kfree(rport->rp_drv); 2037 kfree(rport->rp_drv);
2058} 2038}
2059 2039
2060static void 2040static void
2061bfa_fcs_rport_aen_post(struct bfa_fcs_rport_s *rport,
2062 enum bfa_rport_aen_event event,
2063 struct bfa_rport_aen_data_s *data)
2064{
2065 union bfa_aen_data_u aen_data;
2066 struct bfa_log_mod_s *logmod = rport->fcs->logm;
2067 wwn_t lpwwn = bfa_fcs_port_get_pwwn(rport->port);
2068 wwn_t rpwwn = rport->pwwn;
2069 char lpwwn_ptr[BFA_STRING_32];
2070 char rpwwn_ptr[BFA_STRING_32];
2071 char *prio_str[] = { "unknown", "high", "medium", "low" };
2072
2073 wwn2str(lpwwn_ptr, lpwwn);
2074 wwn2str(rpwwn_ptr, rpwwn);
2075
2076 switch (event) {
2077 case BFA_RPORT_AEN_ONLINE:
2078 case BFA_RPORT_AEN_OFFLINE:
2079 case BFA_RPORT_AEN_DISCONNECT:
2080 bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_RPORT, event),
2081 rpwwn_ptr, lpwwn_ptr);
2082 break;
2083 case BFA_RPORT_AEN_QOS_PRIO:
2084 aen_data.rport.priv.qos = data->priv.qos;
2085 bfa_log(logmod, BFA_AEN_RPORT_QOS_PRIO,
2086 prio_str[aen_data.rport.priv.qos.qos_priority],
2087 rpwwn_ptr, lpwwn_ptr);
2088 break;
2089 case BFA_RPORT_AEN_QOS_FLOWID:
2090 aen_data.rport.priv.qos = data->priv.qos;
2091 bfa_log(logmod, BFA_AEN_RPORT_QOS_FLOWID,
2092 aen_data.rport.priv.qos.qos_flow_id, rpwwn_ptr,
2093 lpwwn_ptr);
2094 break;
2095 default:
2096 break;
2097 }
2098
2099 aen_data.rport.vf_id = rport->port->fabric->vf_id;
2100 aen_data.rport.ppwwn =
2101 bfa_fcs_port_get_pwwn(bfa_fcs_get_base_port(rport->fcs));
2102 aen_data.rport.lpwwn = lpwwn;
2103 aen_data.rport.rpwwn = rpwwn;
2104}
2105
2106static void
2107bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport) 2041bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
2108{ 2042{
2109 struct bfa_fcs_port_s *port = rport->port; 2043 struct bfa_fcs_lport_s *port = rport->port;
2044 struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
2045 char lpwwn_buf[BFA_STRING_32];
2046 char rpwwn_buf[BFA_STRING_32];
2110 2047
2111 rport->stats.onlines++; 2048 rport->stats.onlines++;
2112 2049
2113 if (bfa_fcs_port_is_initiator(port)) { 2050 if (bfa_fcs_lport_is_initiator(port)) {
2114 bfa_fcs_itnim_rport_online(rport->itnim); 2051 bfa_fcs_itnim_rport_online(rport->itnim);
2115 if (!BFA_FCS_PID_IS_WKA(rport->pid)) 2052 if (!BFA_FCS_PID_IS_WKA(rport->pid))
2116 bfa_fcs_rpf_rport_online(rport); 2053 bfa_fcs_rpf_rport_online(rport);
2117 }; 2054 };
2118 2055
2119 if (bfa_fcs_port_is_target(port)) 2056 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
2120 bfa_fcs_tin_rport_online(rport->tin); 2057 wwn2str(rpwwn_buf, rport->pwwn);
2121
2122 /*
2123 * Don't post events for well known addresses
2124 */
2125 if (!BFA_FCS_PID_IS_WKA(rport->pid)) 2058 if (!BFA_FCS_PID_IS_WKA(rport->pid))
2126 bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_ONLINE, NULL); 2059 BFA_LOG(KERN_INFO, bfad, log_level,
2060 "Remote port (WWN = %s) online for logical port (WWN = %s)\n",
2061 rpwwn_buf, lpwwn_buf);
2127} 2062}
2128 2063
2129static void 2064static void
2130bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport) 2065bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport)
2131{ 2066{
2132 struct bfa_fcs_port_s *port = rport->port; 2067 struct bfa_fcs_lport_s *port = rport->port;
2068 struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
2069 char lpwwn_buf[BFA_STRING_32];
2070 char rpwwn_buf[BFA_STRING_32];
2133 2071
2134 rport->stats.offlines++; 2072 rport->stats.offlines++;
2135 2073
2136 /* 2074 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
2137 * Don't post events for well known addresses 2075 wwn2str(rpwwn_buf, rport->pwwn);
2138 */
2139 if (!BFA_FCS_PID_IS_WKA(rport->pid)) { 2076 if (!BFA_FCS_PID_IS_WKA(rport->pid)) {
2140 if (bfa_fcs_port_is_online(rport->port) == BFA_TRUE) { 2077 if (bfa_fcs_lport_is_online(rport->port) == BFA_TRUE)
2141 bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_DISCONNECT, 2078 BFA_LOG(KERN_ERR, bfad, log_level,
2142 NULL); 2079 "Remote port (WWN = %s) connectivity lost for "
2143 } else { 2080 "logical port (WWN = %s)\n",
2144 bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_OFFLINE, 2081 rpwwn_buf, lpwwn_buf);
2145 NULL); 2082 else
2146 } 2083 BFA_LOG(KERN_INFO, bfad, log_level,
2084 "Remote port (WWN = %s) offlined by "
2085 "logical port (WWN = %s)\n",
2086 rpwwn_buf, lpwwn_buf);
2147 } 2087 }
2148 2088
2149 if (bfa_fcs_port_is_initiator(port)) { 2089 if (bfa_fcs_lport_is_initiator(port)) {
2150 bfa_fcs_itnim_rport_offline(rport->itnim); 2090 bfa_fcs_itnim_rport_offline(rport->itnim);
2151 if (!BFA_FCS_PID_IS_WKA(rport->pid)) 2091 if (!BFA_FCS_PID_IS_WKA(rport->pid))
2152 bfa_fcs_rpf_rport_offline(rport); 2092 bfa_fcs_rpf_rport_offline(rport);
2153 } 2093 }
2154
2155 if (bfa_fcs_port_is_target(port))
2156 bfa_fcs_tin_rport_offline(rport->tin);
2157} 2094}
2158 2095
2159/** 2096/**
@@ -2162,7 +2099,7 @@ bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport)
2162static void 2099static void
2163bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi) 2100bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
2164{ 2101{
2165 struct bfa_fcs_port_s *port = rport->port; 2102 bfa_fcs_lport_t *port = rport->port;
2166 2103
2167 /** 2104 /**
2168 * - port name 2105 * - port name
@@ -2193,12 +2130,13 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
2193 /** 2130 /**
2194 * Direct Attach P2P mode : 2131 * Direct Attach P2P mode :
2195 * This is to handle a bug (233476) in IBM targets in Direct Attach 2132 * This is to handle a bug (233476) in IBM targets in Direct Attach
2196 * Mode. Basically, in FLOGI Accept the target would have erroneously 2133 * Mode. Basically, in FLOGI Accept the target would have
2197 * set the BB Credit to the value used in the FLOGI sent by the HBA. 2134 * erroneously set the BB Credit to the value used in the FLOGI
2198 * It uses the correct value (its own BB credit) in PLOGI. 2135 * sent by the HBA. It uses the correct value (its own BB credit)
2136 * in PLOGI.
2199 */ 2137 */
2200 if ((!bfa_fcs_fabric_is_switched(port->fabric)) 2138 if ((!bfa_fcs_fabric_is_switched(port->fabric)) &&
2201 && (bfa_os_ntohs(plogi->csp.bbcred) < port->fabric->bb_credit)) { 2139 (bfa_os_ntohs(plogi->csp.bbcred) < port->fabric->bb_credit)) {
2202 2140
2203 bfa_trc(port->fcs, bfa_os_ntohs(plogi->csp.bbcred)); 2141 bfa_trc(port->fcs, bfa_os_ntohs(plogi->csp.bbcred));
2204 bfa_trc(port->fcs, port->fabric->bb_credit); 2142 bfa_trc(port->fcs, port->fabric->bb_credit);
@@ -2211,7 +2149,7 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
2211} 2149}
2212 2150
2213/** 2151/**
2214 * Called to handle LOGO received from an existing remote port. 2152 * Called to handle LOGO received from an existing remote port.
2215 */ 2153 */
2216static void 2154static void
2217bfa_fcs_rport_process_logo(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs) 2155bfa_fcs_rport_process_logo(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs)
@@ -2231,8 +2169,8 @@ bfa_fcs_rport_process_logo(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs)
2231 */ 2169 */
2232 2170
2233/** 2171/**
2234 * Called by bport/vport to create a remote port instance for a discovered 2172 * Called by bport/vport to create a remote port instance for a discovered
2235 * remote device. 2173 * remote device.
2236 * 2174 *
2237 * @param[in] port - base port or vport 2175 * @param[in] port - base port or vport
2238 * @param[in] rpid - remote port ID 2176 * @param[in] rpid - remote port ID
@@ -2240,7 +2178,7 @@ bfa_fcs_rport_process_logo(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs)
2240 * @return None 2178 * @return None
2241 */ 2179 */
2242struct bfa_fcs_rport_s * 2180struct bfa_fcs_rport_s *
2243bfa_fcs_rport_create(struct bfa_fcs_port_s *port, u32 rpid) 2181bfa_fcs_rport_create(struct bfa_fcs_lport_s *port, u32 rpid)
2244{ 2182{
2245 struct bfa_fcs_rport_s *rport; 2183 struct bfa_fcs_rport_s *rport;
2246 2184
@@ -2262,10 +2200,9 @@ bfa_fcs_rport_create(struct bfa_fcs_port_s *port, u32 rpid)
2262 * @return None 2200 * @return None
2263 */ 2201 */
2264struct bfa_fcs_rport_s * 2202struct bfa_fcs_rport_s *
2265bfa_fcs_rport_create_by_wwn(struct bfa_fcs_port_s *port, wwn_t rpwwn) 2203bfa_fcs_rport_create_by_wwn(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
2266{ 2204{
2267 struct bfa_fcs_rport_s *rport; 2205 struct bfa_fcs_rport_s *rport;
2268
2269 bfa_trc(port->fcs, rpwwn); 2206 bfa_trc(port->fcs, rpwwn);
2270 rport = bfa_fcs_rport_alloc(port, rpwwn, 0); 2207 rport = bfa_fcs_rport_alloc(port, rpwwn, 0);
2271 if (!rport) 2208 if (!rport)
@@ -2274,7 +2211,6 @@ bfa_fcs_rport_create_by_wwn(struct bfa_fcs_port_s *port, wwn_t rpwwn)
2274 bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_DISC); 2211 bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_DISC);
2275 return rport; 2212 return rport;
2276} 2213}
2277
2278/** 2214/**
2279 * Called by bport in private loop topology to indicate that a 2215 * Called by bport in private loop topology to indicate that a
2280 * rport has been discovered and plogi has been completed. 2216 * rport has been discovered and plogi has been completed.
@@ -2283,8 +2219,8 @@ bfa_fcs_rport_create_by_wwn(struct bfa_fcs_port_s *port, wwn_t rpwwn)
2283 * @param[in] rpid - remote port ID 2219 * @param[in] rpid - remote port ID
2284 */ 2220 */
2285void 2221void
2286bfa_fcs_rport_start(struct bfa_fcs_port_s *port, struct fchs_s *fchs, 2222bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *fchs,
2287 struct fc_logi_s *plogi) 2223 struct fc_logi_s *plogi)
2288{ 2224{
2289 struct bfa_fcs_rport_s *rport; 2225 struct bfa_fcs_rport_s *rport;
2290 2226
@@ -2298,12 +2234,12 @@ bfa_fcs_rport_start(struct bfa_fcs_port_s *port, struct fchs_s *fchs,
2298} 2234}
2299 2235
2300/** 2236/**
2301 * Called by bport/vport to handle PLOGI received from a new remote port. 2237 * Called by bport/vport to handle PLOGI received from a new remote port.
2302 * If an existing rport does a plogi, it will be handled separately. 2238 * If an existing rport does a plogi, it will be handled separately.
2303 */ 2239 */
2304void 2240void
2305bfa_fcs_rport_plogi_create(struct bfa_fcs_port_s *port, struct fchs_s *fchs, 2241bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port, struct fchs_s *fchs,
2306 struct fc_logi_s *plogi) 2242 struct fc_logi_s *plogi)
2307{ 2243{
2308 struct bfa_fcs_rport_s *rport; 2244 struct bfa_fcs_rport_s *rport;
2309 2245
@@ -2323,9 +2259,9 @@ bfa_fcs_rport_plogi_create(struct bfa_fcs_port_s *port, struct fchs_s *fchs,
2323static int 2259static int
2324wwn_compare(wwn_t wwn1, wwn_t wwn2) 2260wwn_compare(wwn_t wwn1, wwn_t wwn2)
2325{ 2261{
2326 u8 *b1 = (u8 *) &wwn1; 2262 u8 *b1 = (u8 *) &wwn1;
2327 u8 *b2 = (u8 *) &wwn2; 2263 u8 *b2 = (u8 *) &wwn2;
2328 int i; 2264 int i;
2329 2265
2330 for (i = 0; i < sizeof(wwn_t); i++) { 2266 for (i = 0; i < sizeof(wwn_t); i++) {
2331 if (b1[i] < b2[i]) 2267 if (b1[i] < b2[i])
@@ -2337,12 +2273,12 @@ wwn_compare(wwn_t wwn1, wwn_t wwn2)
2337} 2273}
2338 2274
2339/** 2275/**
2340 * Called by bport/vport to handle PLOGI received from an existing 2276 * Called by bport/vport to handle PLOGI received from an existing
2341 * remote port. 2277 * remote port.
2342 */ 2278 */
2343void 2279void
2344bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs, 2280bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
2345 struct fc_logi_s *plogi) 2281 struct fc_logi_s *plogi)
2346{ 2282{
2347 /** 2283 /**
2348 * @todo Handle P2P and initiator-initiator. 2284 * @todo Handle P2P and initiator-initiator.
@@ -2360,9 +2296,9 @@ bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
2360 * If the link topology is N2N, 2296 * If the link topology is N2N,
2361 * this Plogi should be accepted. 2297 * this Plogi should be accepted.
2362 */ 2298 */
2363 if ((wwn_compare(rport->port->port_cfg.pwwn, rport->pwwn) == -1) 2299 if ((wwn_compare(rport->port->port_cfg.pwwn, rport->pwwn) == -1) &&
2364 && (bfa_fcs_fabric_is_switched(rport->port->fabric)) 2300 (bfa_fcs_fabric_is_switched(rport->port->fabric)) &&
2365 && (!BFA_FCS_PID_IS_WKA(rport->pid))) { 2301 (!BFA_FCS_PID_IS_WKA(rport->pid))) {
2366 bfa_trc(rport->fcs, rport->pid); 2302 bfa_trc(rport->fcs, rport->pid);
2367 return; 2303 return;
2368 } 2304 }
@@ -2374,10 +2310,10 @@ bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
2374/** 2310/**
2375 * Called by bport/vport to delete a remote port instance. 2311 * Called by bport/vport to delete a remote port instance.
2376 * 2312 *
2377* Rport delete is called under the following conditions: 2313 * Rport delete is called under the following conditions:
2378 * - vport is deleted 2314 * - vport is deleted
2379 * - vf is deleted 2315 * - vf is deleted
2380 * - explicit request from OS to delete rport (vmware) 2316 * - explicit request from OS to delete rport
2381 */ 2317 */
2382void 2318void
2383bfa_fcs_rport_delete(struct bfa_fcs_rport_s *rport) 2319bfa_fcs_rport_delete(struct bfa_fcs_rport_s *rport)
@@ -2404,20 +2340,18 @@ bfa_fcs_rport_online(struct bfa_fcs_rport_s *rport)
2404{ 2340{
2405 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND); 2341 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND);
2406} 2342}
2407
2408/** 2343/**
2409 * Called by bport/vport to notify SCN for the remote port 2344 * Called by bport/vport to notify SCN for the remote port
2410 */ 2345 */
2411void 2346void
2412bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport) 2347bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport)
2413{ 2348{
2414
2415 rport->stats.rscns++; 2349 rport->stats.rscns++;
2416 bfa_sm_send_event(rport, RPSM_EVENT_SCN); 2350 bfa_sm_send_event(rport, RPSM_EVENT_SCN);
2417} 2351}
2418 2352
2419/** 2353/**
2420 * Called by fcpim to notify that the ITN cleanup is done. 2354 * Called by fcpim to notify that the ITN cleanup is done.
2421 */ 2355 */
2422void 2356void
2423bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport) 2357bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport)
@@ -2426,7 +2360,7 @@ bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport)
2426} 2360}
2427 2361
2428/** 2362/**
2429 * Called by fcptm to notify that the ITN cleanup is done. 2363 * Called by fcptm to notify that the ITN cleanup is done.
2430 */ 2364 */
2431void 2365void
2432bfa_fcs_rport_tin_ack(struct bfa_fcs_rport_s *rport) 2366bfa_fcs_rport_tin_ack(struct bfa_fcs_rport_s *rport)
@@ -2435,99 +2369,100 @@ bfa_fcs_rport_tin_ack(struct bfa_fcs_rport_s *rport)
2435} 2369}
2436 2370
2437/** 2371/**
2438 * This routine BFA callback for bfa_rport_online() call. 2372 * brief
2373 * This routine BFA callback for bfa_rport_online() call.
2439 * 2374 *
2440 * param[in] cb_arg - rport struct. 2375 * param[in] cb_arg - rport struct.
2441 * 2376 *
2442 * return 2377 * return
2443 * void 2378 * void
2444 * 2379 *
2445* Special Considerations: 2380 * Special Considerations:
2446 * 2381 *
2447 * note 2382 * note
2448 */ 2383 */
2449void 2384void
2450bfa_cb_rport_online(void *cbarg) 2385bfa_cb_rport_online(void *cbarg)
2451{ 2386{
2452 2387
2453 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *)cbarg; 2388 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
2454 2389
2455 bfa_trc(rport->fcs, rport->pwwn); 2390 bfa_trc(rport->fcs, rport->pwwn);
2456 bfa_sm_send_event(rport, RPSM_EVENT_HCB_ONLINE); 2391 bfa_sm_send_event(rport, RPSM_EVENT_HCB_ONLINE);
2457} 2392}
2458 2393
2459/** 2394/**
2460 * This routine BFA callback for bfa_rport_offline() call. 2395 * brief
2396 * This routine BFA callback for bfa_rport_offline() call.
2461 * 2397 *
2462 * param[in] rport - 2398 * param[in] rport -
2463 * 2399 *
2464 * return 2400 * return
2465 * void 2401 * void
2466 * 2402 *
2467 * Special Considerations: 2403 * Special Considerations:
2468 * 2404 *
2469 * note 2405 * note
2470 */ 2406 */
2471void 2407void
2472bfa_cb_rport_offline(void *cbarg) 2408bfa_cb_rport_offline(void *cbarg)
2473{ 2409{
2474 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *)cbarg; 2410 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
2475 2411
2476 bfa_trc(rport->fcs, rport->pwwn); 2412 bfa_trc(rport->fcs, rport->pwwn);
2477 bfa_sm_send_event(rport, RPSM_EVENT_HCB_OFFLINE); 2413 bfa_sm_send_event(rport, RPSM_EVENT_HCB_OFFLINE);
2478} 2414}
2479 2415
2480/** 2416/**
2481 * This routine is a static BFA callback when there is a QoS flow_id 2417 * brief
2482 * change notification 2418 * This routine is a static BFA callback when there is a QoS flow_id
2419 * change notification
2483 * 2420 *
2484 * @param[in] rport - 2421 * param[in] rport -
2485 * 2422 *
2486 * @return void 2423 * return
2424 * void
2487 * 2425 *
2488 * Special Considerations: 2426 * Special Considerations:
2489 * 2427 *
2490 * @note 2428 * note
2491 */ 2429 */
2492void 2430void
2493bfa_cb_rport_qos_scn_flowid(void *cbarg, 2431bfa_cb_rport_qos_scn_flowid(void *cbarg,
2494 struct bfa_rport_qos_attr_s old_qos_attr, 2432 struct bfa_rport_qos_attr_s old_qos_attr,
2495 struct bfa_rport_qos_attr_s new_qos_attr) 2433 struct bfa_rport_qos_attr_s new_qos_attr)
2496{ 2434{
2497 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *)cbarg; 2435 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
2498 struct bfa_rport_aen_data_s aen_data;
2499 2436
2500 bfa_trc(rport->fcs, rport->pwwn); 2437 bfa_trc(rport->fcs, rport->pwwn);
2501 aen_data.priv.qos = new_qos_attr;
2502 bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_FLOWID, &aen_data);
2503} 2438}
2504 2439
2505/** 2440/**
2506 * This routine is a static BFA callback when there is a QoS priority 2441 * brief
2507 * change notification 2442 * This routine is a static BFA callback when there is a QoS priority
2443 * change notification
2508 * 2444 *
2509 * @param[in] rport - 2445 * param[in] rport -
2510 * 2446 *
2511 * @return void 2447 * return
2448 * void
2512 * 2449 *
2513 * Special Considerations: 2450 * Special Considerations:
2514 * 2451 *
2515 * @note 2452 * note
2516 */ 2453 */
2517void 2454void
2518bfa_cb_rport_qos_scn_prio(void *cbarg, struct bfa_rport_qos_attr_s old_qos_attr, 2455bfa_cb_rport_qos_scn_prio(void *cbarg,
2519 struct bfa_rport_qos_attr_s new_qos_attr) 2456 struct bfa_rport_qos_attr_s old_qos_attr,
2457 struct bfa_rport_qos_attr_s new_qos_attr)
2520{ 2458{
2521 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *)cbarg; 2459 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
2522 struct bfa_rport_aen_data_s aen_data;
2523 2460
2524 bfa_trc(rport->fcs, rport->pwwn); 2461 bfa_trc(rport->fcs, rport->pwwn);
2525 aen_data.priv.qos = new_qos_attr;
2526 bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_PRIO, &aen_data);
2527} 2462}
2528 2463
2529/** 2464/**
2530 * Called to process any unsolicted frames from this remote port 2465 * Called to process any unsolicted frames from this remote port
2531 */ 2466 */
2532void 2467void
2533bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport) 2468bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport)
@@ -2536,14 +2471,14 @@ bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport)
2536} 2471}
2537 2472
2538/** 2473/**
2539 * Called to process any unsolicted frames from this remote port 2474 * Called to process any unsolicted frames from this remote port
2540 */ 2475 */
2541void 2476void
2542bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs, 2477bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport,
2543 u16 len) 2478 struct fchs_s *fchs, u16 len)
2544{ 2479{
2545 struct bfa_fcs_port_s *port = rport->port; 2480 struct bfa_fcs_lport_s *port = rport->port;
2546 struct fc_els_cmd_s *els_cmd; 2481 struct fc_els_cmd_s *els_cmd;
2547 2482
2548 bfa_trc(rport->fcs, fchs->s_id); 2483 bfa_trc(rport->fcs, fchs->s_id);
2549 bfa_trc(rport->fcs, fchs->d_id); 2484 bfa_trc(rport->fcs, fchs->d_id);
@@ -2558,30 +2493,33 @@ bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs,
2558 2493
2559 switch (els_cmd->els_code) { 2494 switch (els_cmd->els_code) {
2560 case FC_ELS_LOGO: 2495 case FC_ELS_LOGO:
2496 bfa_stats(port, plogi_rcvd);
2561 bfa_fcs_rport_process_logo(rport, fchs); 2497 bfa_fcs_rport_process_logo(rport, fchs);
2562 break; 2498 break;
2563 2499
2564 case FC_ELS_ADISC: 2500 case FC_ELS_ADISC:
2501 bfa_stats(port, adisc_rcvd);
2565 bfa_fcs_rport_process_adisc(rport, fchs, len); 2502 bfa_fcs_rport_process_adisc(rport, fchs, len);
2566 break; 2503 break;
2567 2504
2568 case FC_ELS_PRLO: 2505 case FC_ELS_PRLO:
2569 if (bfa_fcs_port_is_initiator(port)) 2506 bfa_stats(port, prlo_rcvd);
2507 if (bfa_fcs_lport_is_initiator(port))
2570 bfa_fcs_fcpim_uf_recv(rport->itnim, fchs, len); 2508 bfa_fcs_fcpim_uf_recv(rport->itnim, fchs, len);
2571
2572 if (bfa_fcs_port_is_target(port))
2573 bfa_fcs_fcptm_uf_recv(rport->tin, fchs, len);
2574 break; 2509 break;
2575 2510
2576 case FC_ELS_PRLI: 2511 case FC_ELS_PRLI:
2512 bfa_stats(port, prli_rcvd);
2577 bfa_fcs_rport_process_prli(rport, fchs, len); 2513 bfa_fcs_rport_process_prli(rport, fchs, len);
2578 break; 2514 break;
2579 2515
2580 case FC_ELS_RPSC: 2516 case FC_ELS_RPSC:
2517 bfa_stats(port, rpsc_rcvd);
2581 bfa_fcs_rport_process_rpsc(rport, fchs, len); 2518 bfa_fcs_rport_process_rpsc(rport, fchs, len);
2582 break; 2519 break;
2583 2520
2584 default: 2521 default:
2522 bfa_stats(port, un_handled_els_rcvd);
2585 bfa_fcs_rport_send_ls_rjt(rport, fchs, 2523 bfa_fcs_rport_send_ls_rjt(rport, fchs,
2586 FC_LS_RJT_RSN_CMD_NOT_SUPP, 2524 FC_LS_RJT_RSN_CMD_NOT_SUPP,
2587 FC_LS_RJT_EXP_NO_ADDL_INFO); 2525 FC_LS_RJT_EXP_NO_ADDL_INFO);
@@ -2589,28 +2527,27 @@ bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs,
2589 } 2527 }
2590} 2528}
2591 2529
2592/* Send best case acc to prlo */ 2530/* send best case acc to prlo */
2593static void 2531static void
2594bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport) 2532bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport)
2595{ 2533{
2596 struct bfa_fcs_port_s *port = rport->port; 2534 struct bfa_fcs_lport_s *port = rport->port;
2597 struct fchs_s fchs; 2535 struct fchs_s fchs;
2598 struct bfa_fcxp_s *fcxp; 2536 struct bfa_fcxp_s *fcxp;
2599 int len; 2537 int len;
2600 2538
2601 bfa_trc(rport->fcs, rport->pid); 2539 bfa_trc(rport->fcs, rport->pid);
2602 2540
2603 fcxp = bfa_fcs_fcxp_alloc(port->fcs); 2541 fcxp = bfa_fcs_fcxp_alloc(port->fcs);
2604 if (!fcxp) 2542 if (!fcxp)
2605 return; 2543 return;
2606
2607 len = fc_prlo_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), 2544 len = fc_prlo_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
2608 rport->pid, bfa_fcs_port_get_fcid(port), 2545 rport->pid, bfa_fcs_lport_get_fcid(port),
2609 rport->reply_oxid, 0); 2546 rport->reply_oxid, 0);
2610 2547
2611 bfa_fcxp_send(fcxp, rport->bfa_rport, port->fabric->vf_id, 2548 bfa_fcxp_send(fcxp, rport->bfa_rport, port->fabric->vf_id,
2612 port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, 2549 port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs,
2613 NULL, NULL, FC_MAX_PDUSZ, 0); 2550 NULL, NULL, FC_MAX_PDUSZ, 0);
2614} 2551}
2615 2552
2616/* 2553/*
@@ -2620,10 +2557,10 @@ static void
2620bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs, 2557bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
2621 u8 reason_code, u8 reason_code_expl) 2558 u8 reason_code, u8 reason_code_expl)
2622{ 2559{
2623 struct bfa_fcs_port_s *port = rport->port; 2560 struct bfa_fcs_lport_s *port = rport->port;
2624 struct fchs_s fchs; 2561 struct fchs_s fchs;
2625 struct bfa_fcxp_s *fcxp; 2562 struct bfa_fcxp_s *fcxp;
2626 int len; 2563 int len;
2627 2564
2628 bfa_trc(rport->fcs, rx_fchs->s_id); 2565 bfa_trc(rport->fcs, rx_fchs->s_id);
2629 2566
@@ -2631,12 +2568,13 @@ bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
2631 if (!fcxp) 2568 if (!fcxp)
2632 return; 2569 return;
2633 2570
2634 len = fc_ls_rjt_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id, 2571 len = fc_ls_rjt_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
2635 bfa_fcs_port_get_fcid(port), rx_fchs->ox_id, 2572 rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
2636 reason_code, reason_code_expl); 2573 rx_fchs->ox_id, reason_code, reason_code_expl);
2637 2574
2638 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 2575 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag,
2639 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); 2576 BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
2577 FC_MAX_PDUSZ, 0);
2640} 2578}
2641 2579
2642/** 2580/**
@@ -2649,24 +2587,22 @@ bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport)
2649} 2587}
2650 2588
2651/** 2589/**
2652 * Called by the Driver to set rport delete/ageout timeout 2590 * brief
2591 * Called by the Driver to set rport delete/ageout timeout
2653 * 2592 *
2654 * param[in] rport timeout value in seconds. 2593 * param[in] rport timeout value in seconds.
2655 * 2594 *
2656 * return None 2595 * return None
2657 */ 2596 */
2658void 2597void
2659bfa_fcs_rport_set_del_timeout(u8 rport_tmo) 2598bfa_fcs_rport_set_del_timeout(u8 rport_tmo)
2660{ 2599{
2661 /* 2600 /* convert to Millisecs */
2662 * convert to Millisecs
2663 */
2664 if (rport_tmo > 0) 2601 if (rport_tmo > 0)
2665 bfa_fcs_rport_del_timeout = rport_tmo * 1000; 2602 bfa_fcs_rport_del_timeout = rport_tmo * 1000;
2666} 2603}
2667
2668void 2604void
2669bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, uint16_t ox_id) 2605bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, u16 ox_id)
2670{ 2606{
2671 bfa_trc(rport->fcs, rport->pid); 2607 bfa_trc(rport->fcs, rport->pid);
2672 2608
@@ -2674,3 +2610,517 @@ bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, uint16_t ox_id)
2674 rport->reply_oxid = ox_id; 2610 rport->reply_oxid = ox_id;
2675 bfa_sm_send_event(rport, RPSM_EVENT_PRLO_RCVD); 2611 bfa_sm_send_event(rport, RPSM_EVENT_PRLO_RCVD);
2676} 2612}
2613
2614
2615
2616/**
2617 * Remote port implementation.
2618 */
2619
2620/**
2621 * fcs_rport_api FCS rport API.
2622 */
2623
2624/**
2625 * Direct API to add a target by port wwn. This interface is used, for
2626 * example, by bios when target pwwn is known from boot lun configuration.
2627 */
2628bfa_status_t
2629bfa_fcs_rport_add(struct bfa_fcs_lport_s *port, wwn_t *pwwn,
2630 struct bfa_fcs_rport_s *rport, struct bfad_rport_s *rport_drv)
2631{
2632 bfa_trc(port->fcs, *pwwn);
2633
2634 return BFA_STATUS_OK;
2635}
2636
2637/**
2638 * Direct API to remove a target and its associated resources. This
2639 * interface is used, for example, by driver to remove target
2640 * ports from the target list for a VM.
2641 */
2642bfa_status_t
2643bfa_fcs_rport_remove(struct bfa_fcs_rport_s *rport_in)
2644{
2645
2646 struct bfa_fcs_rport_s *rport;
2647
2648 bfa_trc(rport_in->fcs, rport_in->pwwn);
2649
2650 rport = bfa_fcs_lport_get_rport_by_pwwn(rport_in->port, rport_in->pwwn);
2651 if (rport == NULL) {
2652 /*
2653 * TBD Error handling
2654 */
2655 bfa_trc(rport_in->fcs, rport_in->pid);
2656 return BFA_STATUS_UNKNOWN_RWWN;
2657 }
2658
2659 /*
2660 * TBD if this remote port is online, send a logo
2661 */
2662 return BFA_STATUS_OK;
2663
2664}
2665
2666/**
2667 * Remote device status for display/debug.
2668 */
2669void
2670bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
2671 struct bfa_rport_attr_s *rport_attr)
2672{
2673 struct bfa_rport_qos_attr_s qos_attr;
2674 bfa_fcs_lport_t *port = rport->port;
2675 bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed;
2676
2677 bfa_os_memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s));
2678
2679 rport_attr->pid = rport->pid;
2680 rport_attr->pwwn = rport->pwwn;
2681 rport_attr->nwwn = rport->nwwn;
2682 rport_attr->cos_supported = rport->fc_cos;
2683 rport_attr->df_sz = rport->maxfrsize;
2684 rport_attr->state = bfa_fcs_rport_get_state(rport);
2685 rport_attr->fc_cos = rport->fc_cos;
2686 rport_attr->cisc = rport->cisc;
2687 rport_attr->scsi_function = rport->scsi_function;
2688 rport_attr->curr_speed = rport->rpf.rpsc_speed;
2689 rport_attr->assigned_speed = rport->rpf.assigned_speed;
2690
2691 bfa_rport_get_qos_attr(rport->bfa_rport, &qos_attr);
2692 rport_attr->qos_attr = qos_attr;
2693
2694 rport_attr->trl_enforced = BFA_FALSE;
2695 if (bfa_fcport_is_ratelim(port->fcs->bfa)) {
2696 if (rport_speed == BFA_PORT_SPEED_UNKNOWN) {
2697 /* Use default ratelim speed setting */
2698 rport_speed =
2699 bfa_fcport_get_ratelim_speed(rport->fcs->bfa);
2700 }
2701
2702 if (rport_speed < bfa_fcs_lport_get_rport_max_speed(port))
2703 rport_attr->trl_enforced = BFA_TRUE;
2704 }
2705}
2706
2707/**
2708 * Per remote device statistics.
2709 */
2710void
2711bfa_fcs_rport_get_stats(struct bfa_fcs_rport_s *rport,
2712 struct bfa_rport_stats_s *stats)
2713{
2714 *stats = rport->stats;
2715}
2716
2717void
2718bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport)
2719{
2720 bfa_os_memset((char *)&rport->stats, 0,
2721 sizeof(struct bfa_rport_stats_s));
2722}
2723
2724struct bfa_fcs_rport_s *
2725bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
2726{
2727 struct bfa_fcs_rport_s *rport;
2728
2729 rport = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
2730 if (rport == NULL) {
2731 /*
2732 * TBD Error handling
2733 */
2734 }
2735
2736 return rport;
2737}
2738
2739struct bfa_fcs_rport_s *
2740bfa_fcs_rport_lookup_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t rnwwn)
2741{
2742 struct bfa_fcs_rport_s *rport;
2743
2744 rport = bfa_fcs_lport_get_rport_by_nwwn(port, rnwwn);
2745 if (rport == NULL) {
2746 /*
2747 * TBD Error handling
2748 */
2749 }
2750
2751 return rport;
2752}
2753
2754/*
2755 * This API is to set the Rport's speed. Should be used when RPSC is not
2756 * supported by the rport.
2757 */
2758void
2759bfa_fcs_rport_set_speed(struct bfa_fcs_rport_s *rport, bfa_port_speed_t speed)
2760{
2761 rport->rpf.assigned_speed = speed;
2762
2763 /* Set this speed in f/w only if the RPSC speed is not available */
2764 if (rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
2765 bfa_rport_speed(rport->bfa_rport, speed);
2766}
2767
2768
2769
2770/**
2771 * Remote port features (RPF) implementation.
2772 */
2773
2774#define BFA_FCS_RPF_RETRIES (3)
2775#define BFA_FCS_RPF_RETRY_TIMEOUT (1000) /* 1 sec (In millisecs) */
2776
2777static void bfa_fcs_rpf_send_rpsc2(void *rport_cbarg,
2778 struct bfa_fcxp_s *fcxp_alloced);
2779static void bfa_fcs_rpf_rpsc2_response(void *fcsarg,
2780 struct bfa_fcxp_s *fcxp,
2781 void *cbarg,
2782 bfa_status_t req_status,
2783 u32 rsp_len,
2784 u32 resid_len,
2785 struct fchs_s *rsp_fchs);
2786
2787static void bfa_fcs_rpf_timeout(void *arg);
2788
2789/**
2790 * fcs_rport_ftrs_sm FCS rport state machine events
2791 */
2792
2793enum rpf_event {
2794 RPFSM_EVENT_RPORT_OFFLINE = 1, /* Rport offline */
2795 RPFSM_EVENT_RPORT_ONLINE = 2, /* Rport online */
2796 RPFSM_EVENT_FCXP_SENT = 3, /* Frame from has been sent */
2797 RPFSM_EVENT_TIMEOUT = 4, /* Rport SM timeout event */
2798 RPFSM_EVENT_RPSC_COMP = 5,
2799 RPFSM_EVENT_RPSC_FAIL = 6,
2800 RPFSM_EVENT_RPSC_ERROR = 7,
2801};
2802
2803static void bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf,
2804 enum rpf_event event);
2805static void bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf,
2806 enum rpf_event event);
2807static void bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf,
2808 enum rpf_event event);
2809static void bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf,
2810 enum rpf_event event);
2811static void bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf,
2812 enum rpf_event event);
2813static void bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf,
2814 enum rpf_event event);
2815
2816static void
2817bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
2818{
2819 struct bfa_fcs_rport_s *rport = rpf->rport;
2820 struct bfa_fcs_fabric_s *fabric = &rport->fcs->fabric;
2821
2822 bfa_trc(rport->fcs, rport->pwwn);
2823 bfa_trc(rport->fcs, rport->pid);
2824 bfa_trc(rport->fcs, event);
2825
2826 switch (event) {
2827 case RPFSM_EVENT_RPORT_ONLINE:
2828 /* Send RPSC2 to a Brocade fabric only. */
2829 if ((!BFA_FCS_PID_IS_WKA(rport->pid)) &&
2830 ((bfa_lps_is_brcd_fabric(rport->port->fabric->lps)) ||
2831 (bfa_fcs_fabric_get_switch_oui(fabric) ==
2832 BFA_FCS_BRCD_SWITCH_OUI))) {
2833 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
2834 rpf->rpsc_retries = 0;
2835 bfa_fcs_rpf_send_rpsc2(rpf, NULL);
2836 }
2837 break;
2838
2839 case RPFSM_EVENT_RPORT_OFFLINE:
2840 break;
2841
2842 default:
2843 bfa_sm_fault(rport->fcs, event);
2844 }
2845}
2846
2847static void
2848bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
2849{
2850 struct bfa_fcs_rport_s *rport = rpf->rport;
2851
2852 bfa_trc(rport->fcs, event);
2853
2854 switch (event) {
2855 case RPFSM_EVENT_FCXP_SENT:
2856 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc);
2857 break;
2858
2859 case RPFSM_EVENT_RPORT_OFFLINE:
2860 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
2861 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rpf->fcxp_wqe);
2862 rpf->rpsc_retries = 0;
2863 break;
2864
2865 default:
2866 bfa_sm_fault(rport->fcs, event);
2867 }
2868}
2869
2870static void
2871bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
2872{
2873 struct bfa_fcs_rport_s *rport = rpf->rport;
2874
2875 bfa_trc(rport->fcs, rport->pid);
2876 bfa_trc(rport->fcs, event);
2877
2878 switch (event) {
2879 case RPFSM_EVENT_RPSC_COMP:
2880 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online);
2881 /* Update speed info in f/w via BFA */
2882 if (rpf->rpsc_speed != BFA_PORT_SPEED_UNKNOWN)
2883 bfa_rport_speed(rport->bfa_rport, rpf->rpsc_speed);
2884 else if (rpf->assigned_speed != BFA_PORT_SPEED_UNKNOWN)
2885 bfa_rport_speed(rport->bfa_rport, rpf->assigned_speed);
2886 break;
2887
2888 case RPFSM_EVENT_RPSC_FAIL:
2889 /* RPSC not supported by rport */
2890 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online);
2891 break;
2892
2893 case RPFSM_EVENT_RPSC_ERROR:
2894 /* need to retry...delayed a bit. */
2895 if (rpf->rpsc_retries++ < BFA_FCS_RPF_RETRIES) {
2896 bfa_timer_start(rport->fcs->bfa, &rpf->timer,
2897 bfa_fcs_rpf_timeout, rpf,
2898 BFA_FCS_RPF_RETRY_TIMEOUT);
2899 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_retry);
2900 } else {
2901 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online);
2902 }
2903 break;
2904
2905 case RPFSM_EVENT_RPORT_OFFLINE:
2906 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
2907 bfa_fcxp_discard(rpf->fcxp);
2908 rpf->rpsc_retries = 0;
2909 break;
2910
2911 default:
2912 bfa_sm_fault(rport->fcs, event);
2913 }
2914}
2915
2916static void
2917bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
2918{
2919 struct bfa_fcs_rport_s *rport = rpf->rport;
2920
2921 bfa_trc(rport->fcs, rport->pid);
2922 bfa_trc(rport->fcs, event);
2923
2924 switch (event) {
2925 case RPFSM_EVENT_TIMEOUT:
2926 /* re-send the RPSC */
2927 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
2928 bfa_fcs_rpf_send_rpsc2(rpf, NULL);
2929 break;
2930
2931 case RPFSM_EVENT_RPORT_OFFLINE:
2932 bfa_timer_stop(&rpf->timer);
2933 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
2934 rpf->rpsc_retries = 0;
2935 break;
2936
2937 default:
2938 bfa_sm_fault(rport->fcs, event);
2939 }
2940}
2941
2942static void
2943bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
2944{
2945 struct bfa_fcs_rport_s *rport = rpf->rport;
2946
2947 bfa_trc(rport->fcs, rport->pwwn);
2948 bfa_trc(rport->fcs, rport->pid);
2949 bfa_trc(rport->fcs, event);
2950
2951 switch (event) {
2952 case RPFSM_EVENT_RPORT_OFFLINE:
2953 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
2954 rpf->rpsc_retries = 0;
2955 break;
2956
2957 default:
2958 bfa_sm_fault(rport->fcs, event);
2959 }
2960}
2961
2962static void
2963bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
2964{
2965 struct bfa_fcs_rport_s *rport = rpf->rport;
2966
2967 bfa_trc(rport->fcs, rport->pwwn);
2968 bfa_trc(rport->fcs, rport->pid);
2969 bfa_trc(rport->fcs, event);
2970
2971 switch (event) {
2972 case RPFSM_EVENT_RPORT_ONLINE:
2973 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
2974 bfa_fcs_rpf_send_rpsc2(rpf, NULL);
2975 break;
2976
2977 case RPFSM_EVENT_RPORT_OFFLINE:
2978 break;
2979
2980 default:
2981 bfa_sm_fault(rport->fcs, event);
2982 }
2983}
2984/**
2985 * Called when Rport is created.
2986 */
2987void
2988bfa_fcs_rpf_init(struct bfa_fcs_rport_s *rport)
2989{
2990 struct bfa_fcs_rpf_s *rpf = &rport->rpf;
2991
2992 bfa_trc(rport->fcs, rport->pid);
2993 rpf->rport = rport;
2994
2995 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_uninit);
2996}
2997
2998/**
2999 * Called when Rport becomes online
3000 */
3001void
3002bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport)
3003{
3004 bfa_trc(rport->fcs, rport->pid);
3005
3006 if (__fcs_min_cfg(rport->port->fcs))
3007 return;
3008
3009 if (bfa_fcs_fabric_is_switched(rport->port->fabric))
3010 bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_ONLINE);
3011}
3012
3013/**
3014 * Called when Rport becomes offline
3015 */
3016void
3017bfa_fcs_rpf_rport_offline(struct bfa_fcs_rport_s *rport)
3018{
3019 bfa_trc(rport->fcs, rport->pid);
3020
3021 if (__fcs_min_cfg(rport->port->fcs))
3022 return;
3023
3024 rport->rpf.rpsc_speed = 0;
3025 bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_OFFLINE);
3026}
3027
3028static void
3029bfa_fcs_rpf_timeout(void *arg)
3030{
3031 struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *) arg;
3032 struct bfa_fcs_rport_s *rport = rpf->rport;
3033
3034 bfa_trc(rport->fcs, rport->pid);
3035 bfa_sm_send_event(rpf, RPFSM_EVENT_TIMEOUT);
3036}
3037
3038static void
3039bfa_fcs_rpf_send_rpsc2(void *rpf_cbarg, struct bfa_fcxp_s *fcxp_alloced)
3040{
3041 struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *)rpf_cbarg;
3042 struct bfa_fcs_rport_s *rport = rpf->rport;
3043 struct bfa_fcs_lport_s *port = rport->port;
3044 struct fchs_s fchs;
3045 int len;
3046 struct bfa_fcxp_s *fcxp;
3047
3048 bfa_trc(rport->fcs, rport->pwwn);
3049
3050 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
3051 if (!fcxp) {
3052 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rpf->fcxp_wqe,
3053 bfa_fcs_rpf_send_rpsc2, rpf);
3054 return;
3055 }
3056 rpf->fcxp = fcxp;
3057
3058 len = fc_rpsc2_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
3059 bfa_fcs_lport_get_fcid(port), &rport->pid, 1);
3060
3061 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
3062 FC_CLASS_3, len, &fchs, bfa_fcs_rpf_rpsc2_response,
3063 rpf, FC_MAX_PDUSZ, FC_ELS_TOV);
3064 rport->stats.rpsc_sent++;
3065 bfa_sm_send_event(rpf, RPFSM_EVENT_FCXP_SENT);
3066
3067}
3068
3069static void
3070bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
3071 bfa_status_t req_status, u32 rsp_len,
3072 u32 resid_len, struct fchs_s *rsp_fchs)
3073{
3074 struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *) cbarg;
3075 struct bfa_fcs_rport_s *rport = rpf->rport;
3076 struct fc_ls_rjt_s *ls_rjt;
3077 struct fc_rpsc2_acc_s *rpsc2_acc;
3078 u16 num_ents;
3079
3080 bfa_trc(rport->fcs, req_status);
3081
3082 if (req_status != BFA_STATUS_OK) {
3083 bfa_trc(rport->fcs, req_status);
3084 if (req_status == BFA_STATUS_ETIMER)
3085 rport->stats.rpsc_failed++;
3086 bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR);
3087 return;
3088 }
3089
3090 rpsc2_acc = (struct fc_rpsc2_acc_s *) BFA_FCXP_RSP_PLD(fcxp);
3091 if (rpsc2_acc->els_cmd == FC_ELS_ACC) {
3092 rport->stats.rpsc_accs++;
3093 num_ents = bfa_os_ntohs(rpsc2_acc->num_pids);
3094 bfa_trc(rport->fcs, num_ents);
3095 if (num_ents > 0) {
3096 bfa_assert(rpsc2_acc->port_info[0].pid != rport->pid);
3097 bfa_trc(rport->fcs,
3098 bfa_os_ntohs(rpsc2_acc->port_info[0].pid));
3099 bfa_trc(rport->fcs,
3100 bfa_os_ntohs(rpsc2_acc->port_info[0].speed));
3101 bfa_trc(rport->fcs,
3102 bfa_os_ntohs(rpsc2_acc->port_info[0].index));
3103 bfa_trc(rport->fcs,
3104 rpsc2_acc->port_info[0].type);
3105
3106 if (rpsc2_acc->port_info[0].speed == 0) {
3107 bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR);
3108 return;
3109 }
3110
3111 rpf->rpsc_speed = fc_rpsc_operspeed_to_bfa_speed(
3112 bfa_os_ntohs(rpsc2_acc->port_info[0].speed));
3113
3114 bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_COMP);
3115 }
3116 } else {
3117 ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);
3118 bfa_trc(rport->fcs, ls_rjt->reason_code);
3119 bfa_trc(rport->fcs, ls_rjt->reason_code_expl);
3120 rport->stats.rpsc_rejects++;
3121 if (ls_rjt->reason_code == FC_LS_RJT_RSN_CMD_NOT_SUPP)
3122 bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_FAIL);
3123 else
3124 bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR);
3125 }
3126}
diff --git a/drivers/scsi/bfa/bfa_fcs_uf.c b/drivers/scsi/bfa/bfa_fcs_uf.c
deleted file mode 100644
index 3d57d48bbae4..000000000000
--- a/drivers/scsi/bfa/bfa_fcs_uf.c
+++ /dev/null
@@ -1,99 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcs_uf.c BFA FCS UF ( Unsolicited Frames)
20 */
21
22#include <fcs/bfa_fcs.h>
23#include <bfa_svc.h>
24#include <fcs/bfa_fcs_fabric.h>
25#include "fcs.h"
26#include "fcs_trcmod.h"
27#include "fcs_fabric.h"
28#include "fcs_uf.h"
29
30BFA_TRC_FILE(FCS, UF);
31
32/**
33 * BFA callback for unsolicited frame receive handler.
34 *
35 * @param[in] cbarg callback arg for receive handler
36 * @param[in] uf unsolicited frame descriptor
37 *
38 * @return None
39 */
40static void
41bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
42{
43 struct bfa_fcs_s *fcs = (struct bfa_fcs_s *) cbarg;
44 struct fchs_s *fchs = bfa_uf_get_frmbuf(uf);
45 u16 len = bfa_uf_get_frmlen(uf);
46 struct fc_vft_s *vft;
47 struct bfa_fcs_fabric_s *fabric;
48
49 /**
50 * check for VFT header
51 */
52 if (fchs->routing == FC_RTG_EXT_HDR &&
53 fchs->cat_info == FC_CAT_VFT_HDR) {
54 bfa_stats(fcs, uf.tagged);
55 vft = bfa_uf_get_frmbuf(uf);
56 if (fcs->port_vfid == vft->vf_id)
57 fabric = &fcs->fabric;
58 else
59 fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id);
60
61 /**
62 * drop frame if vfid is unknown
63 */
64 if (!fabric) {
65 bfa_assert(0);
66 bfa_stats(fcs, uf.vfid_unknown);
67 bfa_uf_free(uf);
68 return;
69 }
70
71 /**
72 * skip vft header
73 */
74 fchs = (struct fchs_s *) (vft + 1);
75 len -= sizeof(struct fc_vft_s);
76
77 bfa_trc(fcs, vft->vf_id);
78 } else {
79 bfa_stats(fcs, uf.untagged);
80 fabric = &fcs->fabric;
81 }
82
83 bfa_trc(fcs, ((u32 *) fchs)[0]);
84 bfa_trc(fcs, ((u32 *) fchs)[1]);
85 bfa_trc(fcs, ((u32 *) fchs)[2]);
86 bfa_trc(fcs, ((u32 *) fchs)[3]);
87 bfa_trc(fcs, ((u32 *) fchs)[4]);
88 bfa_trc(fcs, ((u32 *) fchs)[5]);
89 bfa_trc(fcs, len);
90
91 bfa_fcs_fabric_uf_recv(fabric, fchs, len);
92 bfa_uf_free(uf);
93}
94
95void
96bfa_fcs_uf_attach(struct bfa_fcs_s *fcs)
97{
98 bfa_uf_recv_register(fcs->bfa, bfa_fcs_uf_recv, fcs);
99}
diff --git a/drivers/scsi/bfa/bfa_fcxp.c b/drivers/scsi/bfa/bfa_fcxp.c
deleted file mode 100644
index 8258f88bfee6..000000000000
--- a/drivers/scsi/bfa/bfa_fcxp.c
+++ /dev/null
@@ -1,774 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <bfi/bfi_uf.h>
20#include <cs/bfa_debug.h>
21
22BFA_TRC_FILE(HAL, FCXP);
23BFA_MODULE(fcxp);
24
25/**
26 * forward declarations
27 */
28static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
29static void hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
30 struct bfi_fcxp_send_rsp_s *fcxp_rsp);
31static void hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
32 struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
33static void bfa_fcxp_qresume(void *cbarg);
34static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
35 struct bfi_fcxp_send_req_s *send_req);
36
37/**
38 * fcxp_pvt BFA FCXP private functions
39 */
40
41static void
42claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
43{
44 u8 *dm_kva = NULL;
45 u64 dm_pa;
46 u32 buf_pool_sz;
47
48 dm_kva = bfa_meminfo_dma_virt(mi);
49 dm_pa = bfa_meminfo_dma_phys(mi);
50
51 buf_pool_sz = mod->req_pld_sz * mod->num_fcxps;
52
53 /*
54 * Initialize the fcxp req payload list
55 */
56 mod->req_pld_list_kva = dm_kva;
57 mod->req_pld_list_pa = dm_pa;
58 dm_kva += buf_pool_sz;
59 dm_pa += buf_pool_sz;
60 bfa_os_memset(mod->req_pld_list_kva, 0, buf_pool_sz);
61
62 /*
63 * Initialize the fcxp rsp payload list
64 */
65 buf_pool_sz = mod->rsp_pld_sz * mod->num_fcxps;
66 mod->rsp_pld_list_kva = dm_kva;
67 mod->rsp_pld_list_pa = dm_pa;
68 dm_kva += buf_pool_sz;
69 dm_pa += buf_pool_sz;
70 bfa_os_memset(mod->rsp_pld_list_kva, 0, buf_pool_sz);
71
72 bfa_meminfo_dma_virt(mi) = dm_kva;
73 bfa_meminfo_dma_phys(mi) = dm_pa;
74}
75
76static void
77claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
78{
79 u16 i;
80 struct bfa_fcxp_s *fcxp;
81
82 fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi);
83 bfa_os_memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
84
85 INIT_LIST_HEAD(&mod->fcxp_free_q);
86 INIT_LIST_HEAD(&mod->fcxp_active_q);
87
88 mod->fcxp_list = fcxp;
89
90 for (i = 0; i < mod->num_fcxps; i++) {
91 fcxp->fcxp_mod = mod;
92 fcxp->fcxp_tag = i;
93
94 list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
95 bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
96 fcxp->reqq_waiting = BFA_FALSE;
97
98 fcxp = fcxp + 1;
99 }
100
101 bfa_meminfo_kva(mi) = (void *)fcxp;
102}
103
104static void
105bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
106 u32 *dm_len)
107{
108 u16 num_fcxp_reqs = cfg->fwcfg.num_fcxp_reqs;
109
110 if (num_fcxp_reqs == 0)
111 return;
112
113 /*
114 * Account for req/rsp payload
115 */
116 *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
117 if (cfg->drvcfg.min_cfg)
118 *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
119 else
120 *dm_len += BFA_FCXP_MAX_LBUF_SZ * num_fcxp_reqs;
121
122 /*
123 * Account for fcxp structs
124 */
125 *ndm_len += sizeof(struct bfa_fcxp_s) * num_fcxp_reqs;
126}
127
128static void
129bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
130 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
131{
132 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
133
134 bfa_os_memset(mod, 0, sizeof(struct bfa_fcxp_mod_s));
135 mod->bfa = bfa;
136 mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
137
138 /**
139 * Initialize FCXP request and response payload sizes.
140 */
141 mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
142 if (!cfg->drvcfg.min_cfg)
143 mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
144
145 INIT_LIST_HEAD(&mod->wait_q);
146
147 claim_fcxp_req_rsp_mem(mod, meminfo);
148 claim_fcxps_mem(mod, meminfo);
149}
150
151static void
152bfa_fcxp_detach(struct bfa_s *bfa)
153{
154}
155
156static void
157bfa_fcxp_start(struct bfa_s *bfa)
158{
159}
160
161static void
162bfa_fcxp_stop(struct bfa_s *bfa)
163{
164}
165
166static void
167bfa_fcxp_iocdisable(struct bfa_s *bfa)
168{
169 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
170 struct bfa_fcxp_s *fcxp;
171 struct list_head *qe, *qen;
172
173 list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
174 fcxp = (struct bfa_fcxp_s *) qe;
175 if (fcxp->caller == NULL) {
176 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
177 BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
178 bfa_fcxp_free(fcxp);
179 } else {
180 fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
181 bfa_cb_queue(bfa, &fcxp->hcb_qe,
182 __bfa_fcxp_send_cbfn, fcxp);
183 }
184 }
185}
186
187static struct bfa_fcxp_s *
188bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
189{
190 struct bfa_fcxp_s *fcxp;
191
192 bfa_q_deq(&fm->fcxp_free_q, &fcxp);
193
194 if (fcxp)
195 list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
196
197 return fcxp;
198}
199
200static void
201bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
202{
203 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
204 struct bfa_fcxp_wqe_s *wqe;
205
206 bfa_q_deq(&mod->wait_q, &wqe);
207 if (wqe) {
208 bfa_trc(mod->bfa, fcxp->fcxp_tag);
209 wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
210 return;
211 }
212
213 bfa_assert(bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
214 list_del(&fcxp->qe);
215 list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
216}
217
218static void
219bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
220 bfa_status_t req_status, u32 rsp_len,
221 u32 resid_len, struct fchs_s *rsp_fchs)
222{
223 /* discarded fcxp completion */
224}
225
226static void
227__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
228{
229 struct bfa_fcxp_s *fcxp = cbarg;
230
231 if (complete) {
232 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
233 fcxp->rsp_status, fcxp->rsp_len,
234 fcxp->residue_len, &fcxp->rsp_fchs);
235 } else {
236 bfa_fcxp_free(fcxp);
237 }
238}
239
240static void
241hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
242{
243 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
244 struct bfa_fcxp_s *fcxp;
245 u16 fcxp_tag = bfa_os_ntohs(fcxp_rsp->fcxp_tag);
246
247 bfa_trc(bfa, fcxp_tag);
248
249 fcxp_rsp->rsp_len = bfa_os_ntohl(fcxp_rsp->rsp_len);
250
251 /**
252 * @todo f/w should not set residue to non-0 when everything
253 * is received.
254 */
255 if (fcxp_rsp->req_status == BFA_STATUS_OK)
256 fcxp_rsp->residue_len = 0;
257 else
258 fcxp_rsp->residue_len = bfa_os_ntohl(fcxp_rsp->residue_len);
259
260 fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);
261
262 bfa_assert(fcxp->send_cbfn != NULL);
263
264 hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);
265
266 if (fcxp->send_cbfn != NULL) {
267 if (fcxp->caller == NULL) {
268 bfa_trc(mod->bfa, fcxp->fcxp_tag);
269
270 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
271 fcxp_rsp->req_status, fcxp_rsp->rsp_len,
272 fcxp_rsp->residue_len, &fcxp_rsp->fchs);
273 /*
274 * fcxp automatically freed on return from the callback
275 */
276 bfa_fcxp_free(fcxp);
277 } else {
278 bfa_trc(mod->bfa, fcxp->fcxp_tag);
279 fcxp->rsp_status = fcxp_rsp->req_status;
280 fcxp->rsp_len = fcxp_rsp->rsp_len;
281 fcxp->residue_len = fcxp_rsp->residue_len;
282 fcxp->rsp_fchs = fcxp_rsp->fchs;
283
284 bfa_cb_queue(bfa, &fcxp->hcb_qe,
285 __bfa_fcxp_send_cbfn, fcxp);
286 }
287 } else {
288 bfa_trc(bfa, fcxp_tag);
289 }
290}
291
292static void
293hal_fcxp_set_local_sges(struct bfi_sge_s *sge, u32 reqlen, u64 req_pa)
294{
295 union bfi_addr_u sga_zero = { {0} };
296
297 sge->sg_len = reqlen;
298 sge->flags = BFI_SGE_DATA_LAST;
299 bfa_dma_addr_set(sge[0].sga, req_pa);
300 bfa_sge_to_be(sge);
301 sge++;
302
303 sge->sga = sga_zero;
304 sge->sg_len = reqlen;
305 sge->flags = BFI_SGE_PGDLEN;
306 bfa_sge_to_be(sge);
307}
308
309static void
310hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
311 struct fchs_s *fchs)
312{
313 /*
314 * TODO: TX ox_id
315 */
316 if (reqlen > 0) {
317 if (fcxp->use_ireqbuf) {
318 u32 pld_w0 =
319 *((u32 *) BFA_FCXP_REQ_PLD(fcxp));
320
321 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
322 BFA_PL_EID_TX,
323 reqlen + sizeof(struct fchs_s), fchs, pld_w0);
324 } else {
325 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
326 BFA_PL_EID_TX, reqlen + sizeof(struct fchs_s),
327 fchs);
328 }
329 } else {
330 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
331 reqlen + sizeof(struct fchs_s), fchs);
332 }
333}
334
335static void
336hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
337 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
338{
339 if (fcxp_rsp->rsp_len > 0) {
340 if (fcxp->use_irspbuf) {
341 u32 pld_w0 =
342 *((u32 *) BFA_FCXP_RSP_PLD(fcxp));
343
344 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
345 BFA_PL_EID_RX,
346 (u16) fcxp_rsp->rsp_len,
347 &fcxp_rsp->fchs, pld_w0);
348 } else {
349 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
350 BFA_PL_EID_RX,
351 (u16) fcxp_rsp->rsp_len,
352 &fcxp_rsp->fchs);
353 }
354 } else {
355 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
356 (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
357 }
358}
359
360/**
361 * Handler to resume sending fcxp when space in available in cpe queue.
362 */
363static void
364bfa_fcxp_qresume(void *cbarg)
365{
366 struct bfa_fcxp_s *fcxp = cbarg;
367 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
368 struct bfi_fcxp_send_req_s *send_req;
369
370 fcxp->reqq_waiting = BFA_FALSE;
371 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
372 bfa_fcxp_queue(fcxp, send_req);
373}
374
375/**
376 * Queue fcxp send request to foimrware.
377 */
378static void
379bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
380{
381 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
382 struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
383 struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
384 struct bfa_rport_s *rport = reqi->bfa_rport;
385
386 bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
387 bfa_lpuid(bfa));
388
389 send_req->fcxp_tag = bfa_os_htons(fcxp->fcxp_tag);
390 if (rport) {
391 send_req->rport_fw_hndl = rport->fw_handle;
392 send_req->max_frmsz = bfa_os_htons(rport->rport_info.max_frmsz);
393 if (send_req->max_frmsz == 0)
394 send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ);
395 } else {
396 send_req->rport_fw_hndl = 0;
397 send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ);
398 }
399
400 send_req->vf_id = bfa_os_htons(reqi->vf_id);
401 send_req->lp_tag = reqi->lp_tag;
402 send_req->class = reqi->class;
403 send_req->rsp_timeout = rspi->rsp_timeout;
404 send_req->cts = reqi->cts;
405 send_req->fchs = reqi->fchs;
406
407 send_req->req_len = bfa_os_htonl(reqi->req_tot_len);
408 send_req->rsp_maxlen = bfa_os_htonl(rspi->rsp_maxlen);
409
410 /*
411 * setup req sgles
412 */
413 if (fcxp->use_ireqbuf == 1) {
414 hal_fcxp_set_local_sges(send_req->req_sge, reqi->req_tot_len,
415 BFA_FCXP_REQ_PLD_PA(fcxp));
416 } else {
417 if (fcxp->nreq_sgles > 0) {
418 bfa_assert(fcxp->nreq_sgles == 1);
419 hal_fcxp_set_local_sges(send_req->req_sge,
420 reqi->req_tot_len,
421 fcxp->req_sga_cbfn(fcxp->caller,
422 0));
423 } else {
424 bfa_assert(reqi->req_tot_len == 0);
425 hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
426 }
427 }
428
429 /*
430 * setup rsp sgles
431 */
432 if (fcxp->use_irspbuf == 1) {
433 bfa_assert(rspi->rsp_maxlen <= BFA_FCXP_MAX_LBUF_SZ);
434
435 hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen,
436 BFA_FCXP_RSP_PLD_PA(fcxp));
437
438 } else {
439 if (fcxp->nrsp_sgles > 0) {
440 bfa_assert(fcxp->nrsp_sgles == 1);
441 hal_fcxp_set_local_sges(send_req->rsp_sge,
442 rspi->rsp_maxlen,
443 fcxp->rsp_sga_cbfn(fcxp->caller,
444 0));
445 } else {
446 bfa_assert(rspi->rsp_maxlen == 0);
447 hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
448 }
449 }
450
451 hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
452
453 bfa_reqq_produce(bfa, BFA_REQQ_FCXP);
454
455 bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
456 bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
457}
458
459
460/**
461 * hal_fcxp_api BFA FCXP API
462 */
463
464/**
465 * Allocate an FCXP instance to send a response or to send a request
466 * that has a response. Request/response buffers are allocated by caller.
467 *
468 * @param[in] bfa BFA bfa instance
469 * @param[in] nreq_sgles Number of SG elements required for request
470 * buffer. 0, if fcxp internal buffers are used.
471 * Use bfa_fcxp_get_reqbuf() to get the
472 * internal req buffer.
473 * @param[in] req_sgles SG elements describing request buffer. Will be
474 * copied in by BFA and hence can be freed on
475 * return from this function.
476 * @param[in] get_req_sga function ptr to be called to get a request SG
477 * Address (given the sge index).
478 * @param[in] get_req_sglen function ptr to be called to get a request SG
479 * len (given the sge index).
480 * @param[in] get_rsp_sga function ptr to be called to get a response SG
481 * Address (given the sge index).
482 * @param[in] get_rsp_sglen function ptr to be called to get a response SG
483 * len (given the sge index).
484 *
485 * @return FCXP instance. NULL on failure.
486 */
487struct bfa_fcxp_s *
488bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
489 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
490 bfa_fcxp_get_sglen_t req_sglen_cbfn,
491 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
492 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
493{
494 struct bfa_fcxp_s *fcxp = NULL;
495 u32 nreq_sgpg, nrsp_sgpg;
496
497 bfa_assert(bfa != NULL);
498
499 fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
500 if (fcxp == NULL)
501 return NULL;
502
503 bfa_trc(bfa, fcxp->fcxp_tag);
504
505 fcxp->caller = caller;
506
507 if (nreq_sgles == 0) {
508 fcxp->use_ireqbuf = 1;
509 } else {
510 bfa_assert(req_sga_cbfn != NULL);
511 bfa_assert(req_sglen_cbfn != NULL);
512
513 fcxp->use_ireqbuf = 0;
514 fcxp->req_sga_cbfn = req_sga_cbfn;
515 fcxp->req_sglen_cbfn = req_sglen_cbfn;
516
517 fcxp->nreq_sgles = nreq_sgles;
518
519 /*
520 * alloc required sgpgs
521 */
522 if (nreq_sgles > BFI_SGE_INLINE) {
523 nreq_sgpg = BFA_SGPG_NPAGE(nreq_sgles);
524
525 if (bfa_sgpg_malloc(bfa, &fcxp->req_sgpg_q, nreq_sgpg)
526 != BFA_STATUS_OK) {
527 /*
528 * TODO
529 */
530 }
531 }
532 }
533
534 if (nrsp_sgles == 0) {
535 fcxp->use_irspbuf = 1;
536 } else {
537 bfa_assert(rsp_sga_cbfn != NULL);
538 bfa_assert(rsp_sglen_cbfn != NULL);
539
540 fcxp->use_irspbuf = 0;
541 fcxp->rsp_sga_cbfn = rsp_sga_cbfn;
542 fcxp->rsp_sglen_cbfn = rsp_sglen_cbfn;
543
544 fcxp->nrsp_sgles = nrsp_sgles;
545 /*
546 * alloc required sgpgs
547 */
548 if (nrsp_sgles > BFI_SGE_INLINE) {
549 nrsp_sgpg = BFA_SGPG_NPAGE(nreq_sgles);
550
551 if (bfa_sgpg_malloc
552 (bfa, &fcxp->rsp_sgpg_q, nrsp_sgpg)
553 != BFA_STATUS_OK) {
554 /* bfa_sgpg_wait(bfa, &fcxp->rsp_sgpg_wqe,
555 nrsp_sgpg); */
556 /*
557 * TODO
558 */
559 }
560 }
561 }
562
563 return fcxp;
564}
565
566/**
567 * Get the internal request buffer pointer
568 *
569 * @param[in] fcxp BFA fcxp pointer
570 *
571 * @return pointer to the internal request buffer
572 */
573void *
574bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
575{
576 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
577 void *reqbuf;
578
579 bfa_assert(fcxp->use_ireqbuf == 1);
580 reqbuf = ((u8 *)mod->req_pld_list_kva) +
581 fcxp->fcxp_tag * mod->req_pld_sz;
582 return reqbuf;
583}
584
585u32
586bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
587{
588 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
589
590 return mod->req_pld_sz;
591}
592
593/**
594 * Get the internal response buffer pointer
595 *
596 * @param[in] fcxp BFA fcxp pointer
597 *
 * @return		pointer to the internal response buffer
599 */
600void *
601bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
602{
603 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
604 void *rspbuf;
605
606 bfa_assert(fcxp->use_irspbuf == 1);
607
608 rspbuf = ((u8 *)mod->rsp_pld_list_kva) +
609 fcxp->fcxp_tag * mod->rsp_pld_sz;
610 return rspbuf;
611}
612
613/**
614 * Free the BFA FCXP
615 *
616 * @param[in] fcxp BFA fcxp pointer
617 *
618 * @return void
619 */
620void
621bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
622{
623 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
624
625 bfa_assert(fcxp != NULL);
626 bfa_trc(mod->bfa, fcxp->fcxp_tag);
627 bfa_fcxp_put(fcxp);
628}
629
630/**
631 * Send a FCXP request
632 *
633 * @param[in] fcxp BFA fcxp pointer
634 * @param[in] rport BFA rport pointer. Could be left NULL for WKA rports
635 * @param[in] vf_id virtual Fabric ID
636 * @param[in] lp_tag lport tag
 * @param[in]	cts	use Continuous sequence
638 * @param[in] cos fc Class of Service
639 * @param[in] reqlen request length, does not include FCHS length
640 * @param[in] fchs fc Header Pointer. The header content will be copied
641 * in by BFA.
642 *
643 * @param[in] cbfn call back function to be called on receiving
644 * the response
645 * @param[in] cbarg arg for cbfn
646 * @param[in] rsp_timeout
647 * response timeout
648 *
 * @return		void
650 */
651void
652bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
653 u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
654 u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
655 void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
656{
657 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
658 struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
659 struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
660 struct bfi_fcxp_send_req_s *send_req;
661
662 bfa_trc(bfa, fcxp->fcxp_tag);
663
664 /**
665 * setup request/response info
666 */
667 reqi->bfa_rport = rport;
668 reqi->vf_id = vf_id;
669 reqi->lp_tag = lp_tag;
670 reqi->class = cos;
671 rspi->rsp_timeout = rsp_timeout;
672 reqi->cts = cts;
673 reqi->fchs = *fchs;
674 reqi->req_tot_len = reqlen;
675 rspi->rsp_maxlen = rsp_maxlen;
676 fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
677 fcxp->send_cbarg = cbarg;
678
679 /**
680 * If no room in CPE queue, wait for space in request queue
681 */
682 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
683 if (!send_req) {
684 bfa_trc(bfa, fcxp->fcxp_tag);
685 fcxp->reqq_waiting = BFA_TRUE;
686 bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
687 return;
688 }
689
690 bfa_fcxp_queue(fcxp, send_req);
691}
692
693/**
694 * Abort a BFA FCXP
695 *
696 * @param[in] fcxp BFA fcxp pointer
697 *
698 * @return void
699 */
bfa_status_t
bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
{
	/*
	 * Abort is not implemented: trap any caller in debug builds,
	 * and report success otherwise.
	 */
	bfa_assert(0);
	return BFA_STATUS_OK;
}
706
707void
708bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
709 bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg)
710{
711 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
712
713 bfa_assert(list_empty(&mod->fcxp_free_q));
714
715 wqe->alloc_cbfn = alloc_cbfn;
716 wqe->alloc_cbarg = alloc_cbarg;
717 list_add_tail(&wqe->qe, &mod->wait_q);
718}
719
720void
721bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
722{
723 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
724
725 bfa_assert(bfa_q_is_on_q(&mod->wait_q, wqe));
726 list_del(&wqe->qe);
727}
728
729void
730bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
731{
732 /**
733 * If waiting for room in request queue, cancel reqq wait
734 * and free fcxp.
735 */
736 if (fcxp->reqq_waiting) {
737 fcxp->reqq_waiting = BFA_FALSE;
738 bfa_reqq_wcancel(&fcxp->reqq_wqe);
739 bfa_fcxp_free(fcxp);
740 return;
741 }
742
743 fcxp->send_cbfn = bfa_fcxp_null_comp;
744}
745
746
747
748/**
749 * hal_fcxp_public BFA FCXP public functions
750 */
751
752void
753bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
754{
755 switch (msg->mhdr.msg_id) {
756 case BFI_FCXP_I2H_SEND_RSP:
757 hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
758 break;
759
760 default:
761 bfa_trc(bfa, msg->mhdr.msg_id);
762 bfa_assert(0);
763 }
764}
765
766u32
767bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
768{
769 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
770
771 return mod->rsp_pld_sz;
772}
773
774
diff --git a/drivers/scsi/bfa/bfa_fcxp_priv.h b/drivers/scsi/bfa/bfa_fcxp_priv.h
deleted file mode 100644
index 4cda49397da0..000000000000
--- a/drivers/scsi/bfa/bfa_fcxp_priv.h
+++ /dev/null
@@ -1,138 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_FCXP_PRIV_H__
19#define __BFA_FCXP_PRIV_H__
20
21#include <cs/bfa_sm.h>
22#include <protocol/fc.h>
23#include <bfa_svc.h>
24#include <bfi/bfi_fcxp.h>
25
26#define BFA_FCXP_MIN (1)
27#define BFA_FCXP_MAX_IBUF_SZ (2 * 1024 + 256)
28#define BFA_FCXP_MAX_LBUF_SZ (4 * 1024 + 256)
29
30struct bfa_fcxp_mod_s {
31 struct bfa_s *bfa; /* backpointer to BFA */
32 struct bfa_fcxp_s *fcxp_list; /* array of FCXPs */
33 u16 num_fcxps; /* max num FCXP requests */
34 struct list_head fcxp_free_q; /* free FCXPs */
35 struct list_head fcxp_active_q; /* active FCXPs */
36 void *req_pld_list_kva; /* list of FCXP req pld */
37 u64 req_pld_list_pa; /* list of FCXP req pld */
38 void *rsp_pld_list_kva; /* list of FCXP resp pld */
39 u64 rsp_pld_list_pa; /* list of FCXP resp pld */
40 struct list_head wait_q; /* wait queue for free fcxp */
41 u32 req_pld_sz;
42 u32 rsp_pld_sz;
43};
44
45#define BFA_FCXP_MOD(__bfa) (&(__bfa)->modules.fcxp_mod)
46#define BFA_FCXP_FROM_TAG(__mod, __tag) (&(__mod)->fcxp_list[__tag])
47
48typedef void (*fcxp_send_cb_t) (struct bfa_s *ioc, struct bfa_fcxp_s *fcxp,
49 void *cb_arg, bfa_status_t req_status,
50 u32 rsp_len, u32 resid_len,
51 struct fchs_s *rsp_fchs);
52
53/**
54 * Information needed for a FCXP request
55 */
56struct bfa_fcxp_req_info_s {
57 struct bfa_rport_s *bfa_rport; /* Pointer to the bfa rport that was
58 *returned from bfa_rport_create().
59 *This could be left NULL for WKA or for
60 *FCXP interactions before the rport
61 *nexus is established
62 */
63 struct fchs_s fchs; /* request FC header structure */
64 u8 cts; /* continous sequence */
65 u8 class; /* FC class for the request/response */
66 u16 max_frmsz; /* max send frame size */
67 u16 vf_id; /* vsan tag if applicable */
68 u8 lp_tag; /* lport tag */
69 u32 req_tot_len; /* request payload total length */
70};
71
72struct bfa_fcxp_rsp_info_s {
73 struct fchs_s rsp_fchs; /* Response frame's FC header will
74 * be *sent back in this field */
75 u8 rsp_timeout; /* timeout in seconds, 0-no response
76 */
77 u8 rsvd2[3];
78 u32 rsp_maxlen; /* max response length expected */
79};
80
81struct bfa_fcxp_s {
82 struct list_head qe; /* fcxp queue element */
83 bfa_sm_t sm; /* state machine */
84 void *caller; /* driver or fcs */
85 struct bfa_fcxp_mod_s *fcxp_mod;
86 /* back pointer to fcxp mod */
87 u16 fcxp_tag; /* internal tag */
88 struct bfa_fcxp_req_info_s req_info;
89 /* request info */
90 struct bfa_fcxp_rsp_info_s rsp_info;
91 /* response info */
92 u8 use_ireqbuf; /* use internal req buf */
93 u8 use_irspbuf; /* use internal rsp buf */
94 u32 nreq_sgles; /* num request SGLEs */
95 u32 nrsp_sgles; /* num response SGLEs */
96 struct list_head req_sgpg_q; /* SG pages for request buf */
97 struct list_head req_sgpg_wqe; /* wait queue for req SG page */
98 struct list_head rsp_sgpg_q; /* SG pages for response buf */
99 struct list_head rsp_sgpg_wqe; /* wait queue for rsp SG page */
100
101 bfa_fcxp_get_sgaddr_t req_sga_cbfn;
102 /* SG elem addr user function */
103 bfa_fcxp_get_sglen_t req_sglen_cbfn;
104 /* SG elem len user function */
105 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn;
106 /* SG elem addr user function */
107 bfa_fcxp_get_sglen_t rsp_sglen_cbfn;
108 /* SG elem len user function */
109 bfa_cb_fcxp_send_t send_cbfn; /* send completion callback */
110 void *send_cbarg; /* callback arg */
111 struct bfa_sge_s req_sge[BFA_FCXP_MAX_SGES];
112 /* req SG elems */
113 struct bfa_sge_s rsp_sge[BFA_FCXP_MAX_SGES];
114 /* rsp SG elems */
115 u8 rsp_status; /* comp: rsp status */
116 u32 rsp_len; /* comp: actual response len */
117 u32 residue_len; /* comp: residual rsp length */
118 struct fchs_s rsp_fchs; /* comp: response fchs */
119 struct bfa_cb_qe_s hcb_qe; /* comp: callback qelem */
120 struct bfa_reqq_wait_s reqq_wqe;
121 bfa_boolean_t reqq_waiting;
122};
123
124#define BFA_FCXP_REQ_PLD(_fcxp) (bfa_fcxp_get_reqbuf(_fcxp))
125
126#define BFA_FCXP_RSP_FCHS(_fcxp) (&((_fcxp)->rsp_info.fchs))
127#define BFA_FCXP_RSP_PLD(_fcxp) (bfa_fcxp_get_rspbuf(_fcxp))
128
129#define BFA_FCXP_REQ_PLD_PA(_fcxp) \
130 ((_fcxp)->fcxp_mod->req_pld_list_pa + \
131 ((_fcxp)->fcxp_mod->req_pld_sz * (_fcxp)->fcxp_tag))
132
133#define BFA_FCXP_RSP_PLD_PA(_fcxp) \
134 ((_fcxp)->fcxp_mod->rsp_pld_list_pa + \
135 ((_fcxp)->fcxp_mod->rsp_pld_sz * (_fcxp)->fcxp_tag))
136
137void bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
138#endif /* __BFA_FCXP_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_fwimg_priv.h b/drivers/scsi/bfa/bfa_fwimg_priv.h
deleted file mode 100644
index d33e19e54395..000000000000
--- a/drivers/scsi/bfa/bfa_fwimg_priv.h
+++ /dev/null
@@ -1,44 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_FWIMG_PRIV_H__
19#define __BFA_FWIMG_PRIV_H__
20
21#define BFI_FLASH_CHUNK_SZ 256 /* Flash chunk size */
22#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
23
24/**
25 * BFI FW image type
26 */
27enum {
28 BFI_IMAGE_CB_FC,
29 BFI_IMAGE_CT_FC,
30 BFI_IMAGE_CT_CNA,
31 BFI_IMAGE_MAX,
32};
33
34extern u32 *bfi_image_get_chunk(int type, uint32_t off);
35extern u32 bfi_image_get_size(int type);
36extern u32 bfi_image_ct_fc_size;
37extern u32 bfi_image_ct_cna_size;
38extern u32 bfi_image_cb_fc_size;
39extern u32 *bfi_image_ct_fc;
40extern u32 *bfi_image_ct_cna;
41extern u32 *bfi_image_cb_fc;
42
43
44#endif /* __BFA_FWIMG_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c
index edfd729445cf..c787d3af0886 100644
--- a/drivers/scsi/bfa/bfa_hw_cb.c
+++ b/drivers/scsi/bfa/bfa_hw_cb.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -15,15 +15,15 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18#include <bfa_priv.h> 18#include "bfa_modules.h"
19#include <bfi/bfi_cbreg.h> 19#include "bfi_cbreg.h"
20 20
21void 21void
22bfa_hwcb_reginit(struct bfa_s *bfa) 22bfa_hwcb_reginit(struct bfa_s *bfa)
23{ 23{
24 struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs; 24 struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
25 bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc); 25 bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc);
26 int i, q, fn = bfa_ioc_pcifn(&bfa->ioc); 26 int i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
27 27
28 if (fn == 0) { 28 if (fn == 0) {
29 bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS); 29 bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS);
diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c
index a357fb3066fd..c97ebafec5ea 100644
--- a/drivers/scsi/bfa/bfa_hw_ct.c
+++ b/drivers/scsi/bfa/bfa_hw_ct.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -15,9 +15,8 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18#include <bfa_priv.h> 18#include "bfa_modules.h"
19#include <bfi/bfi_ctreg.h> 19#include "bfi_ctreg.h"
20#include <bfa_ioc.h>
21 20
22BFA_TRC_FILE(HAL, IOCFC_CT); 21BFA_TRC_FILE(HAL, IOCFC_CT);
23 22
@@ -53,7 +52,7 @@ bfa_hwct_reginit(struct bfa_s *bfa)
53{ 52{
54 struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs; 53 struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
55 bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc); 54 bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc);
56 int i, q, fn = bfa_ioc_pcifn(&bfa->ioc); 55 int i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
57 56
58 if (fn == 0) { 57 if (fn == 0) {
59 bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS); 58 bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS);
@@ -87,7 +86,7 @@ bfa_hwct_reginit(struct bfa_s *bfa)
87void 86void
88bfa_hwct_reqq_ack(struct bfa_s *bfa, int reqq) 87bfa_hwct_reqq_ack(struct bfa_s *bfa, int reqq)
89{ 88{
90 u32 r32; 89 u32 r32;
91 90
92 r32 = bfa_reg_read(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]); 91 r32 = bfa_reg_read(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
93 bfa_reg_write(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq], r32); 92 bfa_reg_write(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq], r32);
diff --git a/drivers/scsi/bfa/bfa_intr.c b/drivers/scsi/bfa/bfa_intr.c
deleted file mode 100644
index 493678889b24..000000000000
--- a/drivers/scsi/bfa/bfa_intr.c
+++ /dev/null
@@ -1,270 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#include <bfa.h>
18#include <bfi/bfi_ctreg.h>
19#include <bfa_port_priv.h>
20#include <bfa_intr_priv.h>
21#include <cs/bfa_debug.h>
22
23BFA_TRC_FILE(HAL, INTR);
24
25static void
26bfa_msix_errint(struct bfa_s *bfa, u32 intr)
27{
28 bfa_ioc_error_isr(&bfa->ioc);
29}
30
31static void
32bfa_msix_lpu(struct bfa_s *bfa)
33{
34 bfa_ioc_mbox_isr(&bfa->ioc);
35}
36
37static void
38bfa_reqq_resume(struct bfa_s *bfa, int qid)
39{
40 struct list_head *waitq, *qe, *qen;
41 struct bfa_reqq_wait_s *wqe;
42
43 waitq = bfa_reqq(bfa, qid);
44 list_for_each_safe(qe, qen, waitq) {
45 /**
46 * Callback only as long as there is room in request queue
47 */
48 if (bfa_reqq_full(bfa, qid))
49 break;
50
51 list_del(qe);
52 wqe = (struct bfa_reqq_wait_s *) qe;
53 wqe->qresume(wqe->cbarg);
54 }
55}
56
57void
58bfa_msix_all(struct bfa_s *bfa, int vec)
59{
60 bfa_intx(bfa);
61}
62
63/**
64 * hal_intr_api
65 */
66bfa_boolean_t
67bfa_intx(struct bfa_s *bfa)
68{
69 u32 intr, qintr;
70 int queue;
71
72 intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
73 if (!intr)
74 return BFA_FALSE;
75
76 /**
77 * RME completion queue interrupt
78 */
79 qintr = intr & __HFN_INT_RME_MASK;
80 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);
81
82 for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
83 if (intr & (__HFN_INT_RME_Q0 << queue))
84 bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
85 }
86 intr &= ~qintr;
87 if (!intr)
88 return BFA_TRUE;
89
90 /**
91 * CPE completion queue interrupt
92 */
93 qintr = intr & __HFN_INT_CPE_MASK;
94 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);
95
96 for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
97 if (intr & (__HFN_INT_CPE_Q0 << queue))
98 bfa_msix_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
99 }
100 intr &= ~qintr;
101 if (!intr)
102 return BFA_TRUE;
103
104 bfa_msix_lpu_err(bfa, intr);
105
106 return BFA_TRUE;
107}
108
109void
110bfa_isr_enable(struct bfa_s *bfa)
111{
112 u32 intr_unmask;
113 int pci_func = bfa_ioc_pcifn(&bfa->ioc);
114
115 bfa_trc(bfa, pci_func);
116
117 bfa_msix_install(bfa);
118 intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
119 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS |
120 __HFN_INT_LL_HALT);
121
122 if (pci_func == 0)
123 intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
124 __HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 |
125 __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 |
126 __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 |
127 __HFN_INT_MBOX_LPU0);
128 else
129 intr_unmask |= (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 |
130 __HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 |
131 __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
132 __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
133 __HFN_INT_MBOX_LPU1);
134
135 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr_unmask);
136 bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, ~intr_unmask);
137 bfa->iocfc.intr_mask = ~intr_unmask;
138 bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
139}
140
141void
142bfa_isr_disable(struct bfa_s *bfa)
143{
144 bfa_isr_mode_set(bfa, BFA_FALSE);
145 bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L);
146 bfa_msix_uninstall(bfa);
147}
148
149void
150bfa_msix_reqq(struct bfa_s *bfa, int qid)
151{
152 struct list_head *waitq;
153
154 qid &= (BFI_IOC_MAX_CQS - 1);
155
156 bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);
157
158 /**
159 * Resume any pending requests in the corresponding reqq.
160 */
161 waitq = bfa_reqq(bfa, qid);
162 if (!list_empty(waitq))
163 bfa_reqq_resume(bfa, qid);
164}
165
166void
167bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
168{
169 bfa_trc(bfa, m->mhdr.msg_class);
170 bfa_trc(bfa, m->mhdr.msg_id);
171 bfa_trc(bfa, m->mhdr.mtag.i2htok);
172 bfa_assert(0);
173 bfa_trc_stop(bfa->trcmod);
174}
175
176void
177bfa_msix_rspq(struct bfa_s *bfa, int qid)
178{
179 struct bfi_msg_s *m;
180 u32 pi, ci;
181 struct list_head *waitq;
182
183 bfa_trc_fp(bfa, qid);
184
185 qid &= (BFI_IOC_MAX_CQS - 1);
186
187 bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);
188
189 ci = bfa_rspq_ci(bfa, qid);
190 pi = bfa_rspq_pi(bfa, qid);
191
192 bfa_trc_fp(bfa, ci);
193 bfa_trc_fp(bfa, pi);
194
195 if (bfa->rme_process) {
196 while (ci != pi) {
197 m = bfa_rspq_elem(bfa, qid, ci);
198 bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX);
199
200 bfa_isrs[m->mhdr.msg_class] (bfa, m);
201
202 CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
203 }
204 }
205
206 /**
207 * update CI
208 */
209 bfa_rspq_ci(bfa, qid) = pi;
210 bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[qid], pi);
211 bfa_os_mmiowb();
212
213 /**
214 * Resume any pending requests in the corresponding reqq.
215 */
216 waitq = bfa_reqq(bfa, qid);
217 if (!list_empty(waitq))
218 bfa_reqq_resume(bfa, qid);
219}
220
221void
222bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
223{
224 u32 intr, curr_value;
225
226 intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
227
228 if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
229 bfa_msix_lpu(bfa);
230
231 intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
232 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT);
233
234 if (intr) {
235 if (intr & __HFN_INT_LL_HALT) {
236 /**
237 * If LL_HALT bit is set then FW Init Halt LL Port
238 * Register needs to be cleared as well so Interrupt
239 * Status Register will be cleared.
240 */
241 curr_value = bfa_reg_read(bfa->ioc.ioc_regs.ll_halt);
242 curr_value &= ~__FW_INIT_HALT_P;
243 bfa_reg_write(bfa->ioc.ioc_regs.ll_halt, curr_value);
244 }
245
246 if (intr & __HFN_INT_ERR_PSS) {
247 /**
248 * ERR_PSS bit needs to be cleared as well in case
249 * interrups are shared so driver's interrupt handler is
250 * still called eventhough it is already masked out.
251 */
252 curr_value = bfa_reg_read(
253 bfa->ioc.ioc_regs.pss_err_status_reg);
254 curr_value &= __PSS_ERR_STATUS_SET;
255 bfa_reg_write(bfa->ioc.ioc_regs.pss_err_status_reg,
256 curr_value);
257 }
258
259 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr);
260 bfa_msix_errint(bfa, intr);
261 }
262}
263
264void
265bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func)
266{
267 bfa_isrs[mc] = isr_func;
268}
269
270
diff --git a/drivers/scsi/bfa/bfa_intr_priv.h b/drivers/scsi/bfa/bfa_intr_priv.h
deleted file mode 100644
index 5fc301cf4d1b..000000000000
--- a/drivers/scsi/bfa/bfa_intr_priv.h
+++ /dev/null
@@ -1,117 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_INTR_PRIV_H__
19#define __BFA_INTR_PRIV_H__
20
21/**
22 * Message handler
23 */
24typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m);
25void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
26void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func);
27
28
29#define bfa_reqq_pi(__bfa, __reqq) ((__bfa)->iocfc.req_cq_pi[__reqq])
30#define bfa_reqq_ci(__bfa, __reqq) \
31 (*(u32 *)((__bfa)->iocfc.req_cq_shadow_ci[__reqq].kva))
32
33#define bfa_reqq_full(__bfa, __reqq) \
34 (((bfa_reqq_pi(__bfa, __reqq) + 1) & \
35 ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1)) == \
36 bfa_reqq_ci(__bfa, __reqq))
37
38#define bfa_reqq_next(__bfa, __reqq) \
39 (bfa_reqq_full(__bfa, __reqq) ? NULL : \
40 ((void *)((struct bfi_msg_s *)((__bfa)->iocfc.req_cq_ba[__reqq].kva) \
41 + bfa_reqq_pi((__bfa), (__reqq)))))
42
43#define bfa_reqq_produce(__bfa, __reqq) do { \
44 (__bfa)->iocfc.req_cq_pi[__reqq]++; \
45 (__bfa)->iocfc.req_cq_pi[__reqq] &= \
46 ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \
47 bfa_reg_write((__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq], \
48 (__bfa)->iocfc.req_cq_pi[__reqq]); \
49 bfa_os_mmiowb(); \
50} while (0)
51
52#define bfa_rspq_pi(__bfa, __rspq) \
53 (*(u32 *)((__bfa)->iocfc.rsp_cq_shadow_pi[__rspq].kva))
54
55#define bfa_rspq_ci(__bfa, __rspq) ((__bfa)->iocfc.rsp_cq_ci[__rspq])
56#define bfa_rspq_elem(__bfa, __rspq, __ci) \
57 (&((struct bfi_msg_s *)((__bfa)->iocfc.rsp_cq_ba[__rspq].kva))[__ci])
58
59#define CQ_INCR(__index, __size) do { \
60 (__index)++; \
61 (__index) &= ((__size) - 1); \
62} while (0)
63
64/**
65 * Queue element to wait for room in request queue. FIFO order is
66 * maintained when fullfilling requests.
67 */
68struct bfa_reqq_wait_s {
69 struct list_head qe;
70 void (*qresume) (void *cbarg);
71 void *cbarg;
72};
73
74/**
75 * Circular queue usage assignments
76 */
77enum {
78 BFA_REQQ_IOC = 0, /* all low-priority IOC msgs */
79 BFA_REQQ_FCXP = 0, /* all FCXP messages */
80 BFA_REQQ_LPS = 0, /* all lport service msgs */
81 BFA_REQQ_PORT = 0, /* all port messages */
82 BFA_REQQ_FLASH = 0, /* for flash module */
83 BFA_REQQ_DIAG = 0, /* for diag module */
84 BFA_REQQ_RPORT = 0, /* all port messages */
85 BFA_REQQ_SBOOT = 0, /* all san boot messages */
86 BFA_REQQ_QOS_LO = 1, /* all low priority IO */
87 BFA_REQQ_QOS_MD = 2, /* all medium priority IO */
88 BFA_REQQ_QOS_HI = 3, /* all high priority IO */
89};
90
91static inline void
92bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
93 void *cbarg)
94{
95 wqe->qresume = qresume;
96 wqe->cbarg = cbarg;
97}
98
99#define bfa_reqq(__bfa, __reqq) (&(__bfa)->reqq_waitq[__reqq])
100
101/**
102 * static inline void
103 * bfa_reqq_wait(struct bfa_s *bfa, int reqq, struct bfa_reqq_wait_s *wqe)
104 */
105#define bfa_reqq_wait(__bfa, __reqq, __wqe) do { \
106 \
107 struct list_head *waitq = bfa_reqq(__bfa, __reqq); \
108 \
109 bfa_assert(((__reqq) < BFI_IOC_MAX_CQS)); \
110 bfa_assert((__wqe)->qresume && (__wqe)->cbarg); \
111 \
112 list_add_tail(&(__wqe)->qe, waitq); \
113} while (0)
114
115#define bfa_reqq_wcancel(__wqe) list_del(&(__wqe)->qe)
116
117#endif /* __BFA_INTR_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 8e78f20110a5..6795b247791a 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -15,35 +15,33 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18#include <bfa.h> 18#include "bfa_ioc.h"
19#include <bfa_ioc.h> 19#include "bfi_ctreg.h"
20#include <bfa_fwimg_priv.h> 20#include "bfa_defs.h"
21#include <cna/bfa_cna_trcmod.h> 21#include "bfa_defs_svc.h"
22#include <cs/bfa_debug.h> 22#include "bfad_drv.h"
23#include <bfi/bfi_ioc.h>
24#include <bfi/bfi_ctreg.h>
25#include <aen/bfa_aen_ioc.h>
26#include <aen/bfa_aen.h>
27#include <log/bfa_log_hal.h>
28#include <defs/bfa_defs_pci.h>
29 23
30BFA_TRC_FILE(CNA, IOC); 24BFA_TRC_FILE(CNA, IOC);
31 25
32/** 26/**
33 * IOC local definitions 27 * IOC local definitions
34 */ 28 */
35#define BFA_IOC_TOV 2000 /* msecs */ 29#define BFA_IOC_TOV 3000 /* msecs */
36#define BFA_IOC_HWSEM_TOV 500 /* msecs */ 30#define BFA_IOC_HWSEM_TOV 500 /* msecs */
37#define BFA_IOC_HB_TOV 500 /* msecs */ 31#define BFA_IOC_HB_TOV 500 /* msecs */
38#define BFA_IOC_HWINIT_MAX 2 32#define BFA_IOC_HWINIT_MAX 2
39#define BFA_IOC_FWIMG_MINSZ (16 * 1024) 33#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV
40#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV
41 34
42#define bfa_ioc_timer_start(__ioc) \ 35#define bfa_ioc_timer_start(__ioc) \
43 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \ 36 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
44 bfa_ioc_timeout, (__ioc), BFA_IOC_TOV) 37 bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
45#define bfa_ioc_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer) 38#define bfa_ioc_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
46 39
40#define bfa_hb_timer_start(__ioc) \
41 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer, \
42 bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
43#define bfa_hb_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->hb_timer)
44
47#define BFA_DBG_FWTRC_ENTS (BFI_IOC_TRC_ENTS) 45#define BFA_DBG_FWTRC_ENTS (BFI_IOC_TRC_ENTS)
48#define BFA_DBG_FWTRC_LEN \ 46#define BFA_DBG_FWTRC_LEN \
49 (BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) + \ 47 (BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) + \
@@ -55,100 +53,226 @@ BFA_TRC_FILE(CNA, IOC);
55 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. 53 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
56 */ 54 */
57 55
58#define bfa_ioc_firmware_lock(__ioc) \ 56#define bfa_ioc_firmware_lock(__ioc) \
59 ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc)) 57 ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
60#define bfa_ioc_firmware_unlock(__ioc) \ 58#define bfa_ioc_firmware_unlock(__ioc) \
61 ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc)) 59 ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
62#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc)) 60#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
63#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc)) 61#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
64#define bfa_ioc_notify_hbfail(__ioc) \ 62#define bfa_ioc_notify_hbfail(__ioc) \
65 ((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc)) 63 ((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))
66#define bfa_ioc_is_optrom(__ioc) \
67 (bfi_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
68 64
69bfa_boolean_t bfa_auto_recover = BFA_TRUE; 65#ifdef BFA_IOC_IS_UEFI
66#define bfa_ioc_is_bios_optrom(__ioc) (0)
67#define bfa_ioc_is_uefi(__ioc) BFA_IOC_IS_UEFI
68#else
69#define bfa_ioc_is_bios_optrom(__ioc) \
70 (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
71#define bfa_ioc_is_uefi(__ioc) (0)
72#endif
73
74#define bfa_ioc_mbox_cmd_pending(__ioc) \
75 (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
76 bfa_reg_read((__ioc)->ioc_regs.hfn_mbox_cmd))
77
78bfa_boolean_t bfa_auto_recover = BFA_TRUE;
70 79
71/* 80/*
72 * forward declarations 81 * forward declarations
73 */ 82 */
74static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc); 83static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
75static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc); 84static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc);
76static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force); 85static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
77static void bfa_ioc_timeout(void *ioc); 86static void bfa_ioc_timeout(void *ioc);
78static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc); 87static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
79static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc); 88static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
80static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc); 89static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
81static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc); 90static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
82static void bfa_ioc_hb_stop(struct bfa_ioc_s *ioc); 91static void bfa_ioc_hb_stop(struct bfa_ioc_s *ioc);
83static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force); 92static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force);
84static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc); 93static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
85static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc); 94static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
86static void bfa_ioc_recover(struct bfa_ioc_s *ioc); 95static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
87static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc); 96static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
88static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc); 97static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
89static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc); 98static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
99static void bfa_ioc_pf_enabled(struct bfa_ioc_s *ioc);
100static void bfa_ioc_pf_disabled(struct bfa_ioc_s *ioc);
101static void bfa_ioc_pf_failed(struct bfa_ioc_s *ioc);
102static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
90 103
91/** 104/**
92 * bfa_ioc_sm 105 * hal_ioc_sm
93 */ 106 */
94 107
95/** 108/**
96 * IOC state machine events 109 * IOC state machine definitions/declarations
97 */ 110 */
98enum ioc_event { 111enum ioc_event {
99 IOC_E_ENABLE = 1, /* IOC enable request */ 112 IOC_E_RESET = 1, /* IOC reset request */
100 IOC_E_DISABLE = 2, /* IOC disable request */ 113 IOC_E_ENABLE = 2, /* IOC enable request */
101 IOC_E_TIMEOUT = 3, /* f/w response timeout */ 114 IOC_E_DISABLE = 3, /* IOC disable request */
102 IOC_E_FWREADY = 4, /* f/w initialization done */ 115 IOC_E_DETACH = 4, /* driver detach cleanup */
103 IOC_E_FWRSP_GETATTR = 5, /* IOC get attribute response */ 116 IOC_E_ENABLED = 5, /* f/w enabled */
104 IOC_E_FWRSP_ENABLE = 6, /* enable f/w response */ 117 IOC_E_FWRSP_GETATTR = 6, /* IOC get attribute response */
105 IOC_E_FWRSP_DISABLE = 7, /* disable f/w response */ 118 IOC_E_DISABLED = 7, /* f/w disabled */
106 IOC_E_HBFAIL = 8, /* heartbeat failure */ 119 IOC_E_FAILED = 8, /* failure notice by iocpf sm */
107 IOC_E_HWERROR = 9, /* hardware error interrupt */ 120 IOC_E_HBFAIL = 9, /* heartbeat failure */
108 IOC_E_SEMLOCKED = 10, /* h/w semaphore is locked */ 121 IOC_E_HWERROR = 10, /* hardware error interrupt */
109 IOC_E_DETACH = 11, /* driver detach cleanup */ 122 IOC_E_TIMEOUT = 11, /* timeout */
110}; 123};
111 124
125bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
112bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event); 126bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
113bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc_s, enum ioc_event);
114bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc_s, enum ioc_event);
115bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc_s, enum ioc_event);
116bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc_s, enum ioc_event);
117bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event); 127bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
118bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event); 128bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
119bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event); 129bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
120bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc_s, enum ioc_event); 130bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc_s, enum ioc_event);
121bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc_s, enum ioc_event); 131bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
122bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event); 132bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
123bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event); 133bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
124 134
125static struct bfa_sm_table_s ioc_sm_table[] = { 135static struct bfa_sm_table_s ioc_sm_table[] = {
136 {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
126 {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET}, 137 {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
127 {BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH}, 138 {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
128 {BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH},
129 {BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT},
130 {BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT},
131 {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT},
132 {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR}, 139 {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
133 {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL}, 140 {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
134 {BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL}, 141 {BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
135 {BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL}, 142 {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
136 {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING}, 143 {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
137 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, 144 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
138}; 145};
139 146
140/** 147/**
148 * IOCPF state machine definitions/declarations
149 */
150
151#define bfa_iocpf_timer_start(__ioc) \
152 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
153 bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
154#define bfa_iocpf_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
155
156#define bfa_iocpf_recovery_timer_start(__ioc) \
157 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
158 bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV_RECOVER)
159
160#define bfa_sem_timer_start(__ioc) \
161 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer, \
162 bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
163#define bfa_sem_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->sem_timer)
164
165/*
166 * Forward declareations for iocpf state machine
167 */
168static void bfa_iocpf_enable(struct bfa_ioc_s *ioc);
169static void bfa_iocpf_disable(struct bfa_ioc_s *ioc);
170static void bfa_iocpf_fail(struct bfa_ioc_s *ioc);
171static void bfa_iocpf_initfail(struct bfa_ioc_s *ioc);
172static void bfa_iocpf_getattrfail(struct bfa_ioc_s *ioc);
173static void bfa_iocpf_stop(struct bfa_ioc_s *ioc);
174static void bfa_iocpf_timeout(void *ioc_arg);
175static void bfa_iocpf_sem_timeout(void *ioc_arg);
176
177/**
178 * IOCPF state machine events
179 */
180enum iocpf_event {
181 IOCPF_E_ENABLE = 1, /* IOCPF enable request */
182 IOCPF_E_DISABLE = 2, /* IOCPF disable request */
183 IOCPF_E_STOP = 3, /* stop on driver detach */
184 IOCPF_E_FWREADY = 4, /* f/w initialization done */
185 IOCPF_E_FWRSP_ENABLE = 5, /* enable f/w response */
186 IOCPF_E_FWRSP_DISABLE = 6, /* disable f/w response */
187 IOCPF_E_FAIL = 7, /* failure notice by ioc sm */
188 IOCPF_E_INITFAIL = 8, /* init fail notice by ioc sm */
189 IOCPF_E_GETATTRFAIL = 9, /* init fail notice by ioc sm */
190 IOCPF_E_SEMLOCKED = 10, /* h/w semaphore is locked */
191 IOCPF_E_TIMEOUT = 11, /* f/w response timeout */
192};
193
194/**
195 * IOCPF states
196 */
197enum bfa_iocpf_state {
198 BFA_IOCPF_RESET = 1, /* IOC is in reset state */
199 BFA_IOCPF_SEMWAIT = 2, /* Waiting for IOC h/w semaphore */
200 BFA_IOCPF_HWINIT = 3, /* IOC h/w is being initialized */
201 BFA_IOCPF_READY = 4, /* IOCPF is initialized */
202 BFA_IOCPF_INITFAIL = 5, /* IOCPF failed */
203 BFA_IOCPF_FAIL = 6, /* IOCPF failed */
204 BFA_IOCPF_DISABLING = 7, /* IOCPF is being disabled */
205 BFA_IOCPF_DISABLED = 8, /* IOCPF is disabled */
206 BFA_IOCPF_FWMISMATCH = 9, /* IOC f/w different from drivers */
207};
208
209bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
210bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
211bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
212bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
213bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
214bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
215bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
216bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
217bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
218bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
219bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);
220
221static struct bfa_sm_table_s iocpf_sm_table[] = {
222 {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
223 {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
224 {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
225 {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
226 {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
227 {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
228 {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
229 {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
230 {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
231 {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
232 {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
233};
234
235/**
236 * IOC State Machine
237 */
238
239/**
240 * Beginning state. IOC uninit state.
241 */
242
243static void
244bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
245{
246}
247
248/**
249 * IOC is in uninit state.
250 */
251static void
252bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
253{
254 bfa_trc(ioc, event);
255
256 switch (event) {
257 case IOC_E_RESET:
258 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
259 break;
260
261 default:
262 bfa_sm_fault(ioc, event);
263 }
264}
265/**
141 * Reset entry actions -- initialize state machine 266 * Reset entry actions -- initialize state machine
142 */ 267 */
143static void 268static void
144bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc) 269bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
145{ 270{
146 ioc->retry_count = 0; 271 bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
147 ioc->auto_recover = bfa_auto_recover;
148} 272}
149 273
150/** 274/**
151 * Beginning state. IOC is in reset state. 275 * IOC is in reset state.
152 */ 276 */
153static void 277static void
154bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event) 278bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
@@ -157,7 +281,7 @@ bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
157 281
158 switch (event) { 282 switch (event) {
159 case IOC_E_ENABLE: 283 case IOC_E_ENABLE:
160 bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck); 284 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
161 break; 285 break;
162 286
163 case IOC_E_DISABLE: 287 case IOC_E_DISABLE:
@@ -165,6 +289,7 @@ bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
165 break; 289 break;
166 290
167 case IOC_E_DETACH: 291 case IOC_E_DETACH:
292 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
168 break; 293 break;
169 294
170 default: 295 default:
@@ -172,46 +297,209 @@ bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
172 } 297 }
173} 298}
174 299
300
301static void
302bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
303{
304 bfa_iocpf_enable(ioc);
305}
306
175/** 307/**
176 * Semaphore should be acquired for version check. 308 * Host IOC function is being enabled, awaiting response from firmware.
309 * Semaphore is acquired.
177 */ 310 */
178static void 311static void
179bfa_ioc_sm_fwcheck_entry(struct bfa_ioc_s *ioc) 312bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
180{ 313{
181 bfa_ioc_hw_sem_get(ioc); 314 bfa_trc(ioc, event);
315
316 switch (event) {
317 case IOC_E_ENABLED:
318 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
319 break;
320
321 case IOC_E_FAILED:
322 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
323 break;
324
325 case IOC_E_HWERROR:
326 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
327 bfa_iocpf_initfail(ioc);
328 break;
329
330 case IOC_E_DISABLE:
331 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
332 break;
333
334 case IOC_E_DETACH:
335 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
336 bfa_iocpf_stop(ioc);
337 break;
338
339 case IOC_E_ENABLE:
340 break;
341
342 default:
343 bfa_sm_fault(ioc, event);
344 }
345}
346
347
348static void
349bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
350{
351 bfa_ioc_timer_start(ioc);
352 bfa_ioc_send_getattr(ioc);
182} 353}
183 354
184/** 355/**
185 * Awaiting h/w semaphore to continue with version check. 356 * IOC configuration in progress. Timer is active.
186 */ 357 */
187static void 358static void
188bfa_ioc_sm_fwcheck(struct bfa_ioc_s *ioc, enum ioc_event event) 359bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
189{ 360{
190 bfa_trc(ioc, event); 361 bfa_trc(ioc, event);
191 362
192 switch (event) { 363 switch (event) {
193 case IOC_E_SEMLOCKED: 364 case IOC_E_FWRSP_GETATTR:
194 if (bfa_ioc_firmware_lock(ioc)) { 365 bfa_ioc_timer_stop(ioc);
195 ioc->retry_count = 0; 366 bfa_ioc_check_attr_wwns(ioc);
196 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit); 367 bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
197 } else { 368 break;
198 bfa_ioc_hw_sem_release(ioc); 369
199 bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch); 370 case IOC_E_FAILED:
200 } 371 bfa_ioc_timer_stop(ioc);
372 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
373 break;
374
375 case IOC_E_HWERROR:
376 bfa_ioc_timer_stop(ioc);
377 /* fall through */
378
379 case IOC_E_TIMEOUT:
380 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
381 bfa_iocpf_getattrfail(ioc);
201 break; 382 break;
202 383
203 case IOC_E_DISABLE: 384 case IOC_E_DISABLE:
204 bfa_ioc_disable_comp(ioc); 385 bfa_ioc_timer_stop(ioc);
386 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
387 break;
388
389 case IOC_E_ENABLE:
390 break;
391
392 default:
393 bfa_sm_fault(ioc, event);
394 }
395}
396
397
398static void
399bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
400{
401 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
402
403 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
404 bfa_ioc_hb_monitor(ioc);
405 BFA_LOG(KERN_INFO, bfad, log_level, "IOC enabled\n");
406}
407
408static void
409bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
410{
411 bfa_trc(ioc, event);
412
413 switch (event) {
414 case IOC_E_ENABLE:
415 break;
416
417 case IOC_E_DISABLE:
418 bfa_ioc_hb_stop(ioc);
419 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
420 break;
421
422 case IOC_E_FAILED:
423 bfa_ioc_hb_stop(ioc);
424 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
425 break;
426
427 case IOC_E_HWERROR:
428 bfa_ioc_hb_stop(ioc);
429 /* !!! fall through !!! */
430
431 case IOC_E_HBFAIL:
432 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
433 bfa_iocpf_fail(ioc);
434 break;
435
436 default:
437 bfa_sm_fault(ioc, event);
438 }
439}
440
441
442static void
443bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
444{
445 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
446 bfa_iocpf_disable(ioc);
447 BFA_LOG(KERN_INFO, bfad, log_level, "IOC disabled\n");
448}
449
450/**
451 * IOC is being disabled
452 */
453static void
454bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
455{
456 bfa_trc(ioc, event);
457
458 switch (event) {
459 case IOC_E_DISABLED:
460 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
461 break;
462
463 case IOC_E_HWERROR:
205 /* 464 /*
206 * fall through 465 * No state change. Will move to disabled state
466 * after iocpf sm completes failure processing and
467 * moves to disabled state.
207 */ 468 */
469 bfa_iocpf_fail(ioc);
470 break;
208 471
209 case IOC_E_DETACH: 472 default:
210 bfa_ioc_hw_sem_get_cancel(ioc); 473 bfa_sm_fault(ioc, event);
211 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); 474 }
475}
476
477/**
478 * IOC disable completion entry.
479 */
480static void
481bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
482{
483 bfa_ioc_disable_comp(ioc);
484}
485
486static void
487bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
488{
489 bfa_trc(ioc, event);
490
491 switch (event) {
492 case IOC_E_ENABLE:
493 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
494 break;
495
496 case IOC_E_DISABLE:
497 ioc->cbfn->disable_cbfn(ioc->bfa);
212 break; 498 break;
213 499
214 case IOC_E_FWREADY: 500 case IOC_E_DETACH:
501 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
502 bfa_iocpf_stop(ioc);
215 break; 503 break;
216 504
217 default: 505 default:
@@ -219,48 +507,138 @@ bfa_ioc_sm_fwcheck(struct bfa_ioc_s *ioc, enum ioc_event event)
219 } 507 }
220} 508}
221 509
510
511static void
512bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc)
513{
514 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
515}
516
222/** 517/**
223 * Notify enable completion callback and generate mismatch AEN. 518 * Hardware initialization failed.
224 */ 519 */
225static void 520static void
226bfa_ioc_sm_mismatch_entry(struct bfa_ioc_s *ioc) 521bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event)
522{
523 bfa_trc(ioc, event);
524
525 switch (event) {
526 case IOC_E_ENABLED:
527 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
528 break;
529
530 case IOC_E_FAILED:
531 /**
532 * Initialization failure during iocpf init retry.
533 */
534 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
535 break;
536
537 case IOC_E_DISABLE:
538 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
539 break;
540
541 case IOC_E_DETACH:
542 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
543 bfa_iocpf_stop(ioc);
544 break;
545
546 default:
547 bfa_sm_fault(ioc, event);
548 }
549}
550
551
552static void
553bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
227{ 554{
555 struct list_head *qe;
556 struct bfa_ioc_hbfail_notify_s *notify;
557 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
558
228 /** 559 /**
229 * Provide enable completion callback and AEN notification only once. 560 * Notify driver and common modules registered for notification.
230 */ 561 */
231 if (ioc->retry_count == 0) { 562 ioc->cbfn->hbfail_cbfn(ioc->bfa);
232 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 563 list_for_each(qe, &ioc->hb_notify_q) {
233 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH); 564 notify = (struct bfa_ioc_hbfail_notify_s *) qe;
565 notify->cbfn(notify->cbarg);
234 } 566 }
235 ioc->retry_count++; 567
236 bfa_ioc_timer_start(ioc); 568 BFA_LOG(KERN_CRIT, bfad, log_level,
569 "Heart Beat of IOC has failed\n");
237} 570}
238 571
239/** 572/**
240 * Awaiting firmware version match. 573 * IOC failure.
241 */ 574 */
242static void 575static void
243bfa_ioc_sm_mismatch(struct bfa_ioc_s *ioc, enum ioc_event event) 576bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
244{ 577{
245 bfa_trc(ioc, event); 578 bfa_trc(ioc, event);
246 579
247 switch (event) { 580 switch (event) {
248 case IOC_E_TIMEOUT: 581
249 bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck); 582 case IOC_E_FAILED:
583 /**
584 * Initialization failure during iocpf recovery.
585 * !!! Fall through !!!
586 */
587 case IOC_E_ENABLE:
588 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
589 break;
590
591 case IOC_E_ENABLED:
592 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
250 break; 593 break;
251 594
252 case IOC_E_DISABLE: 595 case IOC_E_DISABLE:
253 bfa_ioc_disable_comp(ioc); 596 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
597 break;
598
599 case IOC_E_HWERROR:
254 /* 600 /*
255 * fall through 601 * HB failure notification, ignore.
256 */ 602 */
603 break;
604 default:
605 bfa_sm_fault(ioc, event);
606 }
607}
257 608
258 case IOC_E_DETACH: 609
259 bfa_ioc_timer_stop(ioc); 610
260 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); 611/**
612 * IOCPF State Machine
613 */
614
615
616/**
617 * Reset entry actions -- initialize state machine
618 */
619static void
620bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
621{
622 iocpf->retry_count = 0;
623 iocpf->auto_recover = bfa_auto_recover;
624}
625
626/**
627 * Beginning state. IOC is in reset state.
628 */
629static void
630bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
631{
632 struct bfa_ioc_s *ioc = iocpf->ioc;
633
634 bfa_trc(ioc, event);
635
636 switch (event) {
637 case IOCPF_E_ENABLE:
638 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
261 break; 639 break;
262 640
263 case IOC_E_FWREADY: 641 case IOCPF_E_STOP:
264 break; 642 break;
265 643
266 default: 644 default:
@@ -269,31 +647,44 @@ bfa_ioc_sm_mismatch(struct bfa_ioc_s *ioc, enum ioc_event event)
269} 647}
270 648
271/** 649/**
272 * Request for semaphore. 650 * Semaphore should be acquired for version check.
273 */ 651 */
274static void 652static void
275bfa_ioc_sm_semwait_entry(struct bfa_ioc_s *ioc) 653bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
276{ 654{
277 bfa_ioc_hw_sem_get(ioc); 655 bfa_ioc_hw_sem_get(iocpf->ioc);
278} 656}
279 657
280/** 658/**
281 * Awaiting semaphore for h/w initialzation. 659 * Awaiting h/w semaphore to continue with version check.
282 */ 660 */
283static void 661static void
284bfa_ioc_sm_semwait(struct bfa_ioc_s *ioc, enum ioc_event event) 662bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
285{ 663{
664 struct bfa_ioc_s *ioc = iocpf->ioc;
665
286 bfa_trc(ioc, event); 666 bfa_trc(ioc, event);
287 667
288 switch (event) { 668 switch (event) {
289 case IOC_E_SEMLOCKED: 669 case IOCPF_E_SEMLOCKED:
290 ioc->retry_count = 0; 670 if (bfa_ioc_firmware_lock(ioc)) {
291 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit); 671 iocpf->retry_count = 0;
672 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
673 } else {
674 bfa_ioc_hw_sem_release(ioc);
675 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
676 }
292 break; 677 break;
293 678
294 case IOC_E_DISABLE: 679 case IOCPF_E_DISABLE:
295 bfa_ioc_hw_sem_get_cancel(ioc); 680 bfa_ioc_hw_sem_get_cancel(ioc);
296 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 681 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
682 bfa_ioc_pf_disabled(ioc);
683 break;
684
685 case IOCPF_E_STOP:
686 bfa_ioc_hw_sem_get_cancel(ioc);
687 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
297 break; 688 break;
298 689
299 default: 690 default:
@@ -301,51 +692,81 @@ bfa_ioc_sm_semwait(struct bfa_ioc_s *ioc, enum ioc_event event)
301 } 692 }
302} 693}
303 694
304 695/**
696 * Notify enable completion callback.
697 */
305static void 698static void
306bfa_ioc_sm_hwinit_entry(struct bfa_ioc_s *ioc) 699bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
307{ 700{
308 bfa_ioc_timer_start(ioc); 701 /*
309 bfa_ioc_reset(ioc, BFA_FALSE); 702 * Call only the first time sm enters fwmismatch state.
703 */
704 if (iocpf->retry_count == 0)
705 bfa_ioc_pf_fwmismatch(iocpf->ioc);
706
707 iocpf->retry_count++;
708 bfa_iocpf_timer_start(iocpf->ioc);
310} 709}
311 710
312/** 711/**
313 * Hardware is being initialized. Interrupts are enabled. 712 * Awaiting firmware version match.
314 * Holding hardware semaphore lock.
315 */ 713 */
316static void 714static void
317bfa_ioc_sm_hwinit(struct bfa_ioc_s *ioc, enum ioc_event event) 715bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
318{ 716{
717 struct bfa_ioc_s *ioc = iocpf->ioc;
718
319 bfa_trc(ioc, event); 719 bfa_trc(ioc, event);
320 720
321 switch (event) { 721 switch (event) {
322 case IOC_E_FWREADY: 722 case IOCPF_E_TIMEOUT:
323 bfa_ioc_timer_stop(ioc); 723 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
324 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
325 break; 724 break;
326 725
327 case IOC_E_HWERROR: 726 case IOCPF_E_DISABLE:
328 bfa_ioc_timer_stop(ioc); 727 bfa_iocpf_timer_stop(ioc);
329 /* 728 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
330 * fall through 729 bfa_ioc_pf_disabled(ioc);
331 */ 730 break;
332 731
333 case IOC_E_TIMEOUT: 732 case IOCPF_E_STOP:
334 ioc->retry_count++; 733 bfa_iocpf_timer_stop(ioc);
335 if (ioc->retry_count < BFA_IOC_HWINIT_MAX) { 734 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
336 bfa_ioc_timer_start(ioc); 735 break;
337 bfa_ioc_reset(ioc, BFA_TRUE);
338 break;
339 }
340 736
341 bfa_ioc_hw_sem_release(ioc); 737 default:
342 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); 738 bfa_sm_fault(ioc, event);
739 }
740}
741
742/**
743 * Request for semaphore.
744 */
745static void
746bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
747{
748 bfa_ioc_hw_sem_get(iocpf->ioc);
749}
750
751/**
752 * Awaiting semaphore for h/w initialzation.
753 */
754static void
755bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
756{
757 struct bfa_ioc_s *ioc = iocpf->ioc;
758
759 bfa_trc(ioc, event);
760
761 switch (event) {
762 case IOCPF_E_SEMLOCKED:
763 iocpf->retry_count = 0;
764 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
343 break; 765 break;
344 766
345 case IOC_E_DISABLE: 767 case IOCPF_E_DISABLE:
346 bfa_ioc_hw_sem_release(ioc); 768 bfa_ioc_hw_sem_get_cancel(ioc);
347 bfa_ioc_timer_stop(ioc); 769 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
348 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
349 break; 770 break;
350 771
351 default: 772 default:
@@ -355,55 +776,54 @@ bfa_ioc_sm_hwinit(struct bfa_ioc_s *ioc, enum ioc_event event)
355 776
356 777
357static void 778static void
358bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc) 779bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
359{ 780{
360 bfa_ioc_timer_start(ioc); 781 bfa_iocpf_timer_start(iocpf->ioc);
361 bfa_ioc_send_enable(ioc); 782 bfa_ioc_reset(iocpf->ioc, BFA_FALSE);
362} 783}
363 784
364/** 785/**
365 * Host IOC function is being enabled, awaiting response from firmware. 786 * Hardware is being initialized. Interrupts are enabled.
366 * Semaphore is acquired. 787 * Holding hardware semaphore lock.
367 */ 788 */
368static void 789static void
369bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event) 790bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
370{ 791{
792 struct bfa_ioc_s *ioc = iocpf->ioc;
793
371 bfa_trc(ioc, event); 794 bfa_trc(ioc, event);
372 795
373 switch (event) { 796 switch (event) {
374 case IOC_E_FWRSP_ENABLE: 797 case IOCPF_E_FWREADY:
375 bfa_ioc_timer_stop(ioc); 798 bfa_iocpf_timer_stop(ioc);
376 bfa_ioc_hw_sem_release(ioc); 799 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
377 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
378 break; 800 break;
379 801
380 case IOC_E_HWERROR: 802 case IOCPF_E_INITFAIL:
381 bfa_ioc_timer_stop(ioc); 803 bfa_iocpf_timer_stop(ioc);
382 /* 804 /*
383 * fall through 805 * !!! fall through !!!
384 */ 806 */
385 807
386 case IOC_E_TIMEOUT: 808 case IOCPF_E_TIMEOUT:
387 ioc->retry_count++; 809 iocpf->retry_count++;
388 if (ioc->retry_count < BFA_IOC_HWINIT_MAX) { 810 if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) {
389 bfa_reg_write(ioc->ioc_regs.ioc_fwstate, 811 bfa_iocpf_timer_start(ioc);
390 BFI_IOC_UNINIT); 812 bfa_ioc_reset(ioc, BFA_TRUE);
391 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
392 break; 813 break;
393 } 814 }
394 815
395 bfa_ioc_hw_sem_release(ioc); 816 bfa_ioc_hw_sem_release(ioc);
396 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); 817 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
397 break;
398 818
399 case IOC_E_DISABLE: 819 if (event == IOCPF_E_TIMEOUT)
400 bfa_ioc_timer_stop(ioc); 820 bfa_ioc_pf_failed(ioc);
401 bfa_ioc_hw_sem_release(ioc);
402 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
403 break; 821 break;
404 822
405 case IOC_E_FWREADY: 823 case IOCPF_E_DISABLE:
406 bfa_ioc_send_enable(ioc); 824 bfa_ioc_hw_sem_release(ioc);
825 bfa_iocpf_timer_stop(ioc);
826 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
407 break; 827 break;
408 828
409 default: 829 default:
@@ -413,40 +833,60 @@ bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
413 833
414 834
415static void 835static void
416bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc) 836bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
417{ 837{
418 bfa_ioc_timer_start(ioc); 838 bfa_iocpf_timer_start(iocpf->ioc);
419 bfa_ioc_send_getattr(ioc); 839 bfa_ioc_send_enable(iocpf->ioc);
420} 840}
421 841
422/** 842/**
423 * IOC configuration in progress. Timer is active. 843 * Host IOC function is being enabled, awaiting response from firmware.
844 * Semaphore is acquired.
424 */ 845 */
425static void 846static void
426bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event) 847bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
427{ 848{
849 struct bfa_ioc_s *ioc = iocpf->ioc;
850
428 bfa_trc(ioc, event); 851 bfa_trc(ioc, event);
429 852
430 switch (event) { 853 switch (event) {
431 case IOC_E_FWRSP_GETATTR: 854 case IOCPF_E_FWRSP_ENABLE:
432 bfa_ioc_timer_stop(ioc); 855 bfa_iocpf_timer_stop(ioc);
433 bfa_ioc_check_attr_wwns(ioc); 856 bfa_ioc_hw_sem_release(ioc);
434 bfa_fsm_set_state(ioc, bfa_ioc_sm_op); 857 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
435 break; 858 break;
436 859
437 case IOC_E_HWERROR: 860 case IOCPF_E_INITFAIL:
438 bfa_ioc_timer_stop(ioc); 861 bfa_iocpf_timer_stop(ioc);
439 /* 862 /*
440 * fall through 863 * !!! fall through !!!
441 */ 864 */
442 865
443 case IOC_E_TIMEOUT: 866 case IOCPF_E_TIMEOUT:
444 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); 867 iocpf->retry_count++;
868 if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) {
869 bfa_reg_write(ioc->ioc_regs.ioc_fwstate,
870 BFI_IOC_UNINIT);
871 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
872 break;
873 }
874
875 bfa_ioc_hw_sem_release(ioc);
876 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
877
878 if (event == IOCPF_E_TIMEOUT)
879 bfa_ioc_pf_failed(ioc);
445 break; 880 break;
446 881
447 case IOC_E_DISABLE: 882 case IOCPF_E_DISABLE:
448 bfa_ioc_timer_stop(ioc); 883 bfa_iocpf_timer_stop(ioc);
449 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 884 bfa_ioc_hw_sem_release(ioc);
885 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
886 break;
887
888 case IOCPF_E_FWREADY:
889 bfa_ioc_send_enable(ioc);
450 break; 890 break;
451 891
452 default: 892 default:
@@ -455,41 +895,40 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
455} 895}
456 896
457 897
898
458static void 899static void
459bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc) 900bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
460{ 901{
461 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); 902 bfa_ioc_pf_enabled(iocpf->ioc);
462 bfa_ioc_hb_monitor(ioc);
463 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
464} 903}
465 904
466static void 905static void
467bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event) 906bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
468{ 907{
908 struct bfa_ioc_s *ioc = iocpf->ioc;
909
469 bfa_trc(ioc, event); 910 bfa_trc(ioc, event);
470 911
471 switch (event) { 912 switch (event) {
472 case IOC_E_ENABLE: 913 case IOCPF_E_DISABLE:
914 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
473 break; 915 break;
474 916
475 case IOC_E_DISABLE: 917 case IOCPF_E_GETATTRFAIL:
476 bfa_ioc_hb_stop(ioc); 918 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
477 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
478 break; 919 break;
479 920
480 case IOC_E_HWERROR: 921 case IOCPF_E_FAIL:
481 case IOC_E_FWREADY: 922 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
482 /** 923 break;
483 * Hard error or IOC recovery by other function.
484 * Treat it same as heartbeat failure.
485 */
486 bfa_ioc_hb_stop(ioc);
487 /*
488 * !!! fall through !!!
489 */
490 924
491 case IOC_E_HBFAIL: 925 case IOCPF_E_FWREADY:
492 bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail); 926 if (bfa_ioc_is_operational(ioc))
927 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
928 else
929 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
930
931 bfa_ioc_pf_failed(ioc);
493 break; 932 break;
494 933
495 default: 934 default:
@@ -499,36 +938,41 @@ bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
499 938
500 939
501static void 940static void
502bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc) 941bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
503{ 942{
504 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE); 943 bfa_iocpf_timer_start(iocpf->ioc);
505 bfa_ioc_timer_start(ioc); 944 bfa_ioc_send_disable(iocpf->ioc);
506 bfa_ioc_send_disable(ioc);
507} 945}
508 946
509/** 947/**
510 * IOC is being disabled 948 * IOC is being disabled
511 */ 949 */
512static void 950static void
513bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event) 951bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
514{ 952{
953 struct bfa_ioc_s *ioc = iocpf->ioc;
954
515 bfa_trc(ioc, event); 955 bfa_trc(ioc, event);
516 956
517 switch (event) { 957 switch (event) {
518 case IOC_E_FWRSP_DISABLE: 958 case IOCPF_E_FWRSP_DISABLE:
519 bfa_ioc_timer_stop(ioc); 959 case IOCPF_E_FWREADY:
520 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 960 bfa_iocpf_timer_stop(ioc);
961 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
521 break; 962 break;
522 963
523 case IOC_E_HWERROR: 964 case IOCPF_E_FAIL:
524 bfa_ioc_timer_stop(ioc); 965 bfa_iocpf_timer_stop(ioc);
525 /* 966 /*
526 * !!! fall through !!! 967 * !!! fall through !!!
527 */ 968 */
528 969
529 case IOC_E_TIMEOUT: 970 case IOCPF_E_TIMEOUT:
530 bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL); 971 bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);
531 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 972 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
973 break;
974
975 case IOCPF_E_FWRSP_ENABLE:
532 break; 976 break;
533 977
534 default: 978 default:
@@ -540,31 +984,26 @@ bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
540 * IOC disable completion entry. 984 * IOC disable completion entry.
541 */ 985 */
542static void 986static void
543bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc) 987bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
544{ 988{
545 bfa_ioc_disable_comp(ioc); 989 bfa_ioc_pf_disabled(iocpf->ioc);
546} 990}
547 991
548static void 992static void
549bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event) 993bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
550{ 994{
995 struct bfa_ioc_s *ioc = iocpf->ioc;
996
551 bfa_trc(ioc, event); 997 bfa_trc(ioc, event);
552 998
553 switch (event) { 999 switch (event) {
554 case IOC_E_ENABLE: 1000 case IOCPF_E_ENABLE:
555 bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait); 1001 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
556 break; 1002 break;
557 1003
558 case IOC_E_DISABLE: 1004 case IOCPF_E_STOP:
559 ioc->cbfn->disable_cbfn(ioc->bfa);
560 break;
561
562 case IOC_E_FWREADY:
563 break;
564
565 case IOC_E_DETACH:
566 bfa_ioc_firmware_unlock(ioc); 1005 bfa_ioc_firmware_unlock(ioc);
567 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); 1006 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
568 break; 1007 break;
569 1008
570 default: 1009 default:
@@ -574,34 +1013,35 @@ bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
574 1013
575 1014
576static void 1015static void
577bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc) 1016bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
578{ 1017{
579 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 1018 bfa_iocpf_timer_start(iocpf->ioc);
580 bfa_ioc_timer_start(ioc);
581} 1019}
582 1020
583/** 1021/**
584 * Hardware initialization failed. 1022 * Hardware initialization failed.
585 */ 1023 */
586static void 1024static void
587bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event) 1025bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
588{ 1026{
1027 struct bfa_ioc_s *ioc = iocpf->ioc;
1028
589 bfa_trc(ioc, event); 1029 bfa_trc(ioc, event);
590 1030
591 switch (event) { 1031 switch (event) {
592 case IOC_E_DISABLE: 1032 case IOCPF_E_DISABLE:
593 bfa_ioc_timer_stop(ioc); 1033 bfa_iocpf_timer_stop(ioc);
594 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 1034 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
595 break; 1035 break;
596 1036
597 case IOC_E_DETACH: 1037 case IOCPF_E_STOP:
598 bfa_ioc_timer_stop(ioc); 1038 bfa_iocpf_timer_stop(ioc);
599 bfa_ioc_firmware_unlock(ioc); 1039 bfa_ioc_firmware_unlock(ioc);
600 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); 1040 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
601 break; 1041 break;
602 1042
603 case IOC_E_TIMEOUT: 1043 case IOCPF_E_TIMEOUT:
604 bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait); 1044 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
605 break; 1045 break;
606 1046
607 default: 1047 default:
@@ -611,80 +1051,47 @@ bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event)
611 1051
612 1052
613static void 1053static void
614bfa_ioc_sm_hbfail_entry(struct bfa_ioc_s *ioc) 1054bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
615{ 1055{
616 struct list_head *qe;
617 struct bfa_ioc_hbfail_notify_s *notify;
618
619 /** 1056 /**
620 * Mark IOC as failed in hardware and stop firmware. 1057 * Mark IOC as failed in hardware and stop firmware.
621 */ 1058 */
622 bfa_ioc_lpu_stop(ioc); 1059 bfa_ioc_lpu_stop(iocpf->ioc);
623 bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL); 1060 bfa_reg_write(iocpf->ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);
624 1061
625 /** 1062 /**
626 * Notify other functions on HB failure. 1063 * Notify other functions on HB failure.
627 */ 1064 */
628 bfa_ioc_notify_hbfail(ioc); 1065 bfa_ioc_notify_hbfail(iocpf->ioc);
629
630 /**
631 * Notify driver and common modules registered for notification.
632 */
633 ioc->cbfn->hbfail_cbfn(ioc->bfa);
634 list_for_each(qe, &ioc->hb_notify_q) {
635 notify = (struct bfa_ioc_hbfail_notify_s *)qe;
636 notify->cbfn(notify->cbarg);
637 }
638 1066
639 /** 1067 /**
640 * Flush any queued up mailbox requests. 1068 * Flush any queued up mailbox requests.
641 */ 1069 */
642 bfa_ioc_mbox_hbfail(ioc); 1070 bfa_ioc_mbox_hbfail(iocpf->ioc);
643 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
644 1071
645 /** 1072 if (iocpf->auto_recover)
646 * Trigger auto-recovery after a delay. 1073 bfa_iocpf_recovery_timer_start(iocpf->ioc);
647 */
648 if (ioc->auto_recover) {
649 bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer,
650 bfa_ioc_timeout, ioc, BFA_IOC_TOV_RECOVER);
651 }
652} 1074}
653 1075
654/** 1076/**
655 * IOC heartbeat failure. 1077 * IOC is in failed state.
656 */ 1078 */
657static void 1079static void
658bfa_ioc_sm_hbfail(struct bfa_ioc_s *ioc, enum ioc_event event) 1080bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
659{ 1081{
1082 struct bfa_ioc_s *ioc = iocpf->ioc;
1083
660 bfa_trc(ioc, event); 1084 bfa_trc(ioc, event);
661 1085
662 switch (event) { 1086 switch (event) {
663 1087 case IOCPF_E_DISABLE:
664 case IOC_E_ENABLE: 1088 if (iocpf->auto_recover)
665 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 1089 bfa_iocpf_timer_stop(ioc);
666 break; 1090 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
667
668 case IOC_E_DISABLE:
669 if (ioc->auto_recover)
670 bfa_ioc_timer_stop(ioc);
671 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
672 break; 1091 break;
673 1092
674 case IOC_E_TIMEOUT: 1093 case IOCPF_E_TIMEOUT:
675 bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait); 1094 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
676 break;
677
678 case IOC_E_FWREADY:
679 /**
680 * Recovery is already initiated by other function.
681 */
682 break;
683
684 case IOC_E_HWERROR:
685 /*
686 * HB failure notification, ignore.
687 */
688 break; 1095 break;
689 1096
690 default: 1097 default:
@@ -695,14 +1102,14 @@ bfa_ioc_sm_hbfail(struct bfa_ioc_s *ioc, enum ioc_event event)
695 1102
696 1103
697/** 1104/**
698 * bfa_ioc_pvt BFA IOC private functions 1105 * hal_ioc_pvt BFA IOC private functions
699 */ 1106 */
700 1107
701static void 1108static void
702bfa_ioc_disable_comp(struct bfa_ioc_s *ioc) 1109bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
703{ 1110{
704 struct list_head *qe; 1111 struct list_head *qe;
705 struct bfa_ioc_hbfail_notify_s *notify; 1112 struct bfa_ioc_hbfail_notify_s *notify;
706 1113
707 ioc->cbfn->disable_cbfn(ioc->bfa); 1114 ioc->cbfn->disable_cbfn(ioc->bfa);
708 1115
@@ -710,25 +1117,17 @@ bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
710 * Notify common modules registered for notification. 1117 * Notify common modules registered for notification.
711 */ 1118 */
712 list_for_each(qe, &ioc->hb_notify_q) { 1119 list_for_each(qe, &ioc->hb_notify_q) {
713 notify = (struct bfa_ioc_hbfail_notify_s *)qe; 1120 notify = (struct bfa_ioc_hbfail_notify_s *) qe;
714 notify->cbfn(notify->cbarg); 1121 notify->cbfn(notify->cbarg);
715 } 1122 }
716} 1123}
717 1124
718void
719bfa_ioc_sem_timeout(void *ioc_arg)
720{
721 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg;
722
723 bfa_ioc_hw_sem_get(ioc);
724}
725
726bfa_boolean_t 1125bfa_boolean_t
727bfa_ioc_sem_get(bfa_os_addr_t sem_reg) 1126bfa_ioc_sem_get(bfa_os_addr_t sem_reg)
728{ 1127{
729 u32 r32; 1128 u32 r32;
730 int cnt = 0; 1129 int cnt = 0;
731#define BFA_SEM_SPINCNT 3000 1130#define BFA_SEM_SPINCNT 3000
732 1131
733 r32 = bfa_reg_read(sem_reg); 1132 r32 = bfa_reg_read(sem_reg);
734 1133
@@ -754,7 +1153,7 @@ bfa_ioc_sem_release(bfa_os_addr_t sem_reg)
754static void 1153static void
755bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc) 1154bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
756{ 1155{
757 u32 r32; 1156 u32 r32;
758 1157
759 /** 1158 /**
760 * First read to the semaphore register will return 0, subsequent reads 1159 * First read to the semaphore register will return 0, subsequent reads
@@ -762,12 +1161,11 @@ bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
762 */ 1161 */
763 r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg); 1162 r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
764 if (r32 == 0) { 1163 if (r32 == 0) {
765 bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED); 1164 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
766 return; 1165 return;
767 } 1166 }
768 1167
769 bfa_timer_begin(ioc->timer_mod, &ioc->sem_timer, bfa_ioc_sem_timeout, 1168 bfa_sem_timer_start(ioc);
770 ioc, BFA_IOC_HWSEM_TOV);
771} 1169}
772 1170
773void 1171void
@@ -779,7 +1177,7 @@ bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc)
779static void 1177static void
780bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc) 1178bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc)
781{ 1179{
782 bfa_timer_stop(&ioc->sem_timer); 1180 bfa_sem_timer_stop(ioc);
783} 1181}
784 1182
785/** 1183/**
@@ -788,14 +1186,18 @@ bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc)
788static void 1186static void
789bfa_ioc_lmem_init(struct bfa_ioc_s *ioc) 1187bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
790{ 1188{
791 u32 pss_ctl; 1189 u32 pss_ctl;
792 int i; 1190 int i;
793#define PSS_LMEM_INIT_TIME 10000 1191#define PSS_LMEM_INIT_TIME 10000
794 1192
795 pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg); 1193 pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
796 pss_ctl &= ~__PSS_LMEM_RESET; 1194 pss_ctl &= ~__PSS_LMEM_RESET;
797 pss_ctl |= __PSS_LMEM_INIT_EN; 1195 pss_ctl |= __PSS_LMEM_INIT_EN;
798 pss_ctl |= __PSS_I2C_CLK_DIV(3UL); /* i2c workaround 12.5khz clock */ 1196
1197 /*
1198 * i2c workaround 12.5khz clock
1199 */
1200 pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
799 bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl); 1201 bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
800 1202
801 /** 1203 /**
@@ -821,7 +1223,7 @@ bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
821static void 1223static void
822bfa_ioc_lpu_start(struct bfa_ioc_s *ioc) 1224bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
823{ 1225{
824 u32 pss_ctl; 1226 u32 pss_ctl;
825 1227
826 /** 1228 /**
827 * Take processor out of reset. 1229 * Take processor out of reset.
@@ -835,7 +1237,7 @@ bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
835static void 1237static void
836bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc) 1238bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
837{ 1239{
838 u32 pss_ctl; 1240 u32 pss_ctl;
839 1241
840 /** 1242 /**
841 * Put processors in reset. 1243 * Put processors in reset.
@@ -852,10 +1254,10 @@ bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
852void 1254void
853bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) 1255bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
854{ 1256{
855 u32 pgnum, pgoff; 1257 u32 pgnum, pgoff;
856 u32 loff = 0; 1258 u32 loff = 0;
857 int i; 1259 int i;
858 u32 *fwsig = (u32 *) fwhdr; 1260 u32 *fwsig = (u32 *) fwhdr;
859 1261
860 pgnum = bfa_ioc_smem_pgnum(ioc, loff); 1262 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
861 pgoff = bfa_ioc_smem_pgoff(ioc, loff); 1263 pgoff = bfa_ioc_smem_pgoff(ioc, loff);
@@ -863,7 +1265,8 @@ bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
863 1265
864 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32)); 1266 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
865 i++) { 1267 i++) {
866 fwsig[i] = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); 1268 fwsig[i] =
1269 bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
867 loff += sizeof(u32); 1270 loff += sizeof(u32);
868 } 1271 }
869} 1272}
@@ -875,10 +1278,10 @@ bfa_boolean_t
875bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) 1278bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
876{ 1279{
877 struct bfi_ioc_image_hdr_s *drv_fwhdr; 1280 struct bfi_ioc_image_hdr_s *drv_fwhdr;
878 int i; 1281 int i;
879 1282
880 drv_fwhdr = (struct bfi_ioc_image_hdr_s *) 1283 drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
881 bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0); 1284 bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
882 1285
883 for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) { 1286 for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
884 if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) { 1287 if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
@@ -897,21 +1300,20 @@ bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
897 * Return true if current running version is valid. Firmware signature and 1300 * Return true if current running version is valid. Firmware signature and
898 * execution context (driver/bios) must match. 1301 * execution context (driver/bios) must match.
899 */ 1302 */
900static bfa_boolean_t 1303static bfa_boolean_t
901bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc) 1304bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
902{ 1305{
903 struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr; 1306 struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;
904 1307
905 /** 1308 /**
906 * If bios/efi boot (flash based) -- return true 1309 * If bios/efi boot (flash based) -- return true
907 */ 1310 */
908 if (bfa_ioc_is_optrom(ioc)) 1311 if (bfa_ioc_is_bios_optrom(ioc))
909 return BFA_TRUE; 1312 return BFA_TRUE;
910 1313
911 bfa_ioc_fwver_get(ioc, &fwhdr); 1314 bfa_ioc_fwver_get(ioc, &fwhdr);
912 drv_fwhdr = (struct bfi_ioc_image_hdr_s *) 1315 drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
913 bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0); 1316 bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
914
915 1317
916 if (fwhdr.signature != drv_fwhdr->signature) { 1318 if (fwhdr.signature != drv_fwhdr->signature) {
917 bfa_trc(ioc, fwhdr.signature); 1319 bfa_trc(ioc, fwhdr.signature);
@@ -919,9 +1321,9 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc)
919 return BFA_FALSE; 1321 return BFA_FALSE;
920 } 1322 }
921 1323
922 if (fwhdr.exec != drv_fwhdr->exec) { 1324 if (bfa_os_swap32(fwhdr.param) != boot_env) {
923 bfa_trc(ioc, fwhdr.exec); 1325 bfa_trc(ioc, fwhdr.param);
924 bfa_trc(ioc, drv_fwhdr->exec); 1326 bfa_trc(ioc, boot_env);
925 return BFA_FALSE; 1327 return BFA_FALSE;
926 } 1328 }
927 1329
@@ -934,7 +1336,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc)
934static void 1336static void
935bfa_ioc_msgflush(struct bfa_ioc_s *ioc) 1337bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
936{ 1338{
937 u32 r32; 1339 u32 r32;
938 1340
939 r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd); 1341 r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
940 if (r32) 1342 if (r32)
@@ -946,7 +1348,9 @@ static void
946bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) 1348bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
947{ 1349{
948 enum bfi_ioc_state ioc_fwstate; 1350 enum bfi_ioc_state ioc_fwstate;
949 bfa_boolean_t fwvalid; 1351 bfa_boolean_t fwvalid;
1352 u32 boot_type;
1353 u32 boot_env;
950 1354
951 ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate); 1355 ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
952 1356
@@ -955,14 +1359,33 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
955 1359
956 bfa_trc(ioc, ioc_fwstate); 1360 bfa_trc(ioc, ioc_fwstate);
957 1361
1362 boot_type = BFI_BOOT_TYPE_NORMAL;
1363 boot_env = BFI_BOOT_LOADER_OS;
1364
1365 /**
1366 * Flash based firmware boot BIOS env.
1367 */
1368 if (bfa_ioc_is_bios_optrom(ioc)) {
1369 boot_type = BFI_BOOT_TYPE_FLASH;
1370 boot_env = BFI_BOOT_LOADER_BIOS;
1371 }
1372
1373 /**
1374 * Flash based firmware boot UEFI env.
1375 */
1376 if (bfa_ioc_is_uefi(ioc)) {
1377 boot_type = BFI_BOOT_TYPE_FLASH;
1378 boot_env = BFI_BOOT_LOADER_UEFI;
1379 }
1380
958 /** 1381 /**
959 * check if firmware is valid 1382 * check if firmware is valid
960 */ 1383 */
961 fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ? 1384 fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
962 BFA_FALSE : bfa_ioc_fwver_valid(ioc); 1385 BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);
963 1386
964 if (!fwvalid) { 1387 if (!fwvalid) {
965 bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id); 1388 bfa_ioc_boot(ioc, boot_type, boot_env);
966 return; 1389 return;
967 } 1390 }
968 1391
@@ -971,7 +1394,6 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
971 * just wait for an initialization completion interrupt. 1394 * just wait for an initialization completion interrupt.
972 */ 1395 */
973 if (ioc_fwstate == BFI_IOC_INITING) { 1396 if (ioc_fwstate == BFI_IOC_INITING) {
974 bfa_trc(ioc, ioc_fwstate);
975 ioc->cbfn->reset_cbfn(ioc->bfa); 1397 ioc->cbfn->reset_cbfn(ioc->bfa);
976 return; 1398 return;
977 } 1399 }
@@ -985,8 +1407,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
985 * is loaded. 1407 * is loaded.
986 */ 1408 */
987 if (ioc_fwstate == BFI_IOC_DISABLED || 1409 if (ioc_fwstate == BFI_IOC_DISABLED ||
988 (!bfa_ioc_is_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) { 1410 (!bfa_ioc_is_bios_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
989 bfa_trc(ioc, ioc_fwstate);
990 1411
991 /** 1412 /**
992 * When using MSI-X any pending firmware ready event should 1413 * When using MSI-X any pending firmware ready event should
@@ -994,20 +1415,20 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
994 */ 1415 */
995 bfa_ioc_msgflush(ioc); 1416 bfa_ioc_msgflush(ioc);
996 ioc->cbfn->reset_cbfn(ioc->bfa); 1417 ioc->cbfn->reset_cbfn(ioc->bfa);
997 bfa_fsm_send_event(ioc, IOC_E_FWREADY); 1418 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
998 return; 1419 return;
999 } 1420 }
1000 1421
1001 /** 1422 /**
1002 * Initialize the h/w for any other states. 1423 * Initialize the h/w for any other states.
1003 */ 1424 */
1004 bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id); 1425 bfa_ioc_boot(ioc, boot_type, boot_env);
1005} 1426}
1006 1427
1007static void 1428static void
1008bfa_ioc_timeout(void *ioc_arg) 1429bfa_ioc_timeout(void *ioc_arg)
1009{ 1430{
1010 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg; 1431 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
1011 1432
1012 bfa_trc(ioc, 0); 1433 bfa_trc(ioc, 0);
1013 bfa_fsm_send_event(ioc, IOC_E_TIMEOUT); 1434 bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
@@ -1016,8 +1437,8 @@ bfa_ioc_timeout(void *ioc_arg)
1016void 1437void
1017bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len) 1438bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
1018{ 1439{
1019 u32 *msgp = (u32 *) ioc_msg; 1440 u32 *msgp = (u32 *) ioc_msg;
1020 u32 i; 1441 u32 i;
1021 1442
1022 bfa_trc(ioc, msgp[0]); 1443 bfa_trc(ioc, msgp[0]);
1023 bfa_trc(ioc, len); 1444 bfa_trc(ioc, len);
@@ -1038,17 +1459,20 @@ bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
1038 * write 1 to mailbox CMD to trigger LPU event 1459 * write 1 to mailbox CMD to trigger LPU event
1039 */ 1460 */
1040 bfa_reg_write(ioc->ioc_regs.hfn_mbox_cmd, 1); 1461 bfa_reg_write(ioc->ioc_regs.hfn_mbox_cmd, 1);
1041 (void)bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd); 1462 (void) bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
1042} 1463}
1043 1464
1044static void 1465static void
1045bfa_ioc_send_enable(struct bfa_ioc_s *ioc) 1466bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
1046{ 1467{
1047 struct bfi_ioc_ctrl_req_s enable_req; 1468 struct bfi_ioc_ctrl_req_s enable_req;
1469 struct bfa_timeval_s tv;
1048 1470
1049 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ, 1471 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1050 bfa_ioc_portid(ioc)); 1472 bfa_ioc_portid(ioc));
1051 enable_req.ioc_class = ioc->ioc_mc; 1473 enable_req.ioc_class = ioc->ioc_mc;
1474 bfa_os_gettimeofday(&tv);
1475 enable_req.tv_sec = bfa_os_ntohl(tv.tv_sec);
1052 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s)); 1476 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1053} 1477}
1054 1478
@@ -1065,7 +1489,7 @@ bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
1065static void 1489static void
1066bfa_ioc_send_getattr(struct bfa_ioc_s *ioc) 1490bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
1067{ 1491{
1068 struct bfi_ioc_getattr_req_s attr_req; 1492 struct bfi_ioc_getattr_req_s attr_req;
1069 1493
1070 bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ, 1494 bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1071 bfa_ioc_portid(ioc)); 1495 bfa_ioc_portid(ioc));
@@ -1077,12 +1501,11 @@ static void
1077bfa_ioc_hb_check(void *cbarg) 1501bfa_ioc_hb_check(void *cbarg)
1078{ 1502{
1079 struct bfa_ioc_s *ioc = cbarg; 1503 struct bfa_ioc_s *ioc = cbarg;
1080 u32 hb_count; 1504 u32 hb_count;
1081 1505
1082 hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat); 1506 hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
1083 if (ioc->hb_count == hb_count) { 1507 if (ioc->hb_count == hb_count) {
1084 bfa_log(ioc->logm, BFA_LOG_HAL_HEARTBEAT_FAILURE, 1508 printk(KERN_CRIT "Firmware heartbeat failure at %d", hb_count);
1085 hb_count);
1086 bfa_ioc_recover(ioc); 1509 bfa_ioc_recover(ioc);
1087 return; 1510 return;
1088 } else { 1511 } else {
@@ -1090,61 +1513,54 @@ bfa_ioc_hb_check(void *cbarg)
1090 } 1513 }
1091 1514
1092 bfa_ioc_mbox_poll(ioc); 1515 bfa_ioc_mbox_poll(ioc);
1093 bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, 1516 bfa_hb_timer_start(ioc);
1094 ioc, BFA_IOC_HB_TOV);
1095} 1517}
1096 1518
1097static void 1519static void
1098bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc) 1520bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
1099{ 1521{
1100 ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat); 1522 ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
1101 bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc, 1523 bfa_hb_timer_start(ioc);
1102 BFA_IOC_HB_TOV);
1103} 1524}
1104 1525
1105static void 1526static void
1106bfa_ioc_hb_stop(struct bfa_ioc_s *ioc) 1527bfa_ioc_hb_stop(struct bfa_ioc_s *ioc)
1107{ 1528{
1108 bfa_timer_stop(&ioc->ioc_timer); 1529 bfa_hb_timer_stop(ioc);
1109} 1530}
1110 1531
1532
1111/** 1533/**
1112 * Initiate a full firmware download. 1534 * Initiate a full firmware download.
1113 */ 1535 */
1114static void 1536static void
1115bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type, 1537bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1116 u32 boot_param) 1538 u32 boot_env)
1117{ 1539{
1118 u32 *fwimg; 1540 u32 *fwimg;
1119 u32 pgnum, pgoff; 1541 u32 pgnum, pgoff;
1120 u32 loff = 0; 1542 u32 loff = 0;
1121 u32 chunkno = 0; 1543 u32 chunkno = 0;
1122 u32 i; 1544 u32 i;
1123 1545
1124 /** 1546 /**
1125 * Initialize LMEM first before code download 1547 * Initialize LMEM first before code download
1126 */ 1548 */
1127 bfa_ioc_lmem_init(ioc); 1549 bfa_ioc_lmem_init(ioc);
1128 1550
1129 /** 1551 bfa_trc(ioc, bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)));
1130 * Flash based firmware boot 1552 fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
1131 */
1132 bfa_trc(ioc, bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)));
1133 if (bfa_ioc_is_optrom(ioc))
1134 boot_type = BFI_BOOT_TYPE_FLASH;
1135 fwimg = bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
1136
1137 1553
1138 pgnum = bfa_ioc_smem_pgnum(ioc, loff); 1554 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1139 pgoff = bfa_ioc_smem_pgoff(ioc, loff); 1555 pgoff = bfa_ioc_smem_pgoff(ioc, loff);
1140 1556
1141 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); 1557 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
1142 1558
1143 for (i = 0; i < bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) { 1559 for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
1144 1560
1145 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) { 1561 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1146 chunkno = BFA_IOC_FLASH_CHUNK_NO(i); 1562 chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1147 fwimg = bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 1563 fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
1148 BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); 1564 BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1149 } 1565 }
1150 1566
@@ -1162,7 +1578,8 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1162 loff = PSS_SMEM_PGOFF(loff); 1578 loff = PSS_SMEM_PGOFF(loff);
1163 if (loff == 0) { 1579 if (loff == 0) {
1164 pgnum++; 1580 pgnum++;
1165 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); 1581 bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
1582 pgnum);
1166 } 1583 }
1167 } 1584 }
1168 1585
@@ -1171,11 +1588,11 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1171 1588
1172 /* 1589 /*
1173 * Set boot type and boot param at the end. 1590 * Set boot type and boot param at the end.
1174 */ 1591 */
1175 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF, 1592 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
1176 bfa_os_swap32(boot_type)); 1593 bfa_os_swap32(boot_type));
1177 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_PARAM_OFF, 1594 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_LOADER_OFF,
1178 bfa_os_swap32(boot_param)); 1595 bfa_os_swap32(boot_env));
1179} 1596}
1180 1597
1181static void 1598static void
@@ -1190,11 +1607,11 @@ bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1190static void 1607static void
1191bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc) 1608bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1192{ 1609{
1193 struct bfi_ioc_attr_s *attr = ioc->attr; 1610 struct bfi_ioc_attr_s *attr = ioc->attr;
1194 1611
1195 attr->adapter_prop = bfa_os_ntohl(attr->adapter_prop); 1612 attr->adapter_prop = bfa_os_ntohl(attr->adapter_prop);
1196 attr->card_type = bfa_os_ntohl(attr->card_type); 1613 attr->card_type = bfa_os_ntohl(attr->card_type);
1197 attr->maxfrsize = bfa_os_ntohs(attr->maxfrsize); 1614 attr->maxfrsize = bfa_os_ntohs(attr->maxfrsize);
1198 1615
1199 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR); 1616 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1200} 1617}
@@ -1205,8 +1622,8 @@ bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1205static void 1622static void
1206bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc) 1623bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
1207{ 1624{
1208 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; 1625 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1209 int mc; 1626 int mc;
1210 1627
1211 INIT_LIST_HEAD(&mod->cmd_q); 1628 INIT_LIST_HEAD(&mod->cmd_q);
1212 for (mc = 0; mc < BFI_MC_MAX; mc++) { 1629 for (mc = 0; mc < BFI_MC_MAX; mc++) {
@@ -1221,9 +1638,9 @@ bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
1221static void 1638static void
1222bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc) 1639bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
1223{ 1640{
1224 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; 1641 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1225 struct bfa_mbox_cmd_s *cmd; 1642 struct bfa_mbox_cmd_s *cmd;
1226 u32 stat; 1643 u32 stat;
1227 1644
1228 /** 1645 /**
1229 * If no command pending, do nothing 1646 * If no command pending, do nothing
@@ -1251,25 +1668,194 @@ bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
1251static void 1668static void
1252bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc) 1669bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
1253{ 1670{
1254 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; 1671 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1255 struct bfa_mbox_cmd_s *cmd; 1672 struct bfa_mbox_cmd_s *cmd;
1256 1673
1257 while (!list_empty(&mod->cmd_q)) 1674 while (!list_empty(&mod->cmd_q))
1258 bfa_q_deq(&mod->cmd_q, &cmd); 1675 bfa_q_deq(&mod->cmd_q, &cmd);
1259} 1676}
1260 1677
1261/** 1678/**
1262 * bfa_ioc_public 1679 * Read data from SMEM to host through PCI memmap
1680 *
1681 * @param[in] ioc memory for IOC
1682 * @param[in] tbuf app memory to store data from smem
1683 * @param[in] soff smem offset
1684 * @param[in] sz size of smem in bytes
1685 */
1686static bfa_status_t
1687bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
1688{
1689 u32 pgnum, loff, r32;
1690 int i, len;
1691 u32 *buf = tbuf;
1692
1693 pgnum = bfa_ioc_smem_pgnum(ioc, soff);
1694 loff = bfa_ioc_smem_pgoff(ioc, soff);
1695 bfa_trc(ioc, pgnum);
1696 bfa_trc(ioc, loff);
1697 bfa_trc(ioc, sz);
1698
1699 /*
1700 * Hold semaphore to serialize pll init and fwtrc.
1701 */
1702 if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
1703 bfa_trc(ioc, 0);
1704 return BFA_STATUS_FAILED;
1705 }
1706
1707 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
1708
1709 len = sz/sizeof(u32);
1710 bfa_trc(ioc, len);
1711 for (i = 0; i < len; i++) {
1712 r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
1713 buf[i] = bfa_os_ntohl(r32);
1714 loff += sizeof(u32);
1715
1716 /**
1717 * handle page offset wrap around
1718 */
1719 loff = PSS_SMEM_PGOFF(loff);
1720 if (loff == 0) {
1721 pgnum++;
1722 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
1723 }
1724 }
1725 bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
1726 bfa_ioc_smem_pgnum(ioc, 0));
1727 /*
1728 * release semaphore.
1729 */
1730 bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
1731
1732 bfa_trc(ioc, pgnum);
1733 return BFA_STATUS_OK;
1734}
1735
1736/**
1737 * Clear SMEM data from host through PCI memmap
1738 *
1739 * @param[in] ioc memory for IOC
1740 * @param[in] soff smem offset
1741 * @param[in] sz size of smem in bytes
1742 */
1743static bfa_status_t
1744bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
1745{
1746 int i, len;
1747 u32 pgnum, loff;
1748
1749 pgnum = bfa_ioc_smem_pgnum(ioc, soff);
1750 loff = bfa_ioc_smem_pgoff(ioc, soff);
1751 bfa_trc(ioc, pgnum);
1752 bfa_trc(ioc, loff);
1753 bfa_trc(ioc, sz);
1754
1755 /*
1756 * Hold semaphore to serialize pll init and fwtrc.
1757 */
1758 if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
1759 bfa_trc(ioc, 0);
1760 return BFA_STATUS_FAILED;
1761 }
1762
1763 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
1764
1765 len = sz/sizeof(u32); /* len in words */
1766 bfa_trc(ioc, len);
1767 for (i = 0; i < len; i++) {
1768 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
1769 loff += sizeof(u32);
1770
1771 /**
1772 * handle page offset wrap around
1773 */
1774 loff = PSS_SMEM_PGOFF(loff);
1775 if (loff == 0) {
1776 pgnum++;
1777 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
1778 }
1779 }
1780 bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
1781 bfa_ioc_smem_pgnum(ioc, 0));
1782
1783 /*
1784 * release semaphore.
1785 */
1786 bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
1787 bfa_trc(ioc, pgnum);
1788 return BFA_STATUS_OK;
1789}
1790
1791/**
1792 * hal iocpf to ioc interface
1793 */
1794static void
1795bfa_ioc_pf_enabled(struct bfa_ioc_s *ioc)
1796{
1797 bfa_fsm_send_event(ioc, IOC_E_ENABLED);
1798}
1799
1800static void
1801bfa_ioc_pf_disabled(struct bfa_ioc_s *ioc)
1802{
1803 bfa_fsm_send_event(ioc, IOC_E_DISABLED);
1804}
1805
1806static void
1807bfa_ioc_pf_failed(struct bfa_ioc_s *ioc)
1808{
1809 bfa_fsm_send_event(ioc, IOC_E_FAILED);
1810}
1811
1812static void
1813bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
1814{
1815 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
1816 /**
1817 * Provide enable completion callback.
1818 */
1819 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
1820 BFA_LOG(KERN_WARNING, bfad, log_level,
1821 "Running firmware version is incompatible "
1822 "with the driver version\n");
1823}
1824
1825
1826
1827/**
1828 * hal_ioc_public
1263 */ 1829 */
1264 1830
1831bfa_status_t
1832bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
1833{
1834
1835 /*
1836 * Hold semaphore so that nobody can access the chip during init.
1837 */
1838 bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
1839
1840 bfa_ioc_pll_init_asic(ioc);
1841
1842 ioc->pllinit = BFA_TRUE;
1843 /*
1844 * release semaphore.
1845 */
1846 bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
1847
1848 return BFA_STATUS_OK;
1849}
1850
1265/** 1851/**
1266 * Interface used by diag module to do firmware boot with memory test 1852 * Interface used by diag module to do firmware boot with memory test
1267 * as the entry vector. 1853 * as the entry vector.
1268 */ 1854 */
1269void 1855void
1270bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param) 1856bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
1271{ 1857{
1272 bfa_os_addr_t rb; 1858 bfa_os_addr_t rb;
1273 1859
1274 bfa_ioc_stats(ioc, ioc_boots); 1860 bfa_ioc_stats(ioc, ioc_boots);
1275 1861
@@ -1280,7 +1866,7 @@ bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param)
1280 * Initialize IOC state of all functions on a chip reset. 1866 * Initialize IOC state of all functions on a chip reset.
1281 */ 1867 */
1282 rb = ioc->pcidev.pci_bar_kva; 1868 rb = ioc->pcidev.pci_bar_kva;
1283 if (boot_param == BFI_BOOT_TYPE_MEMTEST) { 1869 if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
1284 bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_MEMTEST); 1870 bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_MEMTEST);
1285 bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_MEMTEST); 1871 bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_MEMTEST);
1286 } else { 1872 } else {
@@ -1289,7 +1875,7 @@ bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param)
1289 } 1875 }
1290 1876
1291 bfa_ioc_msgflush(ioc); 1877 bfa_ioc_msgflush(ioc);
1292 bfa_ioc_download_fw(ioc, boot_type, boot_param); 1878 bfa_ioc_download_fw(ioc, boot_type, boot_env);
1293 1879
1294 /** 1880 /**
1295 * Enable interrupts just before starting LPU 1881 * Enable interrupts just before starting LPU
@@ -1308,18 +1894,29 @@ bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
1308} 1894}
1309 1895
1310 1896
1897
1311bfa_boolean_t 1898bfa_boolean_t
1312bfa_ioc_is_operational(struct bfa_ioc_s *ioc) 1899bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
1313{ 1900{
1314 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op); 1901 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
1315} 1902}
1316 1903
1904bfa_boolean_t
1905bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
1906{
1907 u32 r32 = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
1908
1909 return ((r32 != BFI_IOC_UNINIT) &&
1910 (r32 != BFI_IOC_INITING) &&
1911 (r32 != BFI_IOC_MEMTEST));
1912}
1913
1317void 1914void
1318bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg) 1915bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
1319{ 1916{
1320 u32 *msgp = mbmsg; 1917 u32 *msgp = mbmsg;
1321 u32 r32; 1918 u32 r32;
1322 int i; 1919 int i;
1323 1920
1324 /** 1921 /**
1325 * read the MBOX msg 1922 * read the MBOX msg
@@ -1341,9 +1938,10 @@ bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
1341void 1938void
1342bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m) 1939bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
1343{ 1940{
1344 union bfi_ioc_i2h_msg_u *msg; 1941 union bfi_ioc_i2h_msg_u *msg;
1942 struct bfa_iocpf_s *iocpf = &ioc->iocpf;
1345 1943
1346 msg = (union bfi_ioc_i2h_msg_u *)m; 1944 msg = (union bfi_ioc_i2h_msg_u *) m;
1347 1945
1348 bfa_ioc_stats(ioc, ioc_isrs); 1946 bfa_ioc_stats(ioc, ioc_isrs);
1349 1947
@@ -1352,15 +1950,15 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
1352 break; 1950 break;
1353 1951
1354 case BFI_IOC_I2H_READY_EVENT: 1952 case BFI_IOC_I2H_READY_EVENT:
1355 bfa_fsm_send_event(ioc, IOC_E_FWREADY); 1953 bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
1356 break; 1954 break;
1357 1955
1358 case BFI_IOC_I2H_ENABLE_REPLY: 1956 case BFI_IOC_I2H_ENABLE_REPLY:
1359 bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE); 1957 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
1360 break; 1958 break;
1361 1959
1362 case BFI_IOC_I2H_DISABLE_REPLY: 1960 case BFI_IOC_I2H_DISABLE_REPLY:
1363 bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE); 1961 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
1364 break; 1962 break;
1365 1963
1366 case BFI_IOC_I2H_GETATTR_REPLY: 1964 case BFI_IOC_I2H_GETATTR_REPLY:
@@ -1378,29 +1976,24 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
1378 * 1976 *
1379 * @param[in] ioc memory for IOC 1977 * @param[in] ioc memory for IOC
1380 * @param[in] bfa driver instance structure 1978 * @param[in] bfa driver instance structure
1381 * @param[in] trcmod kernel trace module
1382 * @param[in] aen kernel aen event module
1383 * @param[in] logm kernel logging module
1384 */ 1979 */
1385void 1980void
1386bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn, 1981bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
1387 struct bfa_timer_mod_s *timer_mod, struct bfa_trc_mod_s *trcmod, 1982 struct bfa_timer_mod_s *timer_mod)
1388 struct bfa_aen_s *aen, struct bfa_log_mod_s *logm) 1983{
1389{ 1984 ioc->bfa = bfa;
1390 ioc->bfa = bfa; 1985 ioc->cbfn = cbfn;
1391 ioc->cbfn = cbfn; 1986 ioc->timer_mod = timer_mod;
1392 ioc->timer_mod = timer_mod; 1987 ioc->fcmode = BFA_FALSE;
1393 ioc->trcmod = trcmod; 1988 ioc->pllinit = BFA_FALSE;
1394 ioc->aen = aen;
1395 ioc->logm = logm;
1396 ioc->fcmode = BFA_FALSE;
1397 ioc->pllinit = BFA_FALSE;
1398 ioc->dbg_fwsave_once = BFA_TRUE; 1989 ioc->dbg_fwsave_once = BFA_TRUE;
1990 ioc->iocpf.ioc = ioc;
1399 1991
1400 bfa_ioc_mbox_attach(ioc); 1992 bfa_ioc_mbox_attach(ioc);
1401 INIT_LIST_HEAD(&ioc->hb_notify_q); 1993 INIT_LIST_HEAD(&ioc->hb_notify_q);
1402 1994
1403 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); 1995 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
1996 bfa_fsm_send_event(ioc, IOC_E_RESET);
1404} 1997}
1405 1998
1406/** 1999/**
@@ -1421,10 +2014,10 @@ void
1421bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev, 2014bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
1422 enum bfi_mclass mc) 2015 enum bfi_mclass mc)
1423{ 2016{
1424 ioc->ioc_mc = mc; 2017 ioc->ioc_mc = mc;
1425 ioc->pcidev = *pcidev; 2018 ioc->pcidev = *pcidev;
1426 ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id); 2019 ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id);
1427 ioc->cna = ioc->ctdev && !ioc->fcmode; 2020 ioc->cna = ioc->ctdev && !ioc->fcmode;
1428 2021
1429 /** 2022 /**
1430 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c 2023 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
@@ -1445,14 +2038,14 @@ bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
1445 * @param[in] dm_pa physical address of IOC dma memory 2038 * @param[in] dm_pa physical address of IOC dma memory
1446 */ 2039 */
1447void 2040void
1448bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa) 2041bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
1449{ 2042{
1450 /** 2043 /**
1451 * dma memory for firmware attribute 2044 * dma memory for firmware attribute
1452 */ 2045 */
1453 ioc->attr_dma.kva = dm_kva; 2046 ioc->attr_dma.kva = dm_kva;
1454 ioc->attr_dma.pa = dm_pa; 2047 ioc->attr_dma.pa = dm_pa;
1455 ioc->attr = (struct bfi_ioc_attr_s *)dm_kva; 2048 ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
1456} 2049}
1457 2050
1458/** 2051/**
@@ -1490,7 +2083,7 @@ bfa_ioc_disable(struct bfa_ioc_s *ioc)
1490int 2083int
1491bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover) 2084bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover)
1492{ 2085{
1493return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0; 2086 return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
1494} 2087}
1495 2088
1496/** 2089/**
@@ -1500,8 +2093,8 @@ return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
1500void 2093void
1501bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave) 2094bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
1502{ 2095{
1503 ioc->dbg_fwsave = dbg_fwsave; 2096 ioc->dbg_fwsave = dbg_fwsave;
1504 ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->auto_recover); 2097 ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->iocpf.auto_recover);
1505} 2098}
1506 2099
1507u32 2100u32
@@ -1525,8 +2118,8 @@ bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr)
1525void 2118void
1526bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs) 2119bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
1527{ 2120{
1528 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; 2121 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1529 int mc; 2122 int mc;
1530 2123
1531 for (mc = 0; mc < BFI_MC_MAX; mc++) 2124 for (mc = 0; mc < BFI_MC_MAX; mc++)
1532 mod->mbhdlr[mc].cbfn = mcfuncs[mc]; 2125 mod->mbhdlr[mc].cbfn = mcfuncs[mc];
@@ -1539,10 +2132,10 @@ void
1539bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc, 2132bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
1540 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg) 2133 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
1541{ 2134{
1542 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; 2135 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1543 2136
1544 mod->mbhdlr[mc].cbfn = cbfn; 2137 mod->mbhdlr[mc].cbfn = cbfn;
1545 mod->mbhdlr[mc].cbarg = cbarg; 2138 mod->mbhdlr[mc].cbarg = cbarg;
1546} 2139}
1547 2140
1548/** 2141/**
@@ -1555,8 +2148,8 @@ bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
1555void 2148void
1556bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd) 2149bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
1557{ 2150{
1558 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; 2151 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1559 u32 stat; 2152 u32 stat;
1560 2153
1561 /** 2154 /**
1562 * If a previous command is pending, queue new command 2155 * If a previous command is pending, queue new command
@@ -1587,9 +2180,9 @@ bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
1587void 2180void
1588bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc) 2181bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
1589{ 2182{
1590 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; 2183 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1591 struct bfi_mbmsg_s m; 2184 struct bfi_mbmsg_s m;
1592 int mc; 2185 int mc;
1593 2186
1594 bfa_ioc_msgget(ioc, &m); 2187 bfa_ioc_msgget(ioc, &m);
1595 2188
@@ -1621,16 +2214,14 @@ bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
1621 ioc->port_id = bfa_ioc_pcifn(ioc); 2214 ioc->port_id = bfa_ioc_pcifn(ioc);
1622} 2215}
1623 2216
1624#ifndef BFA_BIOS_BUILD
1625
1626/** 2217/**
1627 * return true if IOC is disabled 2218 * return true if IOC is disabled
1628 */ 2219 */
1629bfa_boolean_t 2220bfa_boolean_t
1630bfa_ioc_is_disabled(struct bfa_ioc_s *ioc) 2221bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
1631{ 2222{
1632 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) 2223 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
1633 || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled); 2224 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
1634} 2225}
1635 2226
1636/** 2227/**
@@ -1639,9 +2230,9 @@ bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
1639bfa_boolean_t 2230bfa_boolean_t
1640bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc) 2231bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
1641{ 2232{
1642 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) 2233 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
1643 || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_fwcheck) 2234 bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
1644 || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_mismatch); 2235 bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
1645} 2236}
1646 2237
1647#define bfa_ioc_state_disabled(__sm) \ 2238#define bfa_ioc_state_disabled(__sm) \
@@ -1659,8 +2250,8 @@ bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
1659bfa_boolean_t 2250bfa_boolean_t
1660bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc) 2251bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
1661{ 2252{
1662 u32 ioc_state; 2253 u32 ioc_state;
1663 bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; 2254 bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
1664 2255
1665 if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled)) 2256 if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
1666 return BFA_FALSE; 2257 return BFA_FALSE;
@@ -1669,16 +2260,18 @@ bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
1669 if (!bfa_ioc_state_disabled(ioc_state)) 2260 if (!bfa_ioc_state_disabled(ioc_state))
1670 return BFA_FALSE; 2261 return BFA_FALSE;
1671 2262
1672 ioc_state = bfa_reg_read(rb + BFA_IOC1_STATE_REG); 2263 if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
1673 if (!bfa_ioc_state_disabled(ioc_state)) 2264 ioc_state = bfa_reg_read(rb + BFA_IOC1_STATE_REG);
1674 return BFA_FALSE; 2265 if (!bfa_ioc_state_disabled(ioc_state))
2266 return BFA_FALSE;
2267 }
1675 2268
1676 return BFA_TRUE; 2269 return BFA_TRUE;
1677} 2270}
1678 2271
1679/** 2272/**
1680 * Add to IOC heartbeat failure notification queue. To be used by common 2273 * Add to IOC heartbeat failure notification queue. To be used by common
1681 * modules such as 2274 * modules such as cee, port, diag.
1682 */ 2275 */
1683void 2276void
1684bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc, 2277bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
@@ -1692,7 +2285,7 @@ void
1692bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc, 2285bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
1693 struct bfa_adapter_attr_s *ad_attr) 2286 struct bfa_adapter_attr_s *ad_attr)
1694{ 2287{
1695 struct bfi_ioc_attr_s *ioc_attr; 2288 struct bfi_ioc_attr_s *ioc_attr;
1696 2289
1697 ioc_attr = ioc->attr; 2290 ioc_attr = ioc->attr;
1698 2291
@@ -1719,7 +2312,7 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
1719 ad_attr->prototype = 0; 2312 ad_attr->prototype = 0;
1720 2313
1721 ad_attr->pwwn = bfa_ioc_get_pwwn(ioc); 2314 ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
1722 ad_attr->mac = bfa_ioc_get_mac(ioc); 2315 ad_attr->mac = bfa_ioc_get_mac(ioc);
1723 2316
1724 ad_attr->pcie_gen = ioc_attr->pcie_gen; 2317 ad_attr->pcie_gen = ioc_attr->pcie_gen;
1725 ad_attr->pcie_lanes = ioc_attr->pcie_lanes; 2318 ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
@@ -1729,6 +2322,7 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
1729 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver); 2322 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
1730 2323
1731 ad_attr->cna_capable = ioc->cna; 2324 ad_attr->cna_capable = ioc->cna;
2325 ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna;
1732} 2326}
1733 2327
1734enum bfa_ioc_type_e 2328enum bfa_ioc_type_e
@@ -1782,7 +2376,7 @@ bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
1782{ 2376{
1783 bfa_os_memset((void *)optrom_ver, 0, BFA_VERSION_LEN); 2377 bfa_os_memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
1784 bfa_os_memcpy(optrom_ver, ioc->attr->optrom_version, 2378 bfa_os_memcpy(optrom_ver, ioc->attr->optrom_version,
1785 BFA_VERSION_LEN); 2379 BFA_VERSION_LEN);
1786} 2380}
1787 2381
1788void 2382void
@@ -1795,7 +2389,7 @@ bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
1795void 2389void
1796bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model) 2390bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
1797{ 2391{
1798 struct bfi_ioc_attr_s *ioc_attr; 2392 struct bfi_ioc_attr_s *ioc_attr;
1799 2393
1800 bfa_assert(model); 2394 bfa_assert(model);
1801 bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN); 2395 bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
@@ -1805,14 +2399,48 @@ bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
1805 /** 2399 /**
1806 * model name 2400 * model name
1807 */ 2401 */
1808 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u", 2402 bfa_os_snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
1809 BFA_MFG_NAME, ioc_attr->card_type); 2403 BFA_MFG_NAME, ioc_attr->card_type);
1810} 2404}
1811 2405
1812enum bfa_ioc_state 2406enum bfa_ioc_state
1813bfa_ioc_get_state(struct bfa_ioc_s *ioc) 2407bfa_ioc_get_state(struct bfa_ioc_s *ioc)
1814{ 2408{
1815 return bfa_sm_to_state(ioc_sm_table, ioc->fsm); 2409 enum bfa_iocpf_state iocpf_st;
2410 enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2411
2412 if (ioc_st == BFA_IOC_ENABLING ||
2413 ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2414
2415 iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2416
2417 switch (iocpf_st) {
2418 case BFA_IOCPF_SEMWAIT:
2419 ioc_st = BFA_IOC_SEMWAIT;
2420 break;
2421
2422 case BFA_IOCPF_HWINIT:
2423 ioc_st = BFA_IOC_HWINIT;
2424 break;
2425
2426 case BFA_IOCPF_FWMISMATCH:
2427 ioc_st = BFA_IOC_FWMISMATCH;
2428 break;
2429
2430 case BFA_IOCPF_FAIL:
2431 ioc_st = BFA_IOC_FAIL;
2432 break;
2433
2434 case BFA_IOCPF_INITFAIL:
2435 ioc_st = BFA_IOC_INITFAIL;
2436 break;
2437
2438 default:
2439 break;
2440 }
2441 }
2442
2443 return ioc_st;
1816} 2444}
1817 2445
1818void 2446void
@@ -1833,7 +2461,7 @@ bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
1833} 2461}
1834 2462
1835/** 2463/**
1836 * bfa_wwn_public 2464 * hal_wwn_public
1837 */ 2465 */
1838wwn_t 2466wwn_t
1839bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc) 2467bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc)
@@ -1857,10 +2485,10 @@ mac_t
1857bfa_ioc_get_mac(struct bfa_ioc_s *ioc) 2485bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
1858{ 2486{
1859 /* 2487 /*
1860 * Currently mfg mac is used as FCoE enode mac (not configured by PBC) 2488 * Check the IOC type and return the appropriate MAC
1861 */ 2489 */
1862 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE) 2490 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
1863 return bfa_ioc_get_mfg_mac(ioc); 2491 return ioc->attr->fcoe_mac;
1864 else 2492 else
1865 return ioc->attr->mac; 2493 return ioc->attr->mac;
1866} 2494}
@@ -1880,12 +2508,16 @@ bfa_ioc_get_mfg_nwwn(struct bfa_ioc_s *ioc)
1880mac_t 2508mac_t
1881bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc) 2509bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
1882{ 2510{
1883 mac_t mac; 2511 mac_t m;
1884 2512
1885 mac = ioc->attr->mfg_mac; 2513 m = ioc->attr->mfg_mac;
1886 mac.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc); 2514 if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
2515 m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
2516 else
2517 bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
2518 bfa_ioc_pcifn(ioc));
1887 2519
1888 return mac; 2520 return m;
1889} 2521}
1890 2522
1891bfa_boolean_t 2523bfa_boolean_t
@@ -1895,46 +2527,12 @@ bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
1895} 2527}
1896 2528
1897/** 2529/**
1898 * Send AEN notification
1899 */
1900void
1901bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
1902{
1903 union bfa_aen_data_u aen_data;
1904 struct bfa_log_mod_s *logmod = ioc->logm;
1905 s32 inst_num = 0;
1906 enum bfa_ioc_type_e ioc_type;
1907
1908 bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, event), inst_num);
1909
1910 memset(&aen_data.ioc.pwwn, 0, sizeof(aen_data.ioc.pwwn));
1911 memset(&aen_data.ioc.mac, 0, sizeof(aen_data.ioc.mac));
1912 ioc_type = bfa_ioc_get_type(ioc);
1913 switch (ioc_type) {
1914 case BFA_IOC_TYPE_FC:
1915 aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
1916 break;
1917 case BFA_IOC_TYPE_FCoE:
1918 aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
1919 aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
1920 break;
1921 case BFA_IOC_TYPE_LL:
1922 aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
1923 break;
1924 default:
1925 bfa_assert(ioc_type == BFA_IOC_TYPE_FC);
1926 break;
1927 }
1928 aen_data.ioc.ioc_type = ioc_type;
1929}
1930
1931/**
1932 * Retrieve saved firmware trace from a prior IOC failure. 2530 * Retrieve saved firmware trace from a prior IOC failure.
1933 */ 2531 */
1934bfa_status_t 2532bfa_status_t
1935bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen) 2533bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
1936{ 2534{
1937 int tlen; 2535 int tlen;
1938 2536
1939 if (ioc->dbg_fwsave_len == 0) 2537 if (ioc->dbg_fwsave_len == 0)
1940 return BFA_STATUS_ENOFSAVE; 2538 return BFA_STATUS_ENOFSAVE;
@@ -1963,57 +2561,145 @@ bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc)
1963bfa_status_t 2561bfa_status_t
1964bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen) 2562bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
1965{ 2563{
1966 u32 pgnum; 2564 u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
1967 u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc)); 2565 int tlen;
1968 int i, tlen; 2566 bfa_status_t status;
1969 u32 *tbuf = trcdata, r32;
1970 2567
1971 bfa_trc(ioc, *trclen); 2568 bfa_trc(ioc, *trclen);
1972 2569
1973 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1974 loff = bfa_ioc_smem_pgoff(ioc, loff);
1975
1976 /*
1977 * Hold semaphore to serialize pll init and fwtrc.
1978 */
1979 if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg))
1980 return BFA_STATUS_FAILED;
1981
1982 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
1983
1984 tlen = *trclen; 2570 tlen = *trclen;
1985 if (tlen > BFA_DBG_FWTRC_LEN) 2571 if (tlen > BFA_DBG_FWTRC_LEN)
1986 tlen = BFA_DBG_FWTRC_LEN; 2572 tlen = BFA_DBG_FWTRC_LEN;
1987 tlen /= sizeof(u32);
1988 2573
1989 bfa_trc(ioc, tlen); 2574 status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
2575 *trclen = tlen;
2576 return status;
2577}
1990 2578
1991 for (i = 0; i < tlen; i++) { 2579static void
1992 r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); 2580bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
1993 tbuf[i] = bfa_os_ntohl(r32); 2581{
1994 loff += sizeof(u32); 2582 struct bfa_mbox_cmd_s cmd;
2583 struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
1995 2584
1996 /** 2585 bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
1997 * handle page offset wrap around 2586 bfa_ioc_portid(ioc));
1998 */ 2587 req->ioc_class = ioc->ioc_mc;
1999 loff = PSS_SMEM_PGOFF(loff); 2588 bfa_ioc_mbox_queue(ioc, &cmd);
2000 if (loff == 0) { 2589}
2001 pgnum++; 2590
2002 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); 2591static void
2003 } 2592bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
2593{
2594 u32 fwsync_iter = 1000;
2595
2596 bfa_ioc_send_fwsync(ioc);
2597
2598 /**
2599 * After sending a fw sync mbox command wait for it to
2600 * take effect. We will not wait for a response because
2601 * 1. fw_sync mbox cmd doesn't have a response.
2602 * 2. Even if we implement that, interrupts might not
2603 * be enabled when we call this function.
2604 * So, just keep checking if any mbox cmd is pending, and
2605 * after waiting for a reasonable amount of time, go ahead.
2606 * It is possible that fw has crashed and the mbox command
2607 * is never acknowledged.
2608 */
2609 while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
2610 fwsync_iter--;
2611}
2612
2613/**
2614 * Dump firmware smem
2615 */
2616bfa_status_t
2617bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
2618 u32 *offset, int *buflen)
2619{
2620 u32 loff;
2621 int dlen;
2622 bfa_status_t status;
2623 u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
2624
2625 if (*offset >= smem_len) {
2626 *offset = *buflen = 0;
2627 return BFA_STATUS_EINVAL;
2004 } 2628 }
2005 bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
2006 bfa_ioc_smem_pgnum(ioc, 0));
2007 2629
2008 /* 2630 loff = *offset;
2009 * release semaphore. 2631 dlen = *buflen;
2632
2633 /**
2634 * First smem read, sync smem before proceeding
2635 * No need to sync before reading every chunk.
2010 */ 2636 */
2011 bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg); 2637 if (loff == 0)
2638 bfa_ioc_fwsync(ioc);
2012 2639
2013 bfa_trc(ioc, pgnum); 2640 if ((loff + dlen) >= smem_len)
2641 dlen = smem_len - loff;
2014 2642
2015 *trclen = tlen * sizeof(u32); 2643 status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
2016 return BFA_STATUS_OK; 2644
2645 if (status != BFA_STATUS_OK) {
2646 *offset = *buflen = 0;
2647 return status;
2648 }
2649
2650 *offset += dlen;
2651
2652 if (*offset >= smem_len)
2653 *offset = 0;
2654
2655 *buflen = dlen;
2656
2657 return status;
2658}
2659
2660/**
2661 * Firmware statistics
2662 */
2663bfa_status_t
2664bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
2665{
2666 u32 loff = BFI_IOC_FWSTATS_OFF + \
2667 BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
2668 int tlen;
2669 bfa_status_t status;
2670
2671 if (ioc->stats_busy) {
2672 bfa_trc(ioc, ioc->stats_busy);
2673 return BFA_STATUS_DEVBUSY;
2674 }
2675 ioc->stats_busy = BFA_TRUE;
2676
2677 tlen = sizeof(struct bfa_fw_stats_s);
2678 status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
2679
2680 ioc->stats_busy = BFA_FALSE;
2681 return status;
2682}
2683
2684bfa_status_t
2685bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
2686{
2687 u32 loff = BFI_IOC_FWSTATS_OFF + \
2688 BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
2689 int tlen;
2690 bfa_status_t status;
2691
2692 if (ioc->stats_busy) {
2693 bfa_trc(ioc, ioc->stats_busy);
2694 return BFA_STATUS_DEVBUSY;
2695 }
2696 ioc->stats_busy = BFA_TRUE;
2697
2698 tlen = sizeof(struct bfa_fw_stats_s);
2699 status = bfa_ioc_smem_clr(ioc, loff, tlen);
2700
2701 ioc->stats_busy = BFA_FALSE;
2702 return status;
2017} 2703}
2018 2704
2019/** 2705/**
@@ -2022,7 +2708,7 @@ bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2022static void 2708static void
2023bfa_ioc_debug_save(struct bfa_ioc_s *ioc) 2709bfa_ioc_debug_save(struct bfa_ioc_s *ioc)
2024{ 2710{
2025 int tlen; 2711 int tlen;
2026 2712
2027 if (ioc->dbg_fwsave_len) { 2713 if (ioc->dbg_fwsave_len) {
2028 tlen = ioc->dbg_fwsave_len; 2714 tlen = ioc->dbg_fwsave_len;
@@ -2050,11 +2736,135 @@ bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
2050{ 2736{
2051 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL) 2737 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
2052 return; 2738 return;
2739}
2740
2741/**
2742 * hal_iocpf_pvt BFA IOC PF private functions
2743 */
2053 2744
2054 if (ioc->attr->nwwn == 0) 2745static void
2055 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_NWWN); 2746bfa_iocpf_enable(struct bfa_ioc_s *ioc)
2056 if (ioc->attr->pwwn == 0) 2747{
2057 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_PWWN); 2748 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
2058} 2749}
2059 2750
2060#endif 2751static void
2752bfa_iocpf_disable(struct bfa_ioc_s *ioc)
2753{
2754 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
2755}
2756
2757static void
2758bfa_iocpf_fail(struct bfa_ioc_s *ioc)
2759{
2760 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
2761}
2762
2763static void
2764bfa_iocpf_initfail(struct bfa_ioc_s *ioc)
2765{
2766 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
2767}
2768
2769static void
2770bfa_iocpf_getattrfail(struct bfa_ioc_s *ioc)
2771{
2772 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
2773}
2774
2775static void
2776bfa_iocpf_stop(struct bfa_ioc_s *ioc)
2777{
2778 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
2779}
2780
2781static void
2782bfa_iocpf_timeout(void *ioc_arg)
2783{
2784 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
2785
2786 bfa_trc(ioc, 0);
2787 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2788}
2789
2790static void
2791bfa_iocpf_sem_timeout(void *ioc_arg)
2792{
2793 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
2794
2795 bfa_ioc_hw_sem_get(ioc);
2796}
2797
2798/**
2799 * bfa timer function
2800 */
2801void
2802bfa_timer_init(struct bfa_timer_mod_s *mod)
2803{
2804 INIT_LIST_HEAD(&mod->timer_q);
2805}
2806
2807void
2808bfa_timer_beat(struct bfa_timer_mod_s *mod)
2809{
2810 struct list_head *qh = &mod->timer_q;
2811 struct list_head *qe, *qe_next;
2812 struct bfa_timer_s *elem;
2813 struct list_head timedout_q;
2814
2815 INIT_LIST_HEAD(&timedout_q);
2816
2817 qe = bfa_q_next(qh);
2818
2819 while (qe != qh) {
2820 qe_next = bfa_q_next(qe);
2821
2822 elem = (struct bfa_timer_s *) qe;
2823 if (elem->timeout <= BFA_TIMER_FREQ) {
2824 elem->timeout = 0;
2825 list_del(&elem->qe);
2826 list_add_tail(&elem->qe, &timedout_q);
2827 } else {
2828 elem->timeout -= BFA_TIMER_FREQ;
2829 }
2830
2831 qe = qe_next; /* go to next elem */
2832 }
2833
2834 /*
2835 * Pop all the timeout entries
2836 */
2837 while (!list_empty(&timedout_q)) {
2838 bfa_q_deq(&timedout_q, &elem);
2839 elem->timercb(elem->arg);
2840 }
2841}
2842
2843/**
2844 * Should be called with lock protection
2845 */
2846void
2847bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
2848 void (*timercb) (void *), void *arg, unsigned int timeout)
2849{
2850
2851 bfa_assert(timercb != NULL);
2852 bfa_assert(!bfa_q_is_on_q(&mod->timer_q, timer));
2853
2854 timer->timeout = timeout;
2855 timer->timercb = timercb;
2856 timer->arg = arg;
2857
2858 list_add_tail(&timer->qe, &mod->timer_q);
2859}
2860
2861/**
2862 * Should be called with lock protection
2863 */
2864void
2865bfa_timer_stop(struct bfa_timer_s *timer)
2866{
2867 bfa_assert(!list_empty(&timer->qe));
2868
2869 list_del(&timer->qe);
2870}
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index cae05b251c99..288c5801aace 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -18,18 +18,74 @@
18#ifndef __BFA_IOC_H__ 18#ifndef __BFA_IOC_H__
19#define __BFA_IOC_H__ 19#define __BFA_IOC_H__
20 20
21#include <cs/bfa_sm.h> 21#include "bfa_os_inc.h"
22#include <bfi/bfi.h> 22#include "bfa_cs.h"
23#include <bfi/bfi_ioc.h> 23#include "bfi.h"
24#include <bfi/bfi_boot.h> 24
25#include <bfa_timer.h> 25/**
26 * BFA timer declarations
27 */
28typedef void (*bfa_timer_cbfn_t)(void *);
29
30/**
31 * BFA timer data structure
32 */
33struct bfa_timer_s {
34 struct list_head qe;
35 bfa_timer_cbfn_t timercb;
36 void *arg;
37 int timeout; /**< in millisecs. */
38};
39
40/**
41 * Timer module structure
42 */
43struct bfa_timer_mod_s {
44 struct list_head timer_q;
45};
46
47#define BFA_TIMER_FREQ 200 /**< specified in millisecs */
48
49void bfa_timer_beat(struct bfa_timer_mod_s *mod);
50void bfa_timer_init(struct bfa_timer_mod_s *mod);
51void bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
52 bfa_timer_cbfn_t timercb, void *arg,
53 unsigned int timeout);
54void bfa_timer_stop(struct bfa_timer_s *timer);
55
56/**
57 * Generic Scatter Gather Element used by driver
58 */
59struct bfa_sge_s {
60 u32 sg_len;
61 void *sg_addr;
62};
63
64#define bfa_sge_word_swap(__sge) do { \
65 ((u32 *)(__sge))[0] = bfa_os_swap32(((u32 *)(__sge))[0]); \
66 ((u32 *)(__sge))[1] = bfa_os_swap32(((u32 *)(__sge))[1]); \
67 ((u32 *)(__sge))[2] = bfa_os_swap32(((u32 *)(__sge))[2]); \
68} while (0)
69
70#define bfa_swap_words(_x) ( \
71 ((_x) << 32) | ((_x) >> 32))
72
73#ifdef __BIGENDIAN
74#define bfa_sge_to_be(_x)
75#define bfa_sge_to_le(_x) bfa_sge_word_swap(_x)
76#define bfa_sgaddr_le(_x) bfa_swap_words(_x)
77#else
78#define bfa_sge_to_be(_x) bfa_sge_word_swap(_x)
79#define bfa_sge_to_le(_x)
80#define bfa_sgaddr_le(_x) (_x)
81#endif
26 82
27/** 83/**
28 * PCI device information required by IOC 84 * PCI device information required by IOC
29 */ 85 */
30struct bfa_pcidev_s { 86struct bfa_pcidev_s {
31 int pci_slot; 87 int pci_slot;
32 u8 pci_func; 88 u8 pci_func;
33 u16 device_id; 89 u16 device_id;
34 bfa_os_addr_t pci_bar_kva; 90 bfa_os_addr_t pci_bar_kva;
35}; 91};
@@ -39,13 +95,18 @@ struct bfa_pcidev_s {
39 * Address 95 * Address
40 */ 96 */
41struct bfa_dma_s { 97struct bfa_dma_s {
42 void *kva; /*! Kernel virtual address */ 98 void *kva; /* ! Kernel virtual address */
43 u64 pa; /*! Physical address */ 99 u64 pa; /* ! Physical address */
44}; 100};
45 101
46#define BFA_DMA_ALIGN_SZ 256 102#define BFA_DMA_ALIGN_SZ 256
47#define BFA_ROUNDUP(_l, _s) (((_l) + ((_s) - 1)) & ~((_s) - 1)) 103#define BFA_ROUNDUP(_l, _s) (((_l) + ((_s) - 1)) & ~((_s) - 1))
48 104
105/**
106 * smem size for Crossbow and Catapult
107 */
108#define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */
109#define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */
49 110
50 111
51#define bfa_dma_addr_set(dma_addr, pa) \ 112#define bfa_dma_addr_set(dma_addr, pa) \
@@ -101,7 +162,7 @@ struct bfa_ioc_regs_s {
101 * IOC Mailbox structures 162 * IOC Mailbox structures
102 */ 163 */
103struct bfa_mbox_cmd_s { 164struct bfa_mbox_cmd_s {
104 struct list_head qe; 165 struct list_head qe;
105 u32 msg[BFI_IOC_MSGSZ]; 166 u32 msg[BFI_IOC_MSGSZ];
106}; 167};
107 168
@@ -110,8 +171,8 @@ struct bfa_mbox_cmd_s {
110 */ 171 */
111typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg_s *m); 172typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg_s *m);
112struct bfa_ioc_mbox_mod_s { 173struct bfa_ioc_mbox_mod_s {
113 struct list_head cmd_q; /* pending mbox queue */ 174 struct list_head cmd_q; /* pending mbox queue */
114 int nmclass; /* number of handlers */ 175 int nmclass; /* number of handlers */
115 struct { 176 struct {
116 bfa_ioc_mbox_mcfunc_t cbfn; /* message handlers */ 177 bfa_ioc_mbox_mcfunc_t cbfn; /* message handlers */
117 void *cbarg; 178 void *cbarg;
@@ -149,49 +210,54 @@ struct bfa_ioc_hbfail_notify_s {
149 (__notify)->cbarg = (__cbarg); \ 210 (__notify)->cbarg = (__cbarg); \
150} while (0) 211} while (0)
151 212
213struct bfa_iocpf_s {
214 bfa_fsm_t fsm;
215 struct bfa_ioc_s *ioc;
216 u32 retry_count;
217 bfa_boolean_t auto_recover;
218};
219
152struct bfa_ioc_s { 220struct bfa_ioc_s {
153 bfa_fsm_t fsm; 221 bfa_fsm_t fsm;
154 struct bfa_s *bfa; 222 struct bfa_s *bfa;
155 struct bfa_pcidev_s pcidev; 223 struct bfa_pcidev_s pcidev;
156 struct bfa_timer_mod_s *timer_mod; 224 struct bfa_timer_mod_s *timer_mod;
157 struct bfa_timer_s ioc_timer; 225 struct bfa_timer_s ioc_timer;
158 struct bfa_timer_s sem_timer; 226 struct bfa_timer_s sem_timer;
227 struct bfa_timer_s hb_timer;
159 u32 hb_count; 228 u32 hb_count;
160 u32 retry_count;
161 struct list_head hb_notify_q; 229 struct list_head hb_notify_q;
162 void *dbg_fwsave; 230 void *dbg_fwsave;
163 int dbg_fwsave_len; 231 int dbg_fwsave_len;
164 bfa_boolean_t dbg_fwsave_once; 232 bfa_boolean_t dbg_fwsave_once;
165 enum bfi_mclass ioc_mc; 233 enum bfi_mclass ioc_mc;
166 struct bfa_ioc_regs_s ioc_regs; 234 struct bfa_ioc_regs_s ioc_regs;
167 struct bfa_trc_mod_s *trcmod; 235 struct bfa_trc_mod_s *trcmod;
168 struct bfa_aen_s *aen;
169 struct bfa_log_mod_s *logm;
170 struct bfa_ioc_drv_stats_s stats; 236 struct bfa_ioc_drv_stats_s stats;
171 bfa_boolean_t auto_recover;
172 bfa_boolean_t fcmode; 237 bfa_boolean_t fcmode;
173 bfa_boolean_t ctdev; 238 bfa_boolean_t ctdev;
174 bfa_boolean_t cna; 239 bfa_boolean_t cna;
175 bfa_boolean_t pllinit; 240 bfa_boolean_t pllinit;
241 bfa_boolean_t stats_busy; /* outstanding stats */
176 u8 port_id; 242 u8 port_id;
177
178 struct bfa_dma_s attr_dma; 243 struct bfa_dma_s attr_dma;
179 struct bfi_ioc_attr_s *attr; 244 struct bfi_ioc_attr_s *attr;
180 struct bfa_ioc_cbfn_s *cbfn; 245 struct bfa_ioc_cbfn_s *cbfn;
181 struct bfa_ioc_mbox_mod_s mbox_mod; 246 struct bfa_ioc_mbox_mod_s mbox_mod;
182 struct bfa_ioc_hwif_s *ioc_hwif; 247 struct bfa_ioc_hwif_s *ioc_hwif;
248 struct bfa_iocpf_s iocpf;
183}; 249};
184 250
185struct bfa_ioc_hwif_s { 251struct bfa_ioc_hwif_s {
186 bfa_status_t (*ioc_pll_init) (struct bfa_ioc_s *ioc); 252 bfa_status_t (*ioc_pll_init) (bfa_os_addr_t rb, bfa_boolean_t fcmode);
187 bfa_boolean_t (*ioc_firmware_lock) (struct bfa_ioc_s *ioc); 253 bfa_boolean_t (*ioc_firmware_lock) (struct bfa_ioc_s *ioc);
188 void (*ioc_firmware_unlock) (struct bfa_ioc_s *ioc); 254 void (*ioc_firmware_unlock) (struct bfa_ioc_s *ioc);
189 void (*ioc_reg_init) (struct bfa_ioc_s *ioc); 255 void (*ioc_reg_init) (struct bfa_ioc_s *ioc);
190 void (*ioc_map_port) (struct bfa_ioc_s *ioc); 256 void (*ioc_map_port) (struct bfa_ioc_s *ioc);
191 void (*ioc_isr_mode_set) (struct bfa_ioc_s *ioc, 257 void (*ioc_isr_mode_set) (struct bfa_ioc_s *ioc,
192 bfa_boolean_t msix); 258 bfa_boolean_t msix);
193 void (*ioc_notify_hbfail) (struct bfa_ioc_s *ioc); 259 void (*ioc_notify_hbfail) (struct bfa_ioc_s *ioc);
194 void (*ioc_ownership_reset) (struct bfa_ioc_s *ioc); 260 void (*ioc_ownership_reset) (struct bfa_ioc_s *ioc);
195}; 261};
196 262
197#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func) 263#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
@@ -206,18 +272,19 @@ struct bfa_ioc_hwif_s {
206#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit) 272#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit)
207#define bfa_ioc_speed_sup(__ioc) \ 273#define bfa_ioc_speed_sup(__ioc) \
208 BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop) 274 BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop)
209#define bfa_ioc_get_nports(__ioc) \ 275#define bfa_ioc_get_nports(__ioc) \
210 BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop) 276 BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop)
211 277
212#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++) 278#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++)
213#define BFA_IOC_FWIMG_MINSZ (16 * 1024) 279#define BFA_IOC_FWIMG_MINSZ (16 * 1024)
214#define BFA_IOC_FWIMG_TYPE(__ioc) \ 280#define BFA_IOC_FWIMG_TYPE(__ioc) \
215 (((__ioc)->ctdev) ? \ 281 (((__ioc)->ctdev) ? \
216 (((__ioc)->fcmode) ? BFI_IMAGE_CT_FC : BFI_IMAGE_CT_CNA) : \ 282 (((__ioc)->fcmode) ? BFI_IMAGE_CT_FC : BFI_IMAGE_CT_CNA) : \
217 BFI_IMAGE_CB_FC) 283 BFI_IMAGE_CB_FC)
218 284#define BFA_IOC_FW_SMEM_SIZE(__ioc) \
219#define BFA_IOC_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS) 285 (((__ioc)->ctdev) ? BFI_SMEM_CT_SIZE : BFI_SMEM_CB_SIZE)
220#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS) 286#define BFA_IOC_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS)
287#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS)
221#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS) 288#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
222 289
223/** 290/**
@@ -235,18 +302,28 @@ void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
235/** 302/**
236 * IOC interfaces 303 * IOC interfaces
237 */ 304 */
238#define bfa_ioc_pll_init(__ioc) ((__ioc)->ioc_hwif->ioc_pll_init(__ioc)) 305
239#define bfa_ioc_isr_mode_set(__ioc, __msix) \ 306#define bfa_ioc_pll_init_asic(__ioc) \
307 ((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
308 (__ioc)->fcmode))
309
310bfa_status_t bfa_ioc_pll_init(struct bfa_ioc_s *ioc);
311bfa_status_t bfa_ioc_cb_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode);
312bfa_boolean_t bfa_ioc_ct_pll_init_complete(bfa_os_addr_t rb);
313bfa_status_t bfa_ioc_ct_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode);
314
315#define bfa_ioc_isr_mode_set(__ioc, __msix) \
240 ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix)) 316 ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix))
241#define bfa_ioc_ownership_reset(__ioc) \ 317#define bfa_ioc_ownership_reset(__ioc) \
242 ((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc)) 318 ((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
243 319
320
244void bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc); 321void bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc);
245void bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc); 322void bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc);
323
246void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, 324void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa,
247 struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod, 325 struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod);
248 struct bfa_trc_mod_s *trcmod, 326void bfa_ioc_auto_recover(bfa_boolean_t auto_recover);
249 struct bfa_aen_s *aen, struct bfa_log_mod_s *logm);
250void bfa_ioc_detach(struct bfa_ioc_s *ioc); 327void bfa_ioc_detach(struct bfa_ioc_s *ioc);
251void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev, 328void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
252 enum bfi_mclass mc); 329 enum bfi_mclass mc);
@@ -256,21 +333,22 @@ void bfa_ioc_enable(struct bfa_ioc_s *ioc);
256void bfa_ioc_disable(struct bfa_ioc_s *ioc); 333void bfa_ioc_disable(struct bfa_ioc_s *ioc);
257bfa_boolean_t bfa_ioc_intx_claim(struct bfa_ioc_s *ioc); 334bfa_boolean_t bfa_ioc_intx_claim(struct bfa_ioc_s *ioc);
258 335
259void bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param); 336void bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type,
337 u32 boot_param);
260void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg); 338void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg);
261void bfa_ioc_error_isr(struct bfa_ioc_s *ioc); 339void bfa_ioc_error_isr(struct bfa_ioc_s *ioc);
262bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc); 340bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc);
341bfa_boolean_t bfa_ioc_is_initialized(struct bfa_ioc_s *ioc);
263bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc); 342bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc);
264bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc); 343bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc);
265bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc); 344bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc);
266void bfa_ioc_cfg_complete(struct bfa_ioc_s *ioc);
267enum bfa_ioc_type_e bfa_ioc_get_type(struct bfa_ioc_s *ioc); 345enum bfa_ioc_type_e bfa_ioc_get_type(struct bfa_ioc_s *ioc);
268void bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num); 346void bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num);
269void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver); 347void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver);
270void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver); 348void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver);
271void bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model); 349void bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model);
272void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, 350void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc,
273 char *manufacturer); 351 char *manufacturer);
274void bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev); 352void bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev);
275enum bfa_ioc_state bfa_ioc_get_state(struct bfa_ioc_s *ioc); 353enum bfa_ioc_state bfa_ioc_get_state(struct bfa_ioc_s *ioc);
276 354
@@ -284,6 +362,8 @@ bfa_status_t bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata,
284void bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc); 362void bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc);
285bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, 363bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata,
286 int *trclen); 364 int *trclen);
365bfa_status_t bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
366 u32 *offset, int *buflen);
287u32 bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr); 367u32 bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr);
288u32 bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr); 368u32 bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr);
289void bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc); 369void bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc);
@@ -297,7 +377,8 @@ void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
297 struct bfi_ioc_image_hdr_s *fwhdr); 377 struct bfi_ioc_image_hdr_s *fwhdr);
298bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, 378bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
299 struct bfi_ioc_image_hdr_s *fwhdr); 379 struct bfi_ioc_image_hdr_s *fwhdr);
300void bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event); 380bfa_status_t bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats);
381bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc);
301 382
302/* 383/*
303 * bfa mfg wwn API functions 384 * bfa mfg wwn API functions
@@ -310,5 +391,68 @@ wwn_t bfa_ioc_get_mfg_nwwn(struct bfa_ioc_s *ioc);
310mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc); 391mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc);
311u64 bfa_ioc_get_adid(struct bfa_ioc_s *ioc); 392u64 bfa_ioc_get_adid(struct bfa_ioc_s *ioc);
312 393
313#endif /* __BFA_IOC_H__ */ 394/*
395 * F/W Image Size & Chunk
396 */
397extern u32 bfi_image_ct_fc_size;
398extern u32 bfi_image_ct_cna_size;
399extern u32 bfi_image_cb_fc_size;
400extern u32 *bfi_image_ct_fc;
401extern u32 *bfi_image_ct_cna;
402extern u32 *bfi_image_cb_fc;
403
404static inline u32 *
405bfi_image_ct_fc_get_chunk(u32 off)
406{ return (u32 *)(bfi_image_ct_fc + off); }
407
408static inline u32 *
409bfi_image_ct_cna_get_chunk(u32 off)
410{ return (u32 *)(bfi_image_ct_cna + off); }
314 411
412static inline u32 *
413bfi_image_cb_fc_get_chunk(u32 off)
414{ return (u32 *)(bfi_image_cb_fc + off); }
415
416static inline u32*
417bfa_cb_image_get_chunk(int type, u32 off)
418{
419 switch (type) {
420 case BFI_IMAGE_CT_FC:
421 return bfi_image_ct_fc_get_chunk(off); break;
422 case BFI_IMAGE_CT_CNA:
423 return bfi_image_ct_cna_get_chunk(off); break;
424 case BFI_IMAGE_CB_FC:
425 return bfi_image_cb_fc_get_chunk(off); break;
426 default: return 0;
427 }
428}
429
430static inline u32
431bfa_cb_image_get_size(int type)
432{
433 switch (type) {
434 case BFI_IMAGE_CT_FC:
435 return bfi_image_ct_fc_size; break;
436 case BFI_IMAGE_CT_CNA:
437 return bfi_image_ct_cna_size; break;
438 case BFI_IMAGE_CB_FC:
439 return bfi_image_cb_fc_size; break;
440 default: return 0;
441 }
442}
443
444/**
445 * CNA TRCMOD declaration
446 */
447/*
448 * !!! Only append to the enums defined here to avoid any versioning
449 * !!! needed between trace utility and driver version
450 */
451enum {
452 BFA_TRC_CNA_PORT = 1,
453 BFA_TRC_CNA_IOC = 2,
454 BFA_TRC_CNA_IOC_CB = 3,
455 BFA_TRC_CNA_IOC_CT = 4,
456};
457
458#endif /* __BFA_IOC_H__ */
diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c
index 324bdde7ea2e..d7ac864d8539 100644
--- a/drivers/scsi/bfa/bfa_ioc_cb.c
+++ b/drivers/scsi/bfa/bfa_ioc_cb.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -15,22 +15,15 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18#include <bfa.h> 18#include "bfa_ioc.h"
19#include <bfa_ioc.h> 19#include "bfi_cbreg.h"
20#include <bfa_fwimg_priv.h> 20#include "bfa_defs.h"
21#include <cna/bfa_cna_trcmod.h>
22#include <cs/bfa_debug.h>
23#include <bfi/bfi_ioc.h>
24#include <bfi/bfi_cbreg.h>
25#include <log/bfa_log_hal.h>
26#include <defs/bfa_defs_pci.h>
27 21
28BFA_TRC_FILE(CNA, IOC_CB); 22BFA_TRC_FILE(CNA, IOC_CB);
29 23
30/* 24/*
31 * forward declarations 25 * forward declarations
32 */ 26 */
33static bfa_status_t bfa_ioc_cb_pll_init(struct bfa_ioc_s *ioc);
34static bfa_boolean_t bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc); 27static bfa_boolean_t bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc);
35static void bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc); 28static void bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc);
36static void bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc); 29static void bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc);
@@ -95,6 +88,7 @@ static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
95 * Host <-> LPU mailbox command/status registers 88 * Host <-> LPU mailbox command/status registers
96 */ 89 */
97static struct { u32 hfn, lpu; } iocreg_mbcmd[] = { 90static struct { u32 hfn, lpu; } iocreg_mbcmd[] = {
91
98 { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT }, 92 { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
99 { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT } 93 { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT }
100}; 94};
@@ -154,6 +148,7 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
154/** 148/**
155 * Initialize IOC to port mapping. 149 * Initialize IOC to port mapping.
156 */ 150 */
151
157static void 152static void
158bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc) 153bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc)
159{ 154{
@@ -161,6 +156,7 @@ bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc)
161 * For crossbow, port id is same as pci function. 156 * For crossbow, port id is same as pci function.
162 */ 157 */
163 ioc->port_id = bfa_ioc_pcifn(ioc); 158 ioc->port_id = bfa_ioc_pcifn(ioc);
159
164 bfa_trc(ioc, ioc->port_id); 160 bfa_trc(ioc, ioc->port_id);
165} 161}
166 162
@@ -172,87 +168,69 @@ bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
172{ 168{
173} 169}
174 170
175static bfa_status_t 171/**
176bfa_ioc_cb_pll_init(struct bfa_ioc_s *ioc) 172 * Cleanup hw semaphore and usecnt registers
173 */
174static void
175bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc)
177{ 176{
178 bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
179 u32 pll_sclk, pll_fclk;
180 177
181 /* 178 /*
182 * Hold semaphore so that nobody can access the chip during init. 179 * Read the hw sem reg to make sure that it is locked
180 * before we clear it. If it is not locked, writing 1
181 * will lock it instead of clearing it.
183 */ 182 */
184 bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg); 183 bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
184 bfa_ioc_hw_sem_release(ioc);
185}
186
187
188
189bfa_status_t
190bfa_ioc_cb_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode)
191{
192 u32 pll_sclk, pll_fclk;
185 193
186 pll_sclk = __APP_PLL_212_ENABLE | __APP_PLL_212_LRESETN | 194 pll_sclk = __APP_PLL_212_ENABLE | __APP_PLL_212_LRESETN |
187 __APP_PLL_212_P0_1(3U) | 195 __APP_PLL_212_P0_1(3U) |
188 __APP_PLL_212_JITLMT0_1(3U) | 196 __APP_PLL_212_JITLMT0_1(3U) |
189 __APP_PLL_212_CNTLMT0_1(3U); 197 __APP_PLL_212_CNTLMT0_1(3U);
190 pll_fclk = __APP_PLL_400_ENABLE | __APP_PLL_400_LRESETN | 198 pll_fclk = __APP_PLL_400_ENABLE | __APP_PLL_400_LRESETN |
191 __APP_PLL_400_RSEL200500 | __APP_PLL_400_P0_1(3U) | 199 __APP_PLL_400_RSEL200500 | __APP_PLL_400_P0_1(3U) |
192 __APP_PLL_400_JITLMT0_1(3U) | 200 __APP_PLL_400_JITLMT0_1(3U) |
193 __APP_PLL_400_CNTLMT0_1(3U); 201 __APP_PLL_400_CNTLMT0_1(3U);
194
195 bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT); 202 bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
196 bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT); 203 bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);
197
198 bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); 204 bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
199 bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); 205 bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
200 bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); 206 bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
201 bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); 207 bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
202 bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); 208 bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
203 bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); 209 bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
204 210 bfa_reg_write(rb + APP_PLL_212_CTL_REG,
205 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, 211 __APP_PLL_212_LOGIC_SOFT_RESET);
206 __APP_PLL_212_LOGIC_SOFT_RESET); 212 bfa_reg_write(rb + APP_PLL_212_CTL_REG,
207 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, 213 __APP_PLL_212_BYPASS |
208 __APP_PLL_212_BYPASS | 214 __APP_PLL_212_LOGIC_SOFT_RESET);
209 __APP_PLL_212_LOGIC_SOFT_RESET); 215 bfa_reg_write(rb + APP_PLL_400_CTL_REG,
210 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, 216 __APP_PLL_400_LOGIC_SOFT_RESET);
211 __APP_PLL_400_LOGIC_SOFT_RESET); 217 bfa_reg_write(rb + APP_PLL_400_CTL_REG,
212 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, 218 __APP_PLL_400_BYPASS |
213 __APP_PLL_400_BYPASS | 219 __APP_PLL_400_LOGIC_SOFT_RESET);
214 __APP_PLL_400_LOGIC_SOFT_RESET);
215 bfa_os_udelay(2); 220 bfa_os_udelay(2);
216 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, 221 bfa_reg_write(rb + APP_PLL_212_CTL_REG,
217 __APP_PLL_212_LOGIC_SOFT_RESET); 222 __APP_PLL_212_LOGIC_SOFT_RESET);
218 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, 223 bfa_reg_write(rb + APP_PLL_400_CTL_REG,
219 __APP_PLL_400_LOGIC_SOFT_RESET); 224 __APP_PLL_400_LOGIC_SOFT_RESET);
220 225 bfa_reg_write(rb + APP_PLL_212_CTL_REG,
221 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, 226 pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET);
222 pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET); 227 bfa_reg_write(rb + APP_PLL_400_CTL_REG,
223 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, 228 pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET);
224 pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET);
225
226 /**
227 * Wait for PLLs to lock.
228 */
229 bfa_os_udelay(2000); 229 bfa_os_udelay(2000);
230 bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); 230 bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
231 bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); 231 bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
232 232 bfa_reg_write((rb + APP_PLL_212_CTL_REG), pll_sclk);
233 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk); 233 bfa_reg_write((rb + APP_PLL_400_CTL_REG), pll_fclk);
234 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk);
235
236 /*
237 * release semaphore.
238 */
239 bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
240 234
241 return BFA_STATUS_OK; 235 return BFA_STATUS_OK;
242} 236}
243
244/**
245 * Cleanup hw semaphore and usecnt registers
246 */
247static void
248bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc)
249{
250
251 /*
252 * Read the hw sem reg to make sure that it is locked
253 * before we clear it. If it is not locked, writing 1
254 * will lock it instead of clearing it.
255 */
256 bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
257 bfa_ioc_hw_sem_release(ioc);
258}
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index 68f027da001e..f21b82c5f64c 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -15,22 +15,15 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18#include <bfa.h> 18#include "bfa_ioc.h"
19#include <bfa_ioc.h> 19#include "bfi_ctreg.h"
20#include <bfa_fwimg_priv.h> 20#include "bfa_defs.h"
21#include <cna/bfa_cna_trcmod.h>
22#include <cs/bfa_debug.h>
23#include <bfi/bfi_ioc.h>
24#include <bfi/bfi_ctreg.h>
25#include <log/bfa_log_hal.h>
26#include <defs/bfa_defs_pci.h>
27 21
28BFA_TRC_FILE(CNA, IOC_CT); 22BFA_TRC_FILE(CNA, IOC_CT);
29 23
30/* 24/*
31 * forward declarations 25 * forward declarations
32 */ 26 */
33static bfa_status_t bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc);
34static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc); 27static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
35static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc); 28static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
36static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc); 29static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc);
@@ -78,7 +71,8 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
78 /** 71 /**
79 * If bios boot (flash based) -- do not increment usage count 72 * If bios boot (flash based) -- do not increment usage count
80 */ 73 */
81 if (bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < BFA_IOC_FWIMG_MINSZ) 74 if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
75 BFA_IOC_FWIMG_MINSZ)
82 return BFA_TRUE; 76 return BFA_TRUE;
83 77
84 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 78 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
@@ -136,7 +130,8 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
136 /** 130 /**
137 * If bios boot (flash based) -- do not decrement usage count 131 * If bios boot (flash based) -- do not decrement usage count
138 */ 132 */
139 if (bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < BFA_IOC_FWIMG_MINSZ) 133 if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
134 BFA_IOC_FWIMG_MINSZ)
140 return; 135 return;
141 136
142 /** 137 /**
@@ -308,16 +303,47 @@ bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
308 bfa_reg_write(rb + FNC_PERS_REG, r32); 303 bfa_reg_write(rb + FNC_PERS_REG, r32);
309} 304}
310 305
311static bfa_status_t 306/**
312bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc) 307 * Cleanup hw semaphore and usecnt registers
308 */
309static void
310bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
313{ 311{
314 bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; 312
315 u32 pll_sclk, pll_fclk, r32; 313 if (ioc->cna) {
314 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
315 bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 0);
316 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
317 }
316 318
317 /* 319 /*
318 * Hold semaphore so that nobody can access the chip during init. 320 * Read the hw sem reg to make sure that it is locked
321 * before we clear it. If it is not locked, writing 1
322 * will lock it instead of clearing it.
319 */ 323 */
320 bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg); 324 bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
325 bfa_ioc_hw_sem_release(ioc);
326}
327
328
329
330/*
331 * Check the firmware state to know if pll_init has been completed already
332 */
333bfa_boolean_t
334bfa_ioc_ct_pll_init_complete(bfa_os_addr_t rb)
335{
336 if ((bfa_reg_read(rb + BFA_IOC0_STATE_REG) == BFI_IOC_OP) ||
337 (bfa_reg_read(rb + BFA_IOC1_STATE_REG) == BFI_IOC_OP))
338 return BFA_TRUE;
339
340 return BFA_FALSE;
341}
342
343bfa_status_t
344bfa_ioc_ct_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode)
345{
346 u32 pll_sclk, pll_fclk, r32;
321 347
322 pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST | 348 pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST |
323 __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) | 349 __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) |
@@ -327,70 +353,50 @@ bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc)
327 __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) | 353 __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
328 __APP_PLL_425_JITLMT0_1(3U) | 354 __APP_PLL_425_JITLMT0_1(3U) |
329 __APP_PLL_425_CNTLMT0_1(1U); 355 __APP_PLL_425_CNTLMT0_1(1U);
330 356 if (fcmode) {
331 /**
332 * For catapult, choose operational mode FC/FCoE
333 */
334 if (ioc->fcmode) {
335 bfa_reg_write((rb + OP_MODE), 0); 357 bfa_reg_write((rb + OP_MODE), 0);
336 bfa_reg_write((rb + ETH_MAC_SER_REG), 358 bfa_reg_write((rb + ETH_MAC_SER_REG),
337 __APP_EMS_CMLCKSEL | 359 __APP_EMS_CMLCKSEL |
338 __APP_EMS_REFCKBUFEN2 | 360 __APP_EMS_REFCKBUFEN2 |
339 __APP_EMS_CHANNEL_SEL); 361 __APP_EMS_CHANNEL_SEL);
340 } else { 362 } else {
341 ioc->pllinit = BFA_TRUE;
342 bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE); 363 bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE);
343 bfa_reg_write((rb + ETH_MAC_SER_REG), 364 bfa_reg_write((rb + ETH_MAC_SER_REG),
344 __APP_EMS_REFCKBUFEN1); 365 __APP_EMS_REFCKBUFEN1);
345 } 366 }
346
347 bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT); 367 bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
348 bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT); 368 bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);
349
350 bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); 369 bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
351 bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); 370 bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
352 bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); 371 bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
353 bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); 372 bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
354 bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); 373 bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
355 bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); 374 bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
356 375 bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk |
357 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk |
358 __APP_PLL_312_LOGIC_SOFT_RESET); 376 __APP_PLL_312_LOGIC_SOFT_RESET);
359 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk | 377 bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk |
360 __APP_PLL_425_LOGIC_SOFT_RESET); 378 __APP_PLL_425_LOGIC_SOFT_RESET);
361 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk | 379 bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk |
362 __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE); 380 __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE);
363 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk | 381 bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk |
364 __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE); 382 __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE);
365
366 /**
367 * Wait for PLLs to lock.
368 */
369 bfa_reg_read(rb + HOSTFN0_INT_MSK); 383 bfa_reg_read(rb + HOSTFN0_INT_MSK);
370 bfa_os_udelay(2000); 384 bfa_os_udelay(2000);
371 bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); 385 bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
372 bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); 386 bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
373 387 bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk |
374 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk |
375 __APP_PLL_312_ENABLE); 388 __APP_PLL_312_ENABLE);
376 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk | 389 bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk |
377 __APP_PLL_425_ENABLE); 390 __APP_PLL_425_ENABLE);
378 391 if (!fcmode) {
379 /**
380 * PSS memory reset is asserted at power-on-reset. Need to clear
381 * this before running EDRAM BISTR
382 */
383 if (ioc->cna) {
384 bfa_reg_write((rb + PMM_1T_RESET_REG_P0), __PMM_1T_RESET_P); 392 bfa_reg_write((rb + PMM_1T_RESET_REG_P0), __PMM_1T_RESET_P);
385 bfa_reg_write((rb + PMM_1T_RESET_REG_P1), __PMM_1T_RESET_P); 393 bfa_reg_write((rb + PMM_1T_RESET_REG_P1), __PMM_1T_RESET_P);
386 } 394 }
387
388 r32 = bfa_reg_read((rb + PSS_CTL_REG)); 395 r32 = bfa_reg_read((rb + PSS_CTL_REG));
389 r32 &= ~__PSS_LMEM_RESET; 396 r32 &= ~__PSS_LMEM_RESET;
390 bfa_reg_write((rb + PSS_CTL_REG), r32); 397 bfa_reg_write((rb + PSS_CTL_REG), r32);
391 bfa_os_udelay(1000); 398 bfa_os_udelay(1000);
392 399 if (!fcmode) {
393 if (ioc->cna) {
394 bfa_reg_write((rb + PMM_1T_RESET_REG_P0), 0); 400 bfa_reg_write((rb + PMM_1T_RESET_REG_P0), 0);
395 bfa_reg_write((rb + PMM_1T_RESET_REG_P1), 0); 401 bfa_reg_write((rb + PMM_1T_RESET_REG_P1), 0);
396 } 402 }
@@ -398,39 +404,6 @@ bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc)
398 bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START); 404 bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START);
399 bfa_os_udelay(1000); 405 bfa_os_udelay(1000);
400 r32 = bfa_reg_read((rb + MBIST_STAT_REG)); 406 r32 = bfa_reg_read((rb + MBIST_STAT_REG));
401 bfa_trc(ioc, r32);
402
403 /**
404 * Clear BISTR
405 */
406 bfa_reg_write((rb + MBIST_CTL_REG), 0); 407 bfa_reg_write((rb + MBIST_CTL_REG), 0);
407
408 /*
409 * release semaphore.
410 */
411 bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
412
413 return BFA_STATUS_OK; 408 return BFA_STATUS_OK;
414} 409}
415
416/**
417 * Cleanup hw semaphore and usecnt registers
418 */
419static void
420bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
421{
422
423 if (ioc->cna) {
424 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
425 bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 0);
426 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
427 }
428
429 /*
430 * Read the hw sem reg to make sure that it is locked
431 * before we clear it. If it is not locked, writing 1
432 * will lock it instead of clearing it.
433 */
434 bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
435 bfa_ioc_hw_sem_release(ioc);
436}
diff --git a/drivers/scsi/bfa/bfa_iocfc.c b/drivers/scsi/bfa/bfa_iocfc.c
deleted file mode 100644
index 90820be99864..000000000000
--- a/drivers/scsi/bfa/bfa_iocfc.c
+++ /dev/null
@@ -1,927 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <cs/bfa_debug.h>
19#include <bfa_priv.h>
20#include <log/bfa_log_hal.h>
21#include <bfi/bfi_boot.h>
22#include <bfi/bfi_cbreg.h>
23#include <aen/bfa_aen_ioc.h>
24#include <defs/bfa_defs_iocfc.h>
25#include <defs/bfa_defs_pci.h>
26#include "bfa_callback_priv.h"
27#include "bfad_drv.h"
28
29BFA_TRC_FILE(HAL, IOCFC);
30
31/**
32 * IOC local definitions
33 */
34#define BFA_IOCFC_TOV 5000 /* msecs */
35
36enum {
37 BFA_IOCFC_ACT_NONE = 0,
38 BFA_IOCFC_ACT_INIT = 1,
39 BFA_IOCFC_ACT_STOP = 2,
40 BFA_IOCFC_ACT_DISABLE = 3,
41};
42
43/*
44 * forward declarations
45 */
46static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
47static void bfa_iocfc_disable_cbfn(void *bfa_arg);
48static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
49static void bfa_iocfc_reset_cbfn(void *bfa_arg);
50static void bfa_iocfc_stats_clear(void *bfa_arg);
51static void bfa_iocfc_stats_swap(struct bfa_fw_stats_s *d,
52 struct bfa_fw_stats_s *s);
53static void bfa_iocfc_stats_clr_cb(void *bfa_arg, bfa_boolean_t complete);
54static void bfa_iocfc_stats_clr_timeout(void *bfa_arg);
55static void bfa_iocfc_stats_cb(void *bfa_arg, bfa_boolean_t complete);
56static void bfa_iocfc_stats_timeout(void *bfa_arg);
57
58static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
59
60/**
61 * bfa_ioc_pvt BFA IOC private functions
62 */
63
64static void
65bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
66{
67 int i, per_reqq_sz, per_rspq_sz;
68
69 per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
70 BFA_DMA_ALIGN_SZ);
71 per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
72 BFA_DMA_ALIGN_SZ);
73
74 /*
75 * Calculate CQ size
76 */
77 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
78 *dm_len = *dm_len + per_reqq_sz;
79 *dm_len = *dm_len + per_rspq_sz;
80 }
81
82 /*
83 * Calculate Shadow CI/PI size
84 */
85 for (i = 0; i < cfg->fwcfg.num_cqs; i++)
86 *dm_len += (2 * BFA_CACHELINE_SZ);
87}
88
89static void
90bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
91{
92 *dm_len +=
93 BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
94 *dm_len +=
95 BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
96 BFA_CACHELINE_SZ);
97 *dm_len += BFA_ROUNDUP(sizeof(struct bfa_fw_stats_s), BFA_CACHELINE_SZ);
98}
99
100/**
101 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
102 */
103static void
104bfa_iocfc_send_cfg(void *bfa_arg)
105{
106 struct bfa_s *bfa = bfa_arg;
107 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
108 struct bfi_iocfc_cfg_req_s cfg_req;
109 struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
110 struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg;
111 int i;
112
113 bfa_assert(cfg->fwcfg.num_cqs <= BFI_IOC_MAX_CQS);
114 bfa_trc(bfa, cfg->fwcfg.num_cqs);
115
116 bfa_iocfc_reset_queues(bfa);
117
118 /**
119 * initialize IOC configuration info
120 */
121 cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
122 cfg_info->num_cqs = cfg->fwcfg.num_cqs;
123
124 bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
125 bfa_dma_be_addr_set(cfg_info->stats_addr, iocfc->stats_pa);
126
127 /**
128 * dma map REQ and RSP circular queues and shadow pointers
129 */
130 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
131 bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
132 iocfc->req_cq_ba[i].pa);
133 bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
134 iocfc->req_cq_shadow_ci[i].pa);
135 cfg_info->req_cq_elems[i] =
136 bfa_os_htons(cfg->drvcfg.num_reqq_elems);
137
138 bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
139 iocfc->rsp_cq_ba[i].pa);
140 bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
141 iocfc->rsp_cq_shadow_pi[i].pa);
142 cfg_info->rsp_cq_elems[i] =
143 bfa_os_htons(cfg->drvcfg.num_rspq_elems);
144 }
145
146 /**
147 * Enable interrupt coalescing if it is driver init path
148 * and not ioc disable/enable path.
149 */
150 if (!iocfc->cfgdone)
151 cfg_info->intr_attr.coalesce = BFA_TRUE;
152
153 iocfc->cfgdone = BFA_FALSE;
154
155 /**
156 * dma map IOC configuration itself
157 */
158 bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
159 bfa_lpuid(bfa));
160 bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);
161
162 bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
163 sizeof(struct bfi_iocfc_cfg_req_s));
164}
165
166static void
167bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
168 struct bfa_pcidev_s *pcidev)
169{
170 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
171
172 bfa->bfad = bfad;
173 iocfc->bfa = bfa;
174 iocfc->action = BFA_IOCFC_ACT_NONE;
175
176 bfa_os_assign(iocfc->cfg, *cfg);
177
178 /**
179 * Initialize chip specific handlers.
180 */
181 if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) {
182 iocfc->hwif.hw_reginit = bfa_hwct_reginit;
183 iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
184 iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
185 iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
186 iocfc->hwif.hw_msix_install = bfa_hwct_msix_install;
187 iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
188 iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
189 iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
190 iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
191 } else {
192 iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
193 iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack;
194 iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
195 iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
196 iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install;
197 iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
198 iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
199 iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
200 iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
201 }
202
203 iocfc->hwif.hw_reginit(bfa);
204 bfa->msix.nvecs = 0;
205}
206
207static void
208bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
209 struct bfa_meminfo_s *meminfo)
210{
211 u8 *dm_kva;
212 u64 dm_pa;
213 int i, per_reqq_sz, per_rspq_sz;
214 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
215 int dbgsz;
216
217 dm_kva = bfa_meminfo_dma_virt(meminfo);
218 dm_pa = bfa_meminfo_dma_phys(meminfo);
219
220 /*
221 * First allocate dma memory for IOC.
222 */
223 bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
224 dm_kva += bfa_ioc_meminfo();
225 dm_pa += bfa_ioc_meminfo();
226
227 /*
228 * Claim DMA-able memory for the request/response queues and for shadow
229 * ci/pi registers
230 */
231 per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
232 BFA_DMA_ALIGN_SZ);
233 per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
234 BFA_DMA_ALIGN_SZ);
235
236 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
237 iocfc->req_cq_ba[i].kva = dm_kva;
238 iocfc->req_cq_ba[i].pa = dm_pa;
239 bfa_os_memset(dm_kva, 0, per_reqq_sz);
240 dm_kva += per_reqq_sz;
241 dm_pa += per_reqq_sz;
242
243 iocfc->rsp_cq_ba[i].kva = dm_kva;
244 iocfc->rsp_cq_ba[i].pa = dm_pa;
245 bfa_os_memset(dm_kva, 0, per_rspq_sz);
246 dm_kva += per_rspq_sz;
247 dm_pa += per_rspq_sz;
248 }
249
250 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
251 iocfc->req_cq_shadow_ci[i].kva = dm_kva;
252 iocfc->req_cq_shadow_ci[i].pa = dm_pa;
253 dm_kva += BFA_CACHELINE_SZ;
254 dm_pa += BFA_CACHELINE_SZ;
255
256 iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
257 iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
258 dm_kva += BFA_CACHELINE_SZ;
259 dm_pa += BFA_CACHELINE_SZ;
260 }
261
262 /*
263 * Claim DMA-able memory for the config info page
264 */
265 bfa->iocfc.cfg_info.kva = dm_kva;
266 bfa->iocfc.cfg_info.pa = dm_pa;
267 bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
268 dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
269 dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
270
271 /*
272 * Claim DMA-able memory for the config response
273 */
274 bfa->iocfc.cfgrsp_dma.kva = dm_kva;
275 bfa->iocfc.cfgrsp_dma.pa = dm_pa;
276 bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
277
278 dm_kva +=
279 BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
280 BFA_CACHELINE_SZ);
281 dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
282 BFA_CACHELINE_SZ);
283
284 /*
285 * Claim DMA-able memory for iocfc stats
286 */
287 bfa->iocfc.stats_kva = dm_kva;
288 bfa->iocfc.stats_pa = dm_pa;
289 bfa->iocfc.fw_stats = (struct bfa_fw_stats_s *) dm_kva;
290 dm_kva += BFA_ROUNDUP(sizeof(struct bfa_fw_stats_s), BFA_CACHELINE_SZ);
291 dm_pa += BFA_ROUNDUP(sizeof(struct bfa_fw_stats_s), BFA_CACHELINE_SZ);
292
293 bfa_meminfo_dma_virt(meminfo) = dm_kva;
294 bfa_meminfo_dma_phys(meminfo) = dm_pa;
295
296 dbgsz = bfa_ioc_debug_trcsz(bfa_auto_recover);
297 if (dbgsz > 0) {
298 bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo));
299 bfa_meminfo_kva(meminfo) += dbgsz;
300 }
301}
302
303/**
304 * Start BFA submodules.
305 */
306static void
307bfa_iocfc_start_submod(struct bfa_s *bfa)
308{
309 int i;
310
311 bfa->rme_process = BFA_TRUE;
312
313 for (i = 0; hal_mods[i]; i++)
314 hal_mods[i]->start(bfa);
315}
316
317/**
318 * Disable BFA submodules.
319 */
320static void
321bfa_iocfc_disable_submod(struct bfa_s *bfa)
322{
323 int i;
324
325 for (i = 0; hal_mods[i]; i++)
326 hal_mods[i]->iocdisable(bfa);
327}
328
329static void
330bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
331{
332 struct bfa_s *bfa = bfa_arg;
333
334 if (complete) {
335 if (bfa->iocfc.cfgdone)
336 bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
337 else
338 bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
339 } else {
340 if (bfa->iocfc.cfgdone)
341 bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
342 }
343}
344
345static void
346bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
347{
348 struct bfa_s *bfa = bfa_arg;
349 struct bfad_s *bfad = bfa->bfad;
350
351 if (compl)
352 complete(&bfad->comp);
353
354 else
355 bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
356}
357
358static void
359bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
360{
361 struct bfa_s *bfa = bfa_arg;
362 struct bfad_s *bfad = bfa->bfad;
363
364 if (compl)
365 complete(&bfad->disable_comp);
366}
367
368/**
369 * Update BFA configuration from firmware configuration.
370 */
371static void
372bfa_iocfc_cfgrsp(struct bfa_s *bfa)
373{
374 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
375 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
376 struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;
377
378 fwcfg->num_cqs = fwcfg->num_cqs;
379 fwcfg->num_ioim_reqs = bfa_os_ntohs(fwcfg->num_ioim_reqs);
380 fwcfg->num_tskim_reqs = bfa_os_ntohs(fwcfg->num_tskim_reqs);
381 fwcfg->num_fcxp_reqs = bfa_os_ntohs(fwcfg->num_fcxp_reqs);
382 fwcfg->num_uf_bufs = bfa_os_ntohs(fwcfg->num_uf_bufs);
383 fwcfg->num_rports = bfa_os_ntohs(fwcfg->num_rports);
384
385 iocfc->cfgdone = BFA_TRUE;
386
387 /**
388 * Configuration is complete - initialize/start submodules
389 */
390 bfa_fcport_init(bfa);
391
392 if (iocfc->action == BFA_IOCFC_ACT_INIT)
393 bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
394 else
395 bfa_iocfc_start_submod(bfa);
396}
397
398static void
399bfa_iocfc_stats_clear(void *bfa_arg)
400{
401 struct bfa_s *bfa = bfa_arg;
402 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
403 struct bfi_iocfc_stats_req_s stats_req;
404
405 bfa_timer_start(bfa, &iocfc->stats_timer,
406 bfa_iocfc_stats_clr_timeout, bfa,
407 BFA_IOCFC_TOV);
408
409 bfi_h2i_set(stats_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CLEAR_STATS_REQ,
410 bfa_lpuid(bfa));
411 bfa_ioc_mbox_send(&bfa->ioc, &stats_req,
412 sizeof(struct bfi_iocfc_stats_req_s));
413}
414
415static void
416bfa_iocfc_stats_swap(struct bfa_fw_stats_s *d, struct bfa_fw_stats_s *s)
417{
418 u32 *dip = (u32 *) d;
419 u32 *sip = (u32 *) s;
420 int i;
421
422 for (i = 0; i < (sizeof(struct bfa_fw_stats_s) / sizeof(u32)); i++)
423 dip[i] = bfa_os_ntohl(sip[i]);
424}
425
426static void
427bfa_iocfc_stats_clr_cb(void *bfa_arg, bfa_boolean_t complete)
428{
429 struct bfa_s *bfa = bfa_arg;
430 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
431
432 if (complete) {
433 bfa_ioc_clr_stats(&bfa->ioc);
434 iocfc->stats_cbfn(iocfc->stats_cbarg, iocfc->stats_status);
435 } else {
436 iocfc->stats_busy = BFA_FALSE;
437 iocfc->stats_status = BFA_STATUS_OK;
438 }
439}
440
441static void
442bfa_iocfc_stats_clr_timeout(void *bfa_arg)
443{
444 struct bfa_s *bfa = bfa_arg;
445 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
446
447 bfa_trc(bfa, 0);
448
449 iocfc->stats_status = BFA_STATUS_ETIMER;
450 bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_clr_cb, bfa);
451}
452
453static void
454bfa_iocfc_stats_cb(void *bfa_arg, bfa_boolean_t complete)
455{
456 struct bfa_s *bfa = bfa_arg;
457 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
458
459 if (complete) {
460 if (iocfc->stats_status == BFA_STATUS_OK) {
461 bfa_os_memset(iocfc->stats_ret, 0,
462 sizeof(*iocfc->stats_ret));
463 bfa_iocfc_stats_swap(&iocfc->stats_ret->fw_stats,
464 iocfc->fw_stats);
465 }
466 iocfc->stats_cbfn(iocfc->stats_cbarg, iocfc->stats_status);
467 } else {
468 iocfc->stats_busy = BFA_FALSE;
469 iocfc->stats_status = BFA_STATUS_OK;
470 }
471}
472
473static void
474bfa_iocfc_stats_timeout(void *bfa_arg)
475{
476 struct bfa_s *bfa = bfa_arg;
477 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
478
479 bfa_trc(bfa, 0);
480
481 iocfc->stats_status = BFA_STATUS_ETIMER;
482 bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_cb, bfa);
483}
484
485static void
486bfa_iocfc_stats_query(struct bfa_s *bfa)
487{
488 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
489 struct bfi_iocfc_stats_req_s stats_req;
490
491 bfa_timer_start(bfa, &iocfc->stats_timer,
492 bfa_iocfc_stats_timeout, bfa, BFA_IOCFC_TOV);
493
494 bfi_h2i_set(stats_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_GET_STATS_REQ,
495 bfa_lpuid(bfa));
496 bfa_ioc_mbox_send(&bfa->ioc, &stats_req,
497 sizeof(struct bfi_iocfc_stats_req_s));
498}
499
500void
501bfa_iocfc_reset_queues(struct bfa_s *bfa)
502{
503 int q;
504
505 for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
506 bfa_reqq_ci(bfa, q) = 0;
507 bfa_reqq_pi(bfa, q) = 0;
508 bfa_rspq_ci(bfa, q) = 0;
509 bfa_rspq_pi(bfa, q) = 0;
510 }
511}
512
513/**
514 * IOC enable request is complete
515 */
516static void
517bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
518{
519 struct bfa_s *bfa = bfa_arg;
520
521 if (status != BFA_STATUS_OK) {
522 bfa_isr_disable(bfa);
523 if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
524 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
525 bfa_iocfc_init_cb, bfa);
526 return;
527 }
528
529 bfa_iocfc_send_cfg(bfa);
530}
531
532/**
533 * IOC disable request is complete
534 */
535static void
536bfa_iocfc_disable_cbfn(void *bfa_arg)
537{
538 struct bfa_s *bfa = bfa_arg;
539
540 bfa_isr_disable(bfa);
541 bfa_iocfc_disable_submod(bfa);
542
543 if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
544 bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
545 bfa);
546 else {
547 bfa_assert(bfa->iocfc.action == BFA_IOCFC_ACT_DISABLE);
548 bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
549 bfa);
550 }
551}
552
553/**
554 * Notify sub-modules of hardware failure.
555 */
556static void
557bfa_iocfc_hbfail_cbfn(void *bfa_arg)
558{
559 struct bfa_s *bfa = bfa_arg;
560
561 bfa->rme_process = BFA_FALSE;
562
563 bfa_isr_disable(bfa);
564 bfa_iocfc_disable_submod(bfa);
565
566 if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
567 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
568 bfa);
569}
570
571/**
572 * Actions on chip-reset completion.
573 */
574static void
575bfa_iocfc_reset_cbfn(void *bfa_arg)
576{
577 struct bfa_s *bfa = bfa_arg;
578
579 bfa_iocfc_reset_queues(bfa);
580 bfa_isr_enable(bfa);
581}
582
583
584
585/**
586 * bfa_ioc_public
587 */
588
589/**
590 * Query IOC memory requirement information.
591 */
592void
593bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
594 u32 *dm_len)
595{
596 /* dma memory for IOC */
597 *dm_len += bfa_ioc_meminfo();
598
599 bfa_iocfc_fw_cfg_sz(cfg, dm_len);
600 bfa_iocfc_cqs_sz(cfg, dm_len);
601 *km_len += bfa_ioc_debug_trcsz(bfa_auto_recover);
602}
603
604/**
605 * Query IOC memory requirement information.
606 */
607void
608bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
609 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
610{
611 int i;
612
613 bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
614 bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
615 bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
616 bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;
617
618 bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod,
619 bfa->trcmod, bfa->aen, bfa->logm);
620
621 /**
622 * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
623 */
624 if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
625 bfa_ioc_set_fcmode(&bfa->ioc);
626
627 bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC);
628 bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);
629
630 bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
631 bfa_iocfc_mem_claim(bfa, cfg, meminfo);
632 bfa_timer_init(&bfa->timer_mod);
633
634 INIT_LIST_HEAD(&bfa->comp_q);
635 for (i = 0; i < BFI_IOC_MAX_CQS; i++)
636 INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
637}
638
639/**
640 * Query IOC memory requirement information.
641 */
642void
643bfa_iocfc_detach(struct bfa_s *bfa)
644{
645 bfa_ioc_detach(&bfa->ioc);
646}
647
648/**
649 * Query IOC memory requirement information.
650 */
651void
652bfa_iocfc_init(struct bfa_s *bfa)
653{
654 bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
655 bfa_ioc_enable(&bfa->ioc);
656}
657
658/**
659 * IOC start called from bfa_start(). Called to start IOC operations
660 * at driver instantiation for this instance.
661 */
662void
663bfa_iocfc_start(struct bfa_s *bfa)
664{
665 if (bfa->iocfc.cfgdone)
666 bfa_iocfc_start_submod(bfa);
667}
668
669/**
670 * IOC stop called from bfa_stop(). Called only when driver is unloaded
671 * for this instance.
672 */
673void
674bfa_iocfc_stop(struct bfa_s *bfa)
675{
676 bfa->iocfc.action = BFA_IOCFC_ACT_STOP;
677
678 bfa->rme_process = BFA_FALSE;
679 bfa_ioc_disable(&bfa->ioc);
680}
681
682void
683bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
684{
685 struct bfa_s *bfa = bfaarg;
686 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
687 union bfi_iocfc_i2h_msg_u *msg;
688
689 msg = (union bfi_iocfc_i2h_msg_u *) m;
690 bfa_trc(bfa, msg->mh.msg_id);
691
692 switch (msg->mh.msg_id) {
693 case BFI_IOCFC_I2H_CFG_REPLY:
694 iocfc->cfg_reply = &msg->cfg_reply;
695 bfa_iocfc_cfgrsp(bfa);
696 break;
697
698 case BFI_IOCFC_I2H_GET_STATS_RSP:
699 if (iocfc->stats_busy == BFA_FALSE
700 || iocfc->stats_status == BFA_STATUS_ETIMER)
701 break;
702
703 bfa_timer_stop(&iocfc->stats_timer);
704 iocfc->stats_status = BFA_STATUS_OK;
705 bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_cb,
706 bfa);
707 break;
708 case BFI_IOCFC_I2H_CLEAR_STATS_RSP:
709 /*
710 * check for timer pop before processing the rsp
711 */
712 if (iocfc->stats_busy == BFA_FALSE
713 || iocfc->stats_status == BFA_STATUS_ETIMER)
714 break;
715
716 bfa_timer_stop(&iocfc->stats_timer);
717 iocfc->stats_status = BFA_STATUS_OK;
718 bfa_cb_queue(bfa, &iocfc->stats_hcb_qe,
719 bfa_iocfc_stats_clr_cb, bfa);
720 break;
721 case BFI_IOCFC_I2H_UPDATEQ_RSP:
722 iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
723 break;
724 default:
725 bfa_assert(0);
726 }
727}
728
729#ifndef BFA_BIOS_BUILD
730void
731bfa_adapter_get_attr(struct bfa_s *bfa, struct bfa_adapter_attr_s *ad_attr)
732{
733 bfa_ioc_get_adapter_attr(&bfa->ioc, ad_attr);
734}
735
736u64
737bfa_adapter_get_id(struct bfa_s *bfa)
738{
739 return bfa_ioc_get_adid(&bfa->ioc);
740}
741
742void
743bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
744{
745 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
746
747 attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;
748
749 attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
750 bfa_os_ntohs(iocfc->cfginfo->intr_attr.delay) :
751 bfa_os_ntohs(iocfc->cfgrsp->intr_attr.delay);
752
753 attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
754 bfa_os_ntohs(iocfc->cfginfo->intr_attr.latency) :
755 bfa_os_ntohs(iocfc->cfgrsp->intr_attr.latency);
756
757 attr->config = iocfc->cfg;
758
759}
760
761bfa_status_t
762bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
763{
764 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
765 struct bfi_iocfc_set_intr_req_s *m;
766
767 iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
768 iocfc->cfginfo->intr_attr.delay = bfa_os_htons(attr->delay);
769 iocfc->cfginfo->intr_attr.latency = bfa_os_htons(attr->latency);
770
771 if (!bfa_iocfc_is_operational(bfa))
772 return BFA_STATUS_OK;
773
774 m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
775 if (!m)
776 return BFA_STATUS_DEVBUSY;
777
778 bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
779 bfa_lpuid(bfa));
780 m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
781 m->delay = iocfc->cfginfo->intr_attr.delay;
782 m->latency = iocfc->cfginfo->intr_attr.latency;
783
784
785 bfa_trc(bfa, attr->delay);
786 bfa_trc(bfa, attr->latency);
787
788 bfa_reqq_produce(bfa, BFA_REQQ_IOC);
789 return BFA_STATUS_OK;
790}
791
792void
793bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
794{
795 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
796
797 iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
798 bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
799}
800
801bfa_status_t
802bfa_iocfc_get_stats(struct bfa_s *bfa, struct bfa_iocfc_stats_s *stats,
803 bfa_cb_ioc_t cbfn, void *cbarg)
804{
805 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
806
807 if (iocfc->stats_busy) {
808 bfa_trc(bfa, iocfc->stats_busy);
809 return BFA_STATUS_DEVBUSY;
810 }
811
812 if (!bfa_iocfc_is_operational(bfa)) {
813 bfa_trc(bfa, 0);
814 return BFA_STATUS_IOC_NON_OP;
815 }
816
817 iocfc->stats_busy = BFA_TRUE;
818 iocfc->stats_ret = stats;
819 iocfc->stats_cbfn = cbfn;
820 iocfc->stats_cbarg = cbarg;
821
822 bfa_iocfc_stats_query(bfa);
823
824 return BFA_STATUS_OK;
825}
826
827bfa_status_t
828bfa_iocfc_clear_stats(struct bfa_s *bfa, bfa_cb_ioc_t cbfn, void *cbarg)
829{
830 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
831
832 if (iocfc->stats_busy) {
833 bfa_trc(bfa, iocfc->stats_busy);
834 return BFA_STATUS_DEVBUSY;
835 }
836
837 if (!bfa_iocfc_is_operational(bfa)) {
838 bfa_trc(bfa, 0);
839 return BFA_STATUS_IOC_NON_OP;
840 }
841
842 iocfc->stats_busy = BFA_TRUE;
843 iocfc->stats_cbfn = cbfn;
844 iocfc->stats_cbarg = cbarg;
845
846 bfa_iocfc_stats_clear(bfa);
847 return BFA_STATUS_OK;
848}
849
850/**
851 * Enable IOC after it is disabled.
852 */
853void
854bfa_iocfc_enable(struct bfa_s *bfa)
855{
856 bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
857 "IOC Enable");
858 bfa_ioc_enable(&bfa->ioc);
859}
860
861void
862bfa_iocfc_disable(struct bfa_s *bfa)
863{
864 bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
865 "IOC Disable");
866 bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;
867
868 bfa->rme_process = BFA_FALSE;
869 bfa_ioc_disable(&bfa->ioc);
870}
871
872
873bfa_boolean_t
874bfa_iocfc_is_operational(struct bfa_s *bfa)
875{
876 return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
877}
878
879/**
880 * Return boot target port wwns -- read from boot information in flash.
881 */
882void
883bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
884{
885 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
886 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
887 int i;
888
889 if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
890 bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
891 *nwwns = cfgrsp->pbc_cfg.nbluns;
892 for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
893 wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;
894
895 return;
896 }
897
898 *nwwns = cfgrsp->bootwwns.nwwns;
899 memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
900}
901
902void
903bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa, struct bfa_boot_pbc_s *pbcfg)
904{
905 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
906 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
907
908 pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
909 pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
910 pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
911 memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
912}
913
914int
915bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
916{
917 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
918 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
919
920 memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
921 return cfgrsp->pbc_cfg.nvports;
922}
923
924
925#endif
926
927
diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h
deleted file mode 100644
index 74a6a048d1fd..000000000000
--- a/drivers/scsi/bfa/bfa_iocfc.h
+++ /dev/null
@@ -1,184 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_IOCFC_H__
19#define __BFA_IOCFC_H__
20
21#include <bfa_ioc.h>
22#include <bfa.h>
23#include <bfi/bfi_iocfc.h>
24#include <bfi/bfi_pbc.h>
25#include <bfa_callback_priv.h>
26
27#define BFA_REQQ_NELEMS_MIN (4)
28#define BFA_RSPQ_NELEMS_MIN (4)
29
30struct bfa_iocfc_regs_s {
31 bfa_os_addr_t intr_status;
32 bfa_os_addr_t intr_mask;
33 bfa_os_addr_t cpe_q_pi[BFI_IOC_MAX_CQS];
34 bfa_os_addr_t cpe_q_ci[BFI_IOC_MAX_CQS];
35 bfa_os_addr_t cpe_q_depth[BFI_IOC_MAX_CQS];
36 bfa_os_addr_t cpe_q_ctrl[BFI_IOC_MAX_CQS];
37 bfa_os_addr_t rme_q_ci[BFI_IOC_MAX_CQS];
38 bfa_os_addr_t rme_q_pi[BFI_IOC_MAX_CQS];
39 bfa_os_addr_t rme_q_depth[BFI_IOC_MAX_CQS];
40 bfa_os_addr_t rme_q_ctrl[BFI_IOC_MAX_CQS];
41};
42
43/**
44 * MSIX vector handlers
45 */
46#define BFA_MSIX_MAX_VECTORS 22
47typedef void (*bfa_msix_handler_t)(struct bfa_s *bfa, int vec);
48struct bfa_msix_s {
49 int nvecs;
50 bfa_msix_handler_t handler[BFA_MSIX_MAX_VECTORS];
51};
52
53/**
54 * Chip specific interfaces
55 */
56struct bfa_hwif_s {
57 void (*hw_reginit)(struct bfa_s *bfa);
58 void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq);
59 void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq);
60 void (*hw_msix_init)(struct bfa_s *bfa, int nvecs);
61 void (*hw_msix_install)(struct bfa_s *bfa);
62 void (*hw_msix_uninstall)(struct bfa_s *bfa);
63 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
64 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
65 u32 *nvecs, u32 *maxvec);
66 void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
67 u32 *end);
68};
69typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
70
71struct bfa_iocfc_s {
72 struct bfa_s *bfa;
73 struct bfa_iocfc_cfg_s cfg;
74 int action;
75
76 u32 req_cq_pi[BFI_IOC_MAX_CQS];
77 u32 rsp_cq_ci[BFI_IOC_MAX_CQS];
78
79 struct bfa_cb_qe_s init_hcb_qe;
80 struct bfa_cb_qe_s stop_hcb_qe;
81 struct bfa_cb_qe_s dis_hcb_qe;
82 struct bfa_cb_qe_s stats_hcb_qe;
83 bfa_boolean_t cfgdone;
84
85 struct bfa_dma_s cfg_info;
86 struct bfi_iocfc_cfg_s *cfginfo;
87 struct bfa_dma_s cfgrsp_dma;
88 struct bfi_iocfc_cfgrsp_s *cfgrsp;
89 struct bfi_iocfc_cfg_reply_s *cfg_reply;
90
91 u8 *stats_kva;
92 u64 stats_pa;
93 struct bfa_fw_stats_s *fw_stats;
94 struct bfa_timer_s stats_timer; /* timer */
95 struct bfa_iocfc_stats_s *stats_ret; /* driver stats location */
96 bfa_status_t stats_status; /* stats/statsclr status */
97 bfa_boolean_t stats_busy; /* outstanding stats */
98 bfa_cb_ioc_t stats_cbfn; /* driver callback function */
99 void *stats_cbarg; /* user callback arg */
100
101 struct bfa_dma_s req_cq_ba[BFI_IOC_MAX_CQS];
102 struct bfa_dma_s req_cq_shadow_ci[BFI_IOC_MAX_CQS];
103 struct bfa_dma_s rsp_cq_ba[BFI_IOC_MAX_CQS];
104 struct bfa_dma_s rsp_cq_shadow_pi[BFI_IOC_MAX_CQS];
105 struct bfa_iocfc_regs_s bfa_regs; /* BFA device registers */
106 struct bfa_hwif_s hwif;
107
108 bfa_cb_iocfc_t updateq_cbfn; /* bios callback function */
109 void *updateq_cbarg; /* bios callback arg */
110 u32 intr_mask;
111};
112
113#define bfa_lpuid(__bfa) bfa_ioc_portid(&(__bfa)->ioc)
114#define bfa_msix_init(__bfa, __nvecs) \
115 ((__bfa)->iocfc.hwif.hw_msix_init(__bfa, __nvecs))
116#define bfa_msix_install(__bfa) \
117 ((__bfa)->iocfc.hwif.hw_msix_install(__bfa))
118#define bfa_msix_uninstall(__bfa) \
119 ((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa))
120#define bfa_isr_mode_set(__bfa, __msix) \
121 ((__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix))
122#define bfa_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec) \
123 ((__bfa)->iocfc.hwif.hw_msix_getvecs(__bfa, __vecmap, \
124 __nvecs, __maxvec))
125#define bfa_msix_get_rme_range(__bfa, __start, __end) \
126 ((__bfa)->iocfc.hwif.hw_msix_get_rme_range(__bfa, __start, __end))
127
128/*
129 * FC specific IOC functions.
130 */
131void bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
132 u32 *dm_len);
133void bfa_iocfc_attach(struct bfa_s *bfa, void *bfad,
134 struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
135 struct bfa_pcidev_s *pcidev);
136void bfa_iocfc_detach(struct bfa_s *bfa);
137void bfa_iocfc_init(struct bfa_s *bfa);
138void bfa_iocfc_start(struct bfa_s *bfa);
139void bfa_iocfc_stop(struct bfa_s *bfa);
140void bfa_iocfc_isr(void *bfa, struct bfi_mbmsg_s *msg);
141void bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa);
142bfa_boolean_t bfa_iocfc_is_operational(struct bfa_s *bfa);
143void bfa_iocfc_reset_queues(struct bfa_s *bfa);
144void bfa_iocfc_updateq(struct bfa_s *bfa, u32 reqq_ba, u32 rspq_ba,
145 u32 reqq_sci, u32 rspq_spi,
146 bfa_cb_iocfc_t cbfn, void *cbarg);
147
148void bfa_msix_all(struct bfa_s *bfa, int vec);
149void bfa_msix_reqq(struct bfa_s *bfa, int vec);
150void bfa_msix_rspq(struct bfa_s *bfa, int vec);
151void bfa_msix_lpu_err(struct bfa_s *bfa, int vec);
152
153void bfa_hwcb_reginit(struct bfa_s *bfa);
154void bfa_hwcb_reqq_ack(struct bfa_s *bfa, int rspq);
155void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq);
156void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs);
157void bfa_hwcb_msix_install(struct bfa_s *bfa);
158void bfa_hwcb_msix_uninstall(struct bfa_s *bfa);
159void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
160void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap,
161 u32 *nvecs, u32 *maxvec);
162void bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end);
163void bfa_hwct_reginit(struct bfa_s *bfa);
164void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq);
165void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq);
166void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs);
167void bfa_hwct_msix_install(struct bfa_s *bfa);
168void bfa_hwct_msix_uninstall(struct bfa_s *bfa);
169void bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
170void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap,
171 u32 *nvecs, u32 *maxvec);
172void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end);
173
174void bfa_com_meminfo(bfa_boolean_t mincfg, u32 *dm_len);
175void bfa_com_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi,
176 bfa_boolean_t mincfg);
177void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns);
178void bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa,
179 struct bfa_boot_pbc_s *pbcfg);
180int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa,
181 struct bfi_pbc_vport_s *pbc_vport);
182
183#endif /* __BFA_IOCFC_H__ */
184
diff --git a/drivers/scsi/bfa/bfa_iocfc_q.c b/drivers/scsi/bfa/bfa_iocfc_q.c
deleted file mode 100644
index 500a17df40b2..000000000000
--- a/drivers/scsi/bfa/bfa_iocfc_q.c
+++ /dev/null
@@ -1,44 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include "bfa_intr_priv.h"
20
21BFA_TRC_FILE(HAL, IOCFC_Q);
22
23void
24bfa_iocfc_updateq(struct bfa_s *bfa, u32 reqq_ba, u32 rspq_ba,
25 u32 reqq_sci, u32 rspq_spi, bfa_cb_iocfc_t cbfn,
26 void *cbarg)
27{
28 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
29 struct bfi_iocfc_updateq_req_s updateq_req;
30
31 iocfc->updateq_cbfn = cbfn;
32 iocfc->updateq_cbarg = cbarg;
33
34 bfi_h2i_set(updateq_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_UPDATEQ_REQ,
35 bfa_lpuid(bfa));
36
37 updateq_req.reqq_ba = bfa_os_htonl(reqq_ba);
38 updateq_req.rspq_ba = bfa_os_htonl(rspq_ba);
39 updateq_req.reqq_sci = bfa_os_htonl(reqq_sci);
40 updateq_req.rspq_spi = bfa_os_htonl(rspq_spi);
41
42 bfa_ioc_mbox_send(&bfa->ioc, &updateq_req,
43 sizeof(struct bfi_iocfc_updateq_req_s));
44}
diff --git a/drivers/scsi/bfa/bfa_ioim.c b/drivers/scsi/bfa/bfa_ioim.c
deleted file mode 100644
index bdfdc19915f8..000000000000
--- a/drivers/scsi/bfa/bfa_ioim.c
+++ /dev/null
@@ -1,1364 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <cs/bfa_debug.h>
20#include <bfa_cb_ioim_macros.h>
21
22BFA_TRC_FILE(HAL, IOIM);
23
24/*
25 * forward declarations.
26 */
27static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
28static bfa_boolean_t bfa_ioim_sge_setup(struct bfa_ioim_s *ioim);
29static void bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim);
30static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
31static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
32static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
33static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
34static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
35static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
36static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
37
38/**
39 * bfa_ioim_sm
40 */
41
42/**
43 * IO state machine events
44 */
45enum bfa_ioim_event {
46 BFA_IOIM_SM_START = 1, /* io start request from host */
47 BFA_IOIM_SM_COMP_GOOD = 2, /* io good comp, resource free */
48 BFA_IOIM_SM_COMP = 3, /* io comp, resource is free */
49 BFA_IOIM_SM_COMP_UTAG = 4, /* io comp, resource is free */
50 BFA_IOIM_SM_DONE = 5, /* io comp, resource not free */
51 BFA_IOIM_SM_FREE = 6, /* io resource is freed */
52 BFA_IOIM_SM_ABORT = 7, /* abort request from scsi stack */
53 BFA_IOIM_SM_ABORT_COMP = 8, /* abort from f/w */
54 BFA_IOIM_SM_ABORT_DONE = 9, /* abort completion from f/w */
55 BFA_IOIM_SM_QRESUME = 10, /* CQ space available to queue IO */
56 BFA_IOIM_SM_SGALLOCED = 11, /* SG page allocation successful */
57 BFA_IOIM_SM_SQRETRY = 12, /* sequence recovery retry */
58 BFA_IOIM_SM_HCB = 13, /* bfa callback complete */
59 BFA_IOIM_SM_CLEANUP = 14, /* IO cleanup from itnim */
60 BFA_IOIM_SM_TMSTART = 15, /* IO cleanup from tskim */
61 BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */
62 BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */
63 BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */
64};
65
66/*
67 * forward declaration of IO state machine
68 */
69static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
70 enum bfa_ioim_event event);
71static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
72 enum bfa_ioim_event event);
73static void bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
74 enum bfa_ioim_event event);
75static void bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
76 enum bfa_ioim_event event);
77static void bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
78 enum bfa_ioim_event event);
79static void bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
80 enum bfa_ioim_event event);
81static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
82 enum bfa_ioim_event event);
83static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
84 enum bfa_ioim_event event);
85static void bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
86 enum bfa_ioim_event event);
87static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
88 enum bfa_ioim_event event);
89static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
90 enum bfa_ioim_event event);
91
/**
 * IO is not started (unallocated).
 *
 * Handles the initial START request from the host, the IO TOV expiry
 * for IOs parked on the itnim pending queue, and aborts of pending IOs.
 */
static void
bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_START:
		if (!bfa_itnim_is_online(ioim->itnim)) {
			if (!bfa_itnim_hold_io(ioim->itnim)) {
				/*
				 * ITN offline and IOs are not being held:
				 * complete the IO back with PATHTOV status.
				 */
				bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->fcpim->ioim_comp_q);
				bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
						__bfa_cb_ioim_pathtov, ioim);
			} else {
				/*
				 * ITN offline but IOs are held: park the IO
				 * on the itnim pending queue until the ITN
				 * comes back online or the IO TOV fires.
				 */
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->itnim->pending_q);
			}
			break;
		}

		/*
		 * IO needs more SG elements than fit inline; allocate SG
		 * pages first.  When the allocation has to be deferred,
		 * wait in the sgalloc state for the allocation callback.
		 */
		if (ioim->nsges > BFI_SGE_INLINE) {
			if (!bfa_ioim_sge_setup(ioim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
				return;
			}
		}

		if (!bfa_ioim_send_ioreq(ioim)) {
			/* no room in request CQ; wait for QRESUME */
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}

		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_IOTOV:
		/*
		 * IO TOV expired while on the pending queue: complete the
		 * IO with PATHTOV status.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_pathtov, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/**
		 * IO in pending queue can get abort requests. Complete abort
		 * requests immediately.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_assert(bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
157
/**
 * IO is waiting for SG pages.
 *
 * The IO sits here after bfa_ioim_sge_setup() had to defer the SG page
 * allocation; the sgpg wait element (iosp->sgpg_wqe) is armed.
 */
static void
bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_SGALLOCED:
		/* SG pages arrived; try to queue the IO to firmware */
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* itnim cleanup: cancel the SG wait and fail the IO */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * Host abort: nothing was sent to firmware yet, so the
		 * abort can be completed immediately.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* IOC hardware failure: cancel SG wait and fail the IO */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
206
/**
 * IO is active.
 *
 * The IO request has been queued to firmware; completions, aborts and
 * cleanup requests are handled from here.
 */
static void
bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
		/* good completion, firmware resource already freed */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			      __bfa_cb_ioim_good_comp, ioim);
		break;

	case BFA_IOIM_SM_COMP:
		/* completion (with status), firmware resource freed */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		/*
		 * Completion but firmware still holds the IO tag; wait for
		 * the resource-free event in the hcb_free state.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/* host-requested (explicit) abort */
		ioim->iosp->abort_explicit = BFA_TRUE;
		ioim->io_cbfn = __bfa_cb_ioim_abort;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		else {
			/* request CQ full; wait for space to send abort */
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* implicit abort on behalf of itnim/tskim cleanup */
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
279
/**
 * IO is being aborted, waiting for completion from firmware.
 */
static void
bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		/* IO completions racing with the abort are ignored */
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		/* abort completed; IO tag is still held by firmware */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		/* abort completed and firmware resource is free */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/*
		 * Cleanup overrides the explicit abort: resend as an
		 * implicit (cleanup) abort.
		 */
		bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
343
/**
 * IO is being cleaned up (implicit abort), waiting for completion from
 * firmware.
 */
static void
bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		/* IO completions racing with the cleanup are ignored */
		break;

	case BFA_IOIM_SM_ABORT:
		/**
		 * IO is already being aborted implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		/* firmware still holds the IO tag: free it later */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/**
		 * IO can be in cleanup state already due to TM command. 2nd cleanup
		 * request comes from ITN offline event.
		 */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
405
/**
 * IO is waiting for room in request CQ.
 *
 * The reqq wait element (iosp->reqq_wait) is armed; a QRESUME event
 * arrives once there is space to queue the IO request.
 */
static void
bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		/* space available: send the original IO request now */
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		bfa_ioim_send_ioreq(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/* nothing reached firmware; complete the abort at once */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
451
/**
 * Active IO is being aborted, waiting for room in request CQ.
 *
 * The IO request itself is already with firmware; only the abort
 * message is waiting for CQ space.
 */
static void
bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		/* space available: send the deferred abort now */
		bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* downgrade the pending explicit abort to a cleanup */
		bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		/*
		 * IO completed before the abort could be sent; cancel the
		 * CQ wait and report the abort completion.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		/* completed, but firmware still holds the IO tag */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
505
/**
 * Active IO is being cleaned up, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		/* space available: send the deferred cleanup abort now */
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/**
		 * IO is already being cleaned up implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		/* IO completed before cleanup could be sent */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_DONE:
		/* completed, but firmware still holds the IO tag */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
556
/**
 * IO bfa callback is pending.
 *
 * Waiting for the queued host-completion callback to run; the IO can
 * be freed as soon as the HCB event arrives.
 */
static void
bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		/* host callback done: recycle the IO */
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		bfa_cb_ioim_resfree(ioim->bfa->bfad);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* nothing left to undo; callback is already queued */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
584
/**
 * IO bfa callback is pending. IO resource cannot be freed.
 *
 * Firmware still owns the IO tag; wait for both the host callback
 * (HCB) and the firmware resource-free (FREE) events.
 */
static void
bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		/* callback done first: park on resfree queue for FREE */
		bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
		break;

	case BFA_IOIM_SM_FREE:
		/* resource freed first: only the callback remains */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* IOC failure implies firmware resources are gone */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
617
/**
 * IO is completed, waiting resource free from firmware.
 */
static void
bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_FREE:
		/* firmware released the IO tag: recycle the IO */
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		bfa_cb_ioim_resfree(ioim->bfa->bfad);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* nothing to do; IO already completed to the host */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
645
646
647
648/**
649 * bfa_ioim_private
650 */
651
652static void
653__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
654{
655 struct bfa_ioim_s *ioim = cbarg;
656
657 if (!complete) {
658 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
659 return;
660 }
661
662 bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
663}
664
665static void
666__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
667{
668 struct bfa_ioim_s *ioim = cbarg;
669 struct bfi_ioim_rsp_s *m;
670 u8 *snsinfo = NULL;
671 u8 sns_len = 0;
672 s32 residue = 0;
673
674 if (!complete) {
675 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
676 return;
677 }
678
679 m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
680 if (m->io_status == BFI_IOIM_STS_OK) {
681 /**
682 * setup sense information, if present
683 */
684 if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION
685 && m->sns_len) {
686 sns_len = m->sns_len;
687 snsinfo = ioim->iosp->snsinfo;
688 }
689
690 /**
691 * setup residue value correctly for normal completions
692 */
693 if (m->resid_flags == FCP_RESID_UNDER)
694 residue = bfa_os_ntohl(m->residue);
695 if (m->resid_flags == FCP_RESID_OVER) {
696 residue = bfa_os_ntohl(m->residue);
697 residue = -residue;
698 }
699 }
700
701 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
702 m->scsi_status, sns_len, snsinfo, residue);
703}
704
705static void
706__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
707{
708 struct bfa_ioim_s *ioim = cbarg;
709
710 if (!complete) {
711 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
712 return;
713 }
714
715 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
716 0, 0, NULL, 0);
717}
718
719static void
720__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
721{
722 struct bfa_ioim_s *ioim = cbarg;
723
724 if (!complete) {
725 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
726 return;
727 }
728
729 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
730 0, 0, NULL, 0);
731}
732
733static void
734__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
735{
736 struct bfa_ioim_s *ioim = cbarg;
737
738 if (!complete) {
739 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
740 return;
741 }
742
743 bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
744}
745
746static void
747bfa_ioim_sgpg_alloced(void *cbarg)
748{
749 struct bfa_ioim_s *ioim = cbarg;
750
751 ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
752 list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
753 bfa_ioim_sgpg_setup(ioim);
754 bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
755}
756
/**
 * Send I/O request to firmware.
 *
 * Builds the BFI IO request message (inline SG element, FCP command,
 * message header) and queues it.  Returns BFA_FALSE and arms the reqq
 * wait element when there is no room in the request queue.
 */
static bfa_boolean_t
bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
{
	struct bfa_itnim_s *itnim = ioim->itnim;
	struct bfi_ioim_req_s *m;
	static struct fcp_cmnd_s cmnd_z0 = { 0 };
	struct bfi_sge_s *sge;
	u32 pgdlen = 0;
	u64 addr;
	struct scatterlist *sg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m) {
		bfa_reqq_wait(ioim->bfa, ioim->reqq,
				&ioim->iosp->reqq_wait);
		return BFA_FALSE;
	}

	/**
	 * build i/o request message next
	 */
	m->io_tag = bfa_os_htons(ioim->iotag);
	m->rport_hdl = ioim->itnim->rport->fw_handle;
	m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio);

	/**
	 * build inline IO SG element here
	 */
	sge = &m->sges[0];
	if (ioim->nsges) {
		sg = (struct scatterlist *)scsi_sglist(cmnd);
		addr = bfa_os_sgaddr(sg_dma_address(sg));
		sge->sga = *(union bfi_addr_u *) &addr;
		pgdlen = sg_dma_len(sg);
		sge->sg_len = pgdlen;
		/* more elements follow in SG pages iff nsges > inline */
		sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
			BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
		bfa_sge_to_be(sge);
		sge++;
	}

	/* second inline element: SG page link, or zeroed terminator */
	if (ioim->nsges > BFI_SGE_INLINE) {
		sge->sga = ioim->sgpg->sgpg_pa;
	} else {
		sge->sga.a32.addr_lo = 0;
		sge->sga.a32.addr_hi = 0;
	}
	sge->sg_len = pgdlen;
	sge->flags = BFI_SGE_PGDLEN;
	bfa_sge_to_be(sge);

	/**
	 * set up I/O command parameters
	 */
	bfa_os_assign(m->cmnd, cmnd_z0);
	m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio);
	m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio);
	bfa_os_assign(m->cmnd.cdb,
			*(struct scsi_cdb_s *)bfa_cb_ioim_get_cdb(ioim->dio));
	m->cmnd.fcp_dl = bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));

	/**
	 * set up I/O message header
	 */
	switch (m->cmnd.iodir) {
	case FCP_IODIR_READ:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, input_reqs);
		break;
	case FCP_IODIR_WRITE:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, output_reqs);
		break;
	case FCP_IODIR_RW:
		bfa_stats(itnim, input_reqs);
		bfa_stats(itnim, output_reqs);
		/* fall through: RW uses the generic IOIM message class */
	default:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
	}
	/*
	 * Use the generic message class when sequence recovery is
	 * enabled or the transfer size is not a multiple of 4 bytes.
	 */
	if (itnim->seq_rec ||
	    (bfa_cb_ioim_get_size(ioim->dio) & (sizeof(u32) - 1)))
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));

#ifdef IOIM_ADVANCED
	m->cmnd.crn = bfa_cb_ioim_get_crn(ioim->dio);
	m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio);
	m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio);

	/**
	 * Handle large CDB (>16 bytes).
	 */
	m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) -
					FCP_CMND_CDB_LEN) / sizeof(u32);
	if (m->cmnd.addl_cdb_len) {
		bfa_os_memcpy(&m->cmnd.cdb + 1, (struct scsi_cdb_s *)
				bfa_cb_ioim_get_cdb(ioim->dio) + 1,
				m->cmnd.addl_cdb_len * sizeof(u32));
		fcp_cmnd_fcpdl(&m->cmnd) =
				bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));
	}
#endif

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}
872
873/**
874 * Setup any additional SG pages needed.Inline SG element is setup
875 * at queuing time.
876 */
877static bfa_boolean_t
878bfa_ioim_sge_setup(struct bfa_ioim_s *ioim)
879{
880 u16 nsgpgs;
881
882 bfa_assert(ioim->nsges > BFI_SGE_INLINE);
883
884 /**
885 * allocate SG pages needed
886 */
887 nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
888 if (!nsgpgs)
889 return BFA_TRUE;
890
891 if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
892 != BFA_STATUS_OK) {
893 bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
894 return BFA_FALSE;
895 }
896
897 ioim->nsgpgs = nsgpgs;
898 bfa_ioim_sgpg_setup(ioim);
899
900 return BFA_TRUE;
901}
902
/*
 * Fill the allocated SG pages with the scatter/gather elements that did
 * not fit inline.  Each page holds up to BFI_SGPG_DATA_SGES data
 * elements plus one trailing element that either links to the next page
 * or terminates the chain.
 */
static void
bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
{
	int sgeid, nsges, i;
	struct bfi_sge_s *sge;
	struct bfa_sgpg_s *sgpg;
	u32 pgcumsz;
	u64 addr;
	struct scatterlist *sg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;

	/* first BFI_SGE_INLINE elements went into the request message */
	sgeid = BFI_SGE_INLINE;
	ioim->sgpg = sgpg = bfa_q_first(&ioim->sgpg_q);

	/* skip the scatterlist entry already consumed inline */
	sg = scsi_sglist(cmnd);
	sg = sg_next(sg);

	do {
		sge = sgpg->sgpg->sges;
		nsges = ioim->nsges - sgeid;
		if (nsges > BFI_SGPG_DATA_SGES)
			nsges = BFI_SGPG_DATA_SGES;

		pgcumsz = 0;
		for (i = 0; i < nsges; i++, sge++, sgeid++, sg = sg_next(sg)) {
			addr = bfa_os_sgaddr(sg_dma_address(sg));
			sge->sga = *(union bfi_addr_u *) &addr;
			sge->sg_len = sg_dma_len(sg);
			pgcumsz += sge->sg_len;

			/**
			 * set flags
			 */
			if (i < (nsges - 1))
				sge->flags = BFI_SGE_DATA;
			else if (sgeid < (ioim->nsges - 1))
				sge->flags = BFI_SGE_DATA_CPL;
			else
				sge->flags = BFI_SGE_DATA_LAST;
		}

		sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);

		/**
		 * set the link element of each page
		 */
		if (sgeid == ioim->nsges) {
			/* last page: terminate the chain */
			sge->flags = BFI_SGE_PGDLEN;
			sge->sga.a32.addr_lo = 0;
			sge->sga.a32.addr_hi = 0;
		} else {
			/* link to the next SG page */
			sge->flags = BFI_SGE_LINK;
			sge->sga = sgpg->sgpg_pa;
		}
		sge->sg_len = pgcumsz;
	} while (sgeid < ioim->nsges);
}
960
961/**
962 * Send I/O abort request to firmware.
963 */
964static bfa_boolean_t
965bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
966{
967 struct bfi_ioim_abort_req_s *m;
968 enum bfi_ioim_h2i msgop;
969
970 /**
971 * check for room in queue to send request now
972 */
973 m = bfa_reqq_next(ioim->bfa, ioim->reqq);
974 if (!m)
975 return BFA_FALSE;
976
977 /**
978 * build i/o request message next
979 */
980 if (ioim->iosp->abort_explicit)
981 msgop = BFI_IOIM_H2I_IOABORT_REQ;
982 else
983 msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;
984
985 bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa));
986 m->io_tag = bfa_os_htons(ioim->iotag);
987 m->abort_tag = ++ioim->abort_tag;
988
989 /**
990 * queue I/O message to firmware
991 */
992 bfa_reqq_produce(ioim->bfa, ioim->reqq);
993 return BFA_TRUE;
994}
995
996/**
997 * Call to resume any I/O requests waiting for room in request queue.
998 */
999static void
1000bfa_ioim_qresume(void *cbarg)
1001{
1002 struct bfa_ioim_s *ioim = cbarg;
1003
1004 bfa_fcpim_stats(ioim->fcpim, qresumes);
1005 bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
1006}
1007
1008
/*
 * Notify the owning itnim (or tskim, for TM-driven cleanup) that this
 * IO is done, so it can finish its own cleanup once all IOs drain.
 */
static void
bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
{
	/**
	 * Move IO from itnim queue to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);

	if (!ioim->iosp->tskim) {
		if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
			/*
			 * Completion is being delayed (IO TOV running):
			 * pull back the queued callback and park the IO
			 * on the itnim delayed-completion queue instead.
			 */
			bfa_cb_dequeue(&ioim->hcb_qe);
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
		}
		bfa_itnim_iodone(ioim->itnim);
	} else
		bfa_tskim_iodone(ioim->iosp->tskim);
}
1029
1030/**
1031 * or after the link comes back.
1032 */
1033void
1034bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
1035{
1036 /**
1037 * If path tov timer expired, failback with PATHTOV status - these
1038 * IO requests are not normally retried by IO stack.
1039 *
1040 * Otherwise device cameback online and fail it with normal failed
1041 * status so that IO stack retries these failed IO requests.
1042 */
1043 if (iotov)
1044 ioim->io_cbfn = __bfa_cb_ioim_pathtov;
1045 else
1046 ioim->io_cbfn = __bfa_cb_ioim_failed;
1047
1048 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1049
1050 /**
1051 * Move IO to fcpim global queue since itnim will be
1052 * freed.
1053 */
1054 list_del(&ioim->qe);
1055 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
1056}
1057
1058
1059
1060/**
1061 * bfa_ioim_friend
1062 */
1063
/**
 * Memory allocation and initialization.
 *
 * Claims kva for the IOIM and IOIM-sp arrays and DMA memory for the
 * per-IO sense buffers out of the pre-sized meminfo areas, then
 * initializes every IOIM and places it on the free queue.
 */
void
bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_ioim_s *ioim;
	struct bfa_ioim_sp_s *iosp;
	u16 i;
	u8 *snsinfo;
	u32 snsbufsz;

	/**
	 * claim memory first
	 */
	ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_arr = ioim;
	bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->num_ioim_reqs);

	iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_sp_arr = iosp;
	bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs);

	/**
	 * Claim DMA memory for per IO sense data.
	 */
	snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
	fcpim->snsbase.pa = bfa_meminfo_dma_phys(minfo);
	bfa_meminfo_dma_phys(minfo) += snsbufsz;

	fcpim->snsbase.kva = bfa_meminfo_dma_virt(minfo);
	bfa_meminfo_dma_virt(minfo) += snsbufsz;
	snsinfo = fcpim->snsbase.kva;
	/* tell firmware where the sense buffer area lives */
	bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);

	/**
	 * Initialize ioim free queues
	 */
	INIT_LIST_HEAD(&fcpim->ioim_free_q);
	INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
	INIT_LIST_HEAD(&fcpim->ioim_comp_q);

	for (i = 0; i < fcpim->num_ioim_reqs;
	     i++, ioim++, iosp++, snsinfo += BFI_IOIM_SNSLEN) {
		/*
		 * initialize IOIM
		 */
		bfa_os_memset(ioim, 0, sizeof(struct bfa_ioim_s));
		ioim->iotag = i;
		ioim->bfa = fcpim->bfa;
		ioim->fcpim = fcpim;
		ioim->iosp = iosp;
		/* each IO gets its own BFI_IOIM_SNSLEN slice */
		iosp->snsinfo = snsinfo;
		INIT_LIST_HEAD(&ioim->sgpg_q);
		bfa_reqq_winit(&ioim->iosp->reqq_wait,
				   bfa_ioim_qresume, ioim);
		bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
				   bfa_ioim_sgpg_alloced, ioim);
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);

		list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
	}
}
1127
/**
 * Driver detach time call.
 *
 * Intentionally empty: IOIM memory was carved out of the framework's
 * meminfo areas in bfa_ioim_attach(), so there is nothing for this
 * module to release on its own.
 */
void
bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim)
{
}
1135
/**
 * IO completion interrupt handler.
 *
 * Maps the firmware IO response (@m) back to its IOIM via the iotag and
 * translates the firmware io_status into the corresponding state-machine
 * event. The original response message is stashed for active IOs so the
 * completion path can examine it later.
 */
void
bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16 iotag;
	enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;

	iotag = bfa_os_ntohs(rsp->io_tag);

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	bfa_assert(ioim->iotag == iotag);

	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, rsp->io_status);
	bfa_trc(ioim->bfa, rsp->reuse_io_tag);

	/* keep the raw response around for the active-IO completion path */
	if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
		bfa_os_assign(ioim->iosp->comp_rspmsg, *m);

	switch (rsp->io_status) {
	case BFI_IOIM_STS_OK:
		bfa_fcpim_stats(fcpim, iocomp_ok);
		/* DONE when firmware has released the tag, COMP otherwise */
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_TIMEDOUT:
	case BFI_IOIM_STS_ABORTED:
		/* normalize timeouts to the aborted status for completion */
		rsp->io_status = BFI_IOIM_STS_ABORTED;
		bfa_fcpim_stats(fcpim, iocomp_aborted);
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_PROTO_ERR:
		bfa_fcpim_stats(fcpim, iocom_proto_err);
		bfa_assert(rsp->reuse_io_tag);
		evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_SQER_NEEDED:
		/* sequence-level error recovery: the IO will be retried */
		bfa_fcpim_stats(fcpim, iocom_sqer_needed);
		bfa_assert(rsp->reuse_io_tag == 0);
		evt = BFA_IOIM_SM_SQRETRY;
		break;

	case BFI_IOIM_STS_RES_FREE:
		bfa_fcpim_stats(fcpim, iocom_res_free);
		evt = BFA_IOIM_SM_FREE;
		break;

	case BFI_IOIM_STS_HOST_ABORTED:
		bfa_fcpim_stats(fcpim, iocom_hostabrts);
		/* stale abort response (tag mismatch): drop it silently */
		if (rsp->abort_tag != ioim->abort_tag) {
			bfa_trc(ioim->bfa, rsp->abort_tag);
			bfa_trc(ioim->bfa, ioim->abort_tag);
			return;
		}

		if (rsp->reuse_io_tag)
			evt = BFA_IOIM_SM_ABORT_COMP;
		else
			evt = BFA_IOIM_SM_ABORT_DONE;
		break;

	case BFI_IOIM_STS_UTAG:
		bfa_fcpim_stats(fcpim, iocom_utags);
		evt = BFA_IOIM_SM_COMP_UTAG;
		break;

	default:
		bfa_assert(0);
	}

	bfa_sm_send_event(ioim, evt);
}
1218
1219void
1220bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
1221{
1222 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
1223 struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
1224 struct bfa_ioim_s *ioim;
1225 u16 iotag;
1226
1227 iotag = bfa_os_ntohs(rsp->io_tag);
1228
1229 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
1230 bfa_assert(ioim->iotag == iotag);
1231
1232 bfa_trc_fp(ioim->bfa, ioim->iotag);
1233 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
1234}
1235
1236/**
1237 * Called by itnim to clean up IO while going offline.
1238 */
1239void
1240bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
1241{
1242 bfa_trc(ioim->bfa, ioim->iotag);
1243 bfa_fcpim_stats(ioim->fcpim, io_cleanups);
1244
1245 ioim->iosp->tskim = NULL;
1246 bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
1247}
1248
1249void
1250bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
1251{
1252 bfa_trc(ioim->bfa, ioim->iotag);
1253 bfa_fcpim_stats(ioim->fcpim, io_tmaborts);
1254
1255 ioim->iosp->tskim = tskim;
1256 bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
1257}
1258
1259/**
1260 * IOC failure handling.
1261 */
1262void
1263bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
1264{
1265 bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
1266}
1267
1268/**
1269 * IO offline TOV popped. Fail the pending IO.
1270 */
1271void
1272bfa_ioim_tov(struct bfa_ioim_s *ioim)
1273{
1274 bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
1275}
1276
1277
1278
1279/**
1280 * bfa_ioim_api
1281 */
1282
/**
 * Allocate IOIM resource for initiator mode I/O request.
 *
 * Dequeues a free IOIM, binds it to the driver IO (@dio) and target
 * (@itnim), and places it on the itnim's active IO queue. Returns NULL
 * when no IO tags are available (the no_iotags stat is bumped).
 */
struct bfa_ioim_s *
bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
		struct bfa_itnim_s *itnim, u16 nsges)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_ioim_s *ioim;

	/**
	 * allocate IOIM resource
	 */
	bfa_q_deq(&fcpim->ioim_free_q, &ioim);
	if (!ioim) {
		bfa_fcpim_stats(fcpim, no_iotags);
		return NULL;
	}

	ioim->dio = dio;
	ioim->itnim = itnim;
	ioim->nsges = nsges;
	ioim->nsgpgs = 0;	/* SG pages are allocated later, on demand */

	bfa_stats(fcpim, total_ios);
	bfa_stats(itnim, ios);
	fcpim->ios_active++;

	list_add_tail(&ioim->qe, &itnim->io_q);
	bfa_trc_fp(ioim->bfa, ioim->iotag);

	return ioim;
}
1316
1317void
1318bfa_ioim_free(struct bfa_ioim_s *ioim)
1319{
1320 struct bfa_fcpim_mod_s *fcpim = ioim->fcpim;
1321
1322 bfa_trc_fp(ioim->bfa, ioim->iotag);
1323 bfa_assert_fp(bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit));
1324
1325 bfa_assert_fp(list_empty(&ioim->sgpg_q)
1326 || (ioim->nsges > BFI_SGE_INLINE));
1327
1328 if (ioim->nsgpgs > 0)
1329 bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);
1330
1331 bfa_stats(ioim->itnim, io_comps);
1332 fcpim->ios_active--;
1333
1334 list_del(&ioim->qe);
1335 list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
1336}
1337
1338void
1339bfa_ioim_start(struct bfa_ioim_s *ioim)
1340{
1341 bfa_trc_fp(ioim->bfa, ioim->iotag);
1342
1343 /**
1344 * Obtain the queue over which this request has to be issued
1345 */
1346 ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
1347 bfa_cb_ioim_get_reqq(ioim->dio) :
1348 bfa_itnim_get_reqq(ioim);
1349
1350 bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
1351}
1352
1353/**
1354 * Driver I/O abort request.
1355 */
1356void
1357bfa_ioim_abort(struct bfa_ioim_s *ioim)
1358{
1359 bfa_trc(ioim->bfa, ioim->iotag);
1360 bfa_fcpim_stats(ioim->fcpim, io_aborts);
1361 bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);
1362}
1363
1364
diff --git a/drivers/scsi/bfa/bfa_itnim.c b/drivers/scsi/bfa/bfa_itnim.c
deleted file mode 100644
index a914ff255135..000000000000
--- a/drivers/scsi/bfa/bfa_itnim.c
+++ /dev/null
@@ -1,1088 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <bfa_fcpim.h>
20#include "bfa_fcpim_priv.h"
21
22BFA_TRC_FILE(HAL, ITNIM);
23
/*
 * Look up the itnim for an rport tag. The tag is masked with
 * (num_itnims - 1), which assumes num_itnims is a power of two —
 * TODO confirm against the config code.
 */
#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \
	((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1)))

/* Add an itnim to the module's active itnim queue. */
#define bfa_fcpim_additn(__itnim) \
	list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)

/*
 * Remove an itnim from the module queue; all of its IO/TM queues must
 * already be drained (asserted here).
 */
#define bfa_fcpim_delitn(__itnim) do { \
	bfa_assert(bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim)); \
	list_del(&(__itnim)->qe); \
	bfa_assert(list_empty(&(__itnim)->io_q)); \
	bfa_assert(list_empty(&(__itnim)->io_cleanup_q)); \
	bfa_assert(list_empty(&(__itnim)->pending_q)); \
} while (0)

/*
 * Deliver the online/offline/sler callbacks. When bfa->fcs is set
 * (presumably FCS context — verify) the callback is invoked directly,
 * otherwise it is deferred through the completion queue.
 */
#define bfa_itnim_online_cb(__itnim) do { \
	if ((__itnim)->bfa->fcs) \
		bfa_cb_itnim_online((__itnim)->ditn); \
	else { \
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
		__bfa_cb_itnim_online, (__itnim)); \
	} \
} while (0)

#define bfa_itnim_offline_cb(__itnim) do { \
	if ((__itnim)->bfa->fcs) \
		bfa_cb_itnim_offline((__itnim)->ditn); \
	else { \
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
		__bfa_cb_itnim_offline, (__itnim)); \
	} \
} while (0)

#define bfa_itnim_sler_cb(__itnim) do { \
	if ((__itnim)->bfa->fcs) \
		bfa_cb_itnim_sler((__itnim)->ditn); \
	else { \
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
		__bfa_cb_itnim_sler, (__itnim)); \
	} \
} while (0)
63
64/*
65 * forward declarations
66 */
67static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
68static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
69static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
70static void bfa_itnim_cleanp_comp(void *itnim_cbarg);
71static void bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
72static void __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
73static void __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
74static void __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
75static void bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
76static void bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
77static void bfa_itnim_iotov(void *itnim_arg);
78static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
79static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
80static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);
81
/**
 * bfa_itnim_sm BFA itnim state machine
 */


/* Events consumed by the bfa_itnim_sm_* state handlers below. */
enum bfa_itnim_event {
	BFA_ITNIM_SM_CREATE = 1,	/* itnim is created */
	BFA_ITNIM_SM_ONLINE = 2,	/* itnim is online */
	BFA_ITNIM_SM_OFFLINE = 3,	/* itnim is offline */
	BFA_ITNIM_SM_FWRSP = 4,		/* firmware response */
	BFA_ITNIM_SM_DELETE = 5,	/* deleting an existing itnim */
	BFA_ITNIM_SM_CLEANUP = 6,	/* IO cleanup completion */
	BFA_ITNIM_SM_SLER = 7,		/* second level error recovery */
	BFA_ITNIM_SM_HWFAIL = 8,	/* IOC h/w failure event */
	BFA_ITNIM_SM_QRESUME = 9,	/* queue space available */
};
98
99static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
100 enum bfa_itnim_event event);
101static void bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
102 enum bfa_itnim_event event);
103static void bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
104 enum bfa_itnim_event event);
105static void bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
106 enum bfa_itnim_event event);
107static void bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
108 enum bfa_itnim_event event);
109static void bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
110 enum bfa_itnim_event event);
111static void bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
112 enum bfa_itnim_event event);
113static void bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
114 enum bfa_itnim_event event);
115static void bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
116 enum bfa_itnim_event event);
117static void bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
118 enum bfa_itnim_event event);
119static void bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
120 enum bfa_itnim_event event);
121static void bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
122 enum bfa_itnim_event event);
123static void bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
124 enum bfa_itnim_event event);
125static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
126 enum bfa_itnim_event event);
127static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
128 enum bfa_itnim_event event);
129
130/**
131 * Beginning/unallocated state - no events expected.
132 */
133static void
134bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
135{
136 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
137 bfa_trc(itnim->bfa, event);
138
139 switch (event) {
140 case BFA_ITNIM_SM_CREATE:
141 bfa_sm_set_state(itnim, bfa_itnim_sm_created);
142 itnim->is_online = BFA_FALSE;
143 bfa_fcpim_additn(itnim);
144 break;
145
146 default:
147 bfa_sm_fault(itnim->bfa, event);
148 }
149}
150
151/**
152 * Beginning state, only online event expected.
153 */
154static void
155bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
156{
157 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
158 bfa_trc(itnim->bfa, event);
159
160 switch (event) {
161 case BFA_ITNIM_SM_ONLINE:
162 if (bfa_itnim_send_fwcreate(itnim))
163 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
164 else
165 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
166 break;
167
168 case BFA_ITNIM_SM_DELETE:
169 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
170 bfa_fcpim_delitn(itnim);
171 break;
172
173 case BFA_ITNIM_SM_HWFAIL:
174 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
175 break;
176
177 default:
178 bfa_sm_fault(itnim->bfa, event);
179 }
180}
181
182/**
183 * Waiting for itnim create response from firmware.
184 */
185static void
186bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
187{
188 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
189 bfa_trc(itnim->bfa, event);
190
191 switch (event) {
192 case BFA_ITNIM_SM_FWRSP:
193 bfa_sm_set_state(itnim, bfa_itnim_sm_online);
194 itnim->is_online = BFA_TRUE;
195 bfa_itnim_iotov_online(itnim);
196 bfa_itnim_online_cb(itnim);
197 break;
198
199 case BFA_ITNIM_SM_DELETE:
200 bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
201 break;
202
203 case BFA_ITNIM_SM_OFFLINE:
204 if (bfa_itnim_send_fwdelete(itnim))
205 bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
206 else
207 bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
208 break;
209
210 case BFA_ITNIM_SM_HWFAIL:
211 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
212 break;
213
214 default:
215 bfa_sm_fault(itnim->bfa, event);
216 }
217}
218
219static void
220bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
221 enum bfa_itnim_event event)
222{
223 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
224 bfa_trc(itnim->bfa, event);
225
226 switch (event) {
227 case BFA_ITNIM_SM_QRESUME:
228 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
229 bfa_itnim_send_fwcreate(itnim);
230 break;
231
232 case BFA_ITNIM_SM_DELETE:
233 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
234 bfa_reqq_wcancel(&itnim->reqq_wait);
235 bfa_fcpim_delitn(itnim);
236 break;
237
238 case BFA_ITNIM_SM_OFFLINE:
239 bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
240 bfa_reqq_wcancel(&itnim->reqq_wait);
241 bfa_itnim_offline_cb(itnim);
242 break;
243
244 case BFA_ITNIM_SM_HWFAIL:
245 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
246 bfa_reqq_wcancel(&itnim->reqq_wait);
247 break;
248
249 default:
250 bfa_sm_fault(itnim->bfa, event);
251 }
252}
253
254/**
255 * Waiting for itnim create response from firmware, a delete is pending.
256 */
257static void
258bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
259 enum bfa_itnim_event event)
260{
261 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
262 bfa_trc(itnim->bfa, event);
263
264 switch (event) {
265 case BFA_ITNIM_SM_FWRSP:
266 if (bfa_itnim_send_fwdelete(itnim))
267 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
268 else
269 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
270 break;
271
272 case BFA_ITNIM_SM_HWFAIL:
273 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
274 bfa_fcpim_delitn(itnim);
275 break;
276
277 default:
278 bfa_sm_fault(itnim->bfa, event);
279 }
280}
281
282/**
283 * Online state - normal parking state.
284 */
285static void
286bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
287{
288 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
289 bfa_trc(itnim->bfa, event);
290
291 switch (event) {
292 case BFA_ITNIM_SM_OFFLINE:
293 bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
294 itnim->is_online = BFA_FALSE;
295 bfa_itnim_iotov_start(itnim);
296 bfa_itnim_cleanup(itnim);
297 break;
298
299 case BFA_ITNIM_SM_DELETE:
300 bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
301 itnim->is_online = BFA_FALSE;
302 bfa_itnim_cleanup(itnim);
303 break;
304
305 case BFA_ITNIM_SM_SLER:
306 bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
307 itnim->is_online = BFA_FALSE;
308 bfa_itnim_iotov_start(itnim);
309 bfa_itnim_sler_cb(itnim);
310 break;
311
312 case BFA_ITNIM_SM_HWFAIL:
313 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
314 itnim->is_online = BFA_FALSE;
315 bfa_itnim_iotov_start(itnim);
316 bfa_itnim_iocdisable_cleanup(itnim);
317 break;
318
319 default:
320 bfa_sm_fault(itnim->bfa, event);
321 }
322}
323
324/**
325 * Second level error recovery need.
326 */
327static void
328bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
329{
330 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
331 bfa_trc(itnim->bfa, event);
332
333 switch (event) {
334 case BFA_ITNIM_SM_OFFLINE:
335 bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
336 bfa_itnim_cleanup(itnim);
337 break;
338
339 case BFA_ITNIM_SM_DELETE:
340 bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
341 bfa_itnim_cleanup(itnim);
342 bfa_itnim_iotov_delete(itnim);
343 break;
344
345 case BFA_ITNIM_SM_HWFAIL:
346 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
347 bfa_itnim_iocdisable_cleanup(itnim);
348 break;
349
350 default:
351 bfa_sm_fault(itnim->bfa, event);
352 }
353}
354
355/**
356 * Going offline. Waiting for active IO cleanup.
357 */
358static void
359bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
360 enum bfa_itnim_event event)
361{
362 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
363 bfa_trc(itnim->bfa, event);
364
365 switch (event) {
366 case BFA_ITNIM_SM_CLEANUP:
367 if (bfa_itnim_send_fwdelete(itnim))
368 bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
369 else
370 bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
371 break;
372
373 case BFA_ITNIM_SM_DELETE:
374 bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
375 bfa_itnim_iotov_delete(itnim);
376 break;
377
378 case BFA_ITNIM_SM_HWFAIL:
379 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
380 bfa_itnim_iocdisable_cleanup(itnim);
381 bfa_itnim_offline_cb(itnim);
382 break;
383
384 case BFA_ITNIM_SM_SLER:
385 break;
386
387 default:
388 bfa_sm_fault(itnim->bfa, event);
389 }
390}
391
392/**
393 * Deleting itnim. Waiting for active IO cleanup.
394 */
395static void
396bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
397 enum bfa_itnim_event event)
398{
399 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
400 bfa_trc(itnim->bfa, event);
401
402 switch (event) {
403 case BFA_ITNIM_SM_CLEANUP:
404 if (bfa_itnim_send_fwdelete(itnim))
405 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
406 else
407 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
408 break;
409
410 case BFA_ITNIM_SM_HWFAIL:
411 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
412 bfa_itnim_iocdisable_cleanup(itnim);
413 break;
414
415 default:
416 bfa_sm_fault(itnim->bfa, event);
417 }
418}
419
420/**
421 * Rport offline. Fimrware itnim is being deleted - awaiting f/w response.
422 */
423static void
424bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
425{
426 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
427 bfa_trc(itnim->bfa, event);
428
429 switch (event) {
430 case BFA_ITNIM_SM_FWRSP:
431 bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
432 bfa_itnim_offline_cb(itnim);
433 break;
434
435 case BFA_ITNIM_SM_DELETE:
436 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
437 break;
438
439 case BFA_ITNIM_SM_HWFAIL:
440 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
441 bfa_itnim_offline_cb(itnim);
442 break;
443
444 default:
445 bfa_sm_fault(itnim->bfa, event);
446 }
447}
448
449static void
450bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
451 enum bfa_itnim_event event)
452{
453 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
454 bfa_trc(itnim->bfa, event);
455
456 switch (event) {
457 case BFA_ITNIM_SM_QRESUME:
458 bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
459 bfa_itnim_send_fwdelete(itnim);
460 break;
461
462 case BFA_ITNIM_SM_DELETE:
463 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
464 break;
465
466 case BFA_ITNIM_SM_HWFAIL:
467 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
468 bfa_reqq_wcancel(&itnim->reqq_wait);
469 bfa_itnim_offline_cb(itnim);
470 break;
471
472 default:
473 bfa_sm_fault(itnim->bfa, event);
474 }
475}
476
477/**
478 * Offline state.
479 */
480static void
481bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
482{
483 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
484 bfa_trc(itnim->bfa, event);
485
486 switch (event) {
487 case BFA_ITNIM_SM_DELETE:
488 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
489 bfa_itnim_iotov_delete(itnim);
490 bfa_fcpim_delitn(itnim);
491 break;
492
493 case BFA_ITNIM_SM_ONLINE:
494 if (bfa_itnim_send_fwcreate(itnim))
495 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
496 else
497 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
498 break;
499
500 case BFA_ITNIM_SM_HWFAIL:
501 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
502 break;
503
504 default:
505 bfa_sm_fault(itnim->bfa, event);
506 }
507}
508
509/**
510 * IOC h/w failed state.
511 */
512static void
513bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
514 enum bfa_itnim_event event)
515{
516 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
517 bfa_trc(itnim->bfa, event);
518
519 switch (event) {
520 case BFA_ITNIM_SM_DELETE:
521 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
522 bfa_itnim_iotov_delete(itnim);
523 bfa_fcpim_delitn(itnim);
524 break;
525
526 case BFA_ITNIM_SM_OFFLINE:
527 bfa_itnim_offline_cb(itnim);
528 break;
529
530 case BFA_ITNIM_SM_ONLINE:
531 if (bfa_itnim_send_fwcreate(itnim))
532 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
533 else
534 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
535 break;
536
537 case BFA_ITNIM_SM_HWFAIL:
538 break;
539
540 default:
541 bfa_sm_fault(itnim->bfa, event);
542 }
543}
544
545/**
546 * Itnim is deleted, waiting for firmware response to delete.
547 */
548static void
549bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
550{
551 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
552 bfa_trc(itnim->bfa, event);
553
554 switch (event) {
555 case BFA_ITNIM_SM_FWRSP:
556 case BFA_ITNIM_SM_HWFAIL:
557 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
558 bfa_fcpim_delitn(itnim);
559 break;
560
561 default:
562 bfa_sm_fault(itnim->bfa, event);
563 }
564}
565
566static void
567bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
568 enum bfa_itnim_event event)
569{
570 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
571 bfa_trc(itnim->bfa, event);
572
573 switch (event) {
574 case BFA_ITNIM_SM_QRESUME:
575 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
576 bfa_itnim_send_fwdelete(itnim);
577 break;
578
579 case BFA_ITNIM_SM_HWFAIL:
580 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
581 bfa_reqq_wcancel(&itnim->reqq_wait);
582 bfa_fcpim_delitn(itnim);
583 break;
584
585 default:
586 bfa_sm_fault(itnim->bfa, event);
587 }
588}
589
590
591
592/**
593 * bfa_itnim_private
594 */
595
/**
 * Initiate cleanup of all IOs on an IOC failure.
 *
 * Walks every queue owned by the itnim and fails each element: TMs and
 * active/cleanup IOs get an IOC-disable, pending IOs are completed as
 * if their offline timeout had popped.
 */
static void
bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_tskim_s *tskim;
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_tskim_iocdisable(tskim);
	}

	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}

	/**
	 * For IO request in pending queue, we pretend an early timeout.
	 */
	list_for_each_safe(qe, qen, &itnim->pending_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_tov(ioim);
	}

	/* IOs already parked for cleanup are failed the same way */
	list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}
}
629
630/**
631 * IO cleanup completion
632 */
633static void
634bfa_itnim_cleanp_comp(void *itnim_cbarg)
635{
636 struct bfa_itnim_s *itnim = itnim_cbarg;
637
638 bfa_stats(itnim, cleanup_comps);
639 bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
640}
641
/**
 * Initiate cleanup of all IOs.
 *
 * A wait-counter is armed with bfa_itnim_cleanp_comp() as its
 * completion; each IO/TM cleanup bumps the counter and the final
 * bfa_itnim_iodone()/bfa_itnim_tskdone() drops it, firing the
 * BFA_ITNIM_SM_CLEANUP event.
 */
static void
bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;
	struct bfa_tskim_s *tskim;
	struct list_head *qe, *qen;

	bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);

	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;

		/**
		 * Move IO to a cleanup queue from active queue so that a later
		 * TM will not pickup this IO.
		 */
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &itnim->io_cleanup_q);

		bfa_wc_up(&itnim->wc);
		bfa_ioim_cleanup(ioim);
	}

	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_wc_up(&itnim->wc);
		bfa_tskim_cleanup(tskim);
	}

	/* drops the initial reference; completes immediately if no IOs/TMs */
	bfa_wc_wait(&itnim->wc);
}
676
677static void
678__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
679{
680 struct bfa_itnim_s *itnim = cbarg;
681
682 if (complete)
683 bfa_cb_itnim_online(itnim->ditn);
684}
685
686static void
687__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
688{
689 struct bfa_itnim_s *itnim = cbarg;
690
691 if (complete)
692 bfa_cb_itnim_offline(itnim->ditn);
693}
694
695static void
696__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
697{
698 struct bfa_itnim_s *itnim = cbarg;
699
700 if (complete)
701 bfa_cb_itnim_sler(itnim->ditn);
702}
703
704/**
705 * Call to resume any I/O requests waiting for room in request queue.
706 */
707static void
708bfa_itnim_qresume(void *cbarg)
709{
710 struct bfa_itnim_s *itnim = cbarg;
711
712 bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
713}
714
715
716
717
718/**
719 * bfa_itnim_public
720 */
721
722void
723bfa_itnim_iodone(struct bfa_itnim_s *itnim)
724{
725 bfa_wc_down(&itnim->wc);
726}
727
728void
729bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
730{
731 bfa_wc_down(&itnim->wc);
732}
733
734void
735bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
736 u32 *dm_len)
737{
738 /**
739 * ITN memory
740 */
741 *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
742}
743
/**
 * Module attach: carve the itnim array out of the meminfo kernel-VA
 * area, pair each itnim with its rport by index, and initialize all
 * queues and state. The meminfo cursor is advanced past the array.
 */
void
bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_s *bfa = fcpim->bfa;
	struct bfa_itnim_s *itnim;
	int i;

	INIT_LIST_HEAD(&fcpim->itnim_q);

	itnim = (struct bfa_itnim_s *) bfa_meminfo_kva(minfo);
	fcpim->itnim_arr = itnim;

	for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
		bfa_os_memset(itnim, 0, sizeof(struct bfa_itnim_s));
		itnim->bfa = bfa;
		itnim->fcpim = fcpim;
		itnim->reqq = BFA_REQQ_QOS_LO;
		/* itnim[i] is permanently bound to rport tag i */
		itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
		itnim->iotov_active = BFA_FALSE;
		bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);

		INIT_LIST_HEAD(&itnim->io_q);
		INIT_LIST_HEAD(&itnim->io_cleanup_q);
		INIT_LIST_HEAD(&itnim->pending_q);
		INIT_LIST_HEAD(&itnim->tsk_q);
		INIT_LIST_HEAD(&itnim->delay_comp_q);
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
	}

	/* itnim now points one past the array: advance the meminfo cursor */
	bfa_meminfo_kva(minfo) = (u8 *) itnim;
}
775
776void
777bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
778{
779 bfa_stats(itnim, ioc_disabled);
780 bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
781}
782
/**
 * Queue an itnim create request to firmware.
 *
 * Returns BFA_TRUE when the request was queued; BFA_FALSE when the
 * request queue is full, in which case a queue-wait is armed and the
 * caller parks in a *_qfull state until BFA_ITNIM_SM_QRESUME.
 */
static bfa_boolean_t
bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
{
	struct bfi_itnim_create_req_s *m;

	/* NOTE(review): msg_no is bumped even on a queue-full retry — confirm intended */
	itnim->msg_no++;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_CREATE_REQ,
			bfa_lpuid(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;
	m->class = FC_CLASS_3;
	m->seq_rec = itnim->seq_rec;
	m->msg_no = itnim->msg_no;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq);
	return BFA_TRUE;
}
812
/**
 * Queue an itnim delete request to firmware.
 *
 * Returns BFA_TRUE when queued; BFA_FALSE when the request queue is
 * full (a queue-wait is armed, resumed via BFA_ITNIM_SM_QRESUME).
 */
static bfa_boolean_t
bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
{
	struct bfi_itnim_delete_req_s *m;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_DELETE_REQ,
			bfa_lpuid(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq);
	return BFA_TRUE;
}
837
838/**
839 * Cleanup all pending failed inflight requests.
840 */
841static void
842bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
843{
844 struct bfa_ioim_s *ioim;
845 struct list_head *qe, *qen;
846
847 list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
848 ioim = (struct bfa_ioim_s *)qe;
849 bfa_ioim_delayed_comp(ioim, iotov);
850 }
851}
852
/**
 * Start all pending IO requests.
 *
 * Called when the itnim comes back online: stops the IO TOV timer,
 * completes delayed IOs without the TOV flag, then restarts every IO
 * that was parked on the pending queue.
 */
static void
bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	bfa_itnim_iotov_stop(itnim);

	/**
	 * Abort all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_FALSE);

	/**
	 * Start all pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &itnim->io_q);
		bfa_ioim_start(ioim);
	}
}
877
/**
 * Fail all pending IO requests
 *
 * Delayed IOs are completed with the TOV flag set; pending IOs are
 * moved to the module completion queue and failed with an IO TOV event.
 */
static void
bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	/**
	 * Fail all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_TRUE);

	/**
	 * Fail any pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_ioim_tov(ioim);
	}
}
900
901/**
902 * IO TOV timer callback. Fail any pending IO requests.
903 */
904static void
905bfa_itnim_iotov(void *itnim_arg)
906{
907 struct bfa_itnim_s *itnim = itnim_arg;
908
909 itnim->iotov_active = BFA_FALSE;
910
911 bfa_cb_itnim_tov_begin(itnim->ditn);
912 bfa_itnim_iotov_cleanup(itnim);
913 bfa_cb_itnim_tov(itnim->ditn);
914}
915
916/**
917 * Start IO TOV timer for failing back pending IO requests in offline state.
918 */
919static void
920bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
921{
922 if (itnim->fcpim->path_tov > 0) {
923
924 itnim->iotov_active = BFA_TRUE;
925 bfa_assert(bfa_itnim_hold_io(itnim));
926 bfa_timer_start(itnim->bfa, &itnim->timer,
927 bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
928 }
929}
930
931/**
932 * Stop IO TOV timer.
933 */
934static void
935bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
936{
937 if (itnim->iotov_active) {
938 itnim->iotov_active = BFA_FALSE;
939 bfa_timer_stop(&itnim->timer);
940 }
941}
942
943/**
944 * Stop IO TOV timer.
945 */
946static void
947bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
948{
949 bfa_boolean_t pathtov_active = BFA_FALSE;
950
951 if (itnim->iotov_active)
952 pathtov_active = BFA_TRUE;
953
954 bfa_itnim_iotov_stop(itnim);
955 if (pathtov_active)
956 bfa_cb_itnim_tov_begin(itnim->ditn);
957 bfa_itnim_iotov_cleanup(itnim);
958 if (pathtov_active)
959 bfa_cb_itnim_tov(itnim->ditn);
960}
961
962
963
964/**
965 * bfa_itnim_public
966 */
967
/**
 * Itnim interrupt processing.
 *
 * Dispatches firmware itnim messages: create/delete responses feed a
 * FWRSP event, SLER events feed an SLER event. The target itnim is
 * recovered from the message's bfa_handle.
 */
void
bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	union bfi_itnim_i2h_msg_u msg;
	struct bfa_itnim_s *itnim;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_ITNIM_I2H_CREATE_RSP:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.create_rsp->bfa_handle);
		bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
		bfa_stats(itnim, create_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITNIM_I2H_DELETE_RSP:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.delete_rsp->bfa_handle);
		bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
		bfa_stats(itnim, delete_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITNIM_I2H_SLER_EVENT:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.sler_event->bfa_handle);
		bfa_stats(itnim, sler_events);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
		break;

	default:
		/* unexpected message class/id from firmware */
		bfa_trc(bfa, m->mhdr.msg_id);
		bfa_assert(0);
	}
}
1011
1012
1013
1014/**
1015 * bfa_itnim_api
1016 */
1017
1018struct bfa_itnim_s *
1019bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
1020{
1021 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
1022 struct bfa_itnim_s *itnim;
1023
1024 itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
1025 bfa_assert(itnim->rport == rport);
1026
1027 itnim->ditn = ditn;
1028
1029 bfa_stats(itnim, creates);
1030 bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);
1031
1032 return itnim;
1033}
1034
1035void
1036bfa_itnim_delete(struct bfa_itnim_s *itnim)
1037{
1038 bfa_stats(itnim, deletes);
1039 bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
1040}
1041
1042void
1043bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
1044{
1045 itnim->seq_rec = seq_rec;
1046 bfa_stats(itnim, onlines);
1047 bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
1048}
1049
1050void
1051bfa_itnim_offline(struct bfa_itnim_s *itnim)
1052{
1053 bfa_stats(itnim, offlines);
1054 bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
1055}
1056
1057/**
1058 * Return true if itnim is considered offline for holding off IO request.
1059 * IO is not held if itnim is being deleted.
1060 */
1061bfa_boolean_t
1062bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
1063{
1064 return
1065 itnim->fcpim->path_tov && itnim->iotov_active &&
1066 (bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
1067 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
1068 bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
1069 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
1070 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
1071 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable))
1072 ;
1073}
1074
1075void
1076bfa_itnim_get_stats(struct bfa_itnim_s *itnim,
1077 struct bfa_itnim_hal_stats_s *stats)
1078{
1079 *stats = itnim->stats;
1080}
1081
1082void
1083bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
1084{
1085 bfa_os_memset(&itnim->stats, 0, sizeof(itnim->stats));
1086}
1087
1088
diff --git a/drivers/scsi/bfa/bfa_log.c b/drivers/scsi/bfa/bfa_log.c
deleted file mode 100644
index e7514016c9c6..000000000000
--- a/drivers/scsi/bfa/bfa_log.c
+++ /dev/null
@@ -1,346 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_log.c BFA log library
20 */
21
22#include <bfa_os_inc.h>
23#include <cs/bfa_log.h>
24
25/*
26 * global log info structure
27 */
28struct bfa_log_info_s {
29 u32 start_idx; /* start index for a module */
30 u32 total_count; /* total count for a module */
31 enum bfa_log_severity level; /* global log level */
32 bfa_log_cb_t cbfn; /* callback function */
33};
34
35static struct bfa_log_info_s bfa_log_info[BFA_LOG_MODULE_ID_MAX + 1];
36static u32 bfa_log_msg_total_count;
37static int bfa_log_initialized;
38
39static char *bfa_log_severity[] =
40 { "[none]", "[critical]", "[error]", "[warn]", "[info]", "" };
41
42/**
43 * BFA log library initialization
44 *
45 * The log library initialization includes the following,
46 * - set log instance name and callback function
47 * - read the message array generated from xml files
48 * - calculate start index for each module
49 * - calculate message count for each module
50 * - perform error checking
51 *
52 * @param[in] log_mod - log module info
53 * @param[in] instance_name - instance name
54 * @param[in] cbfn - callback function
55 *
56 * It return 0 on success, or -1 on failure
57 */
58int
59bfa_log_init(struct bfa_log_mod_s *log_mod, char *instance_name,
60 bfa_log_cb_t cbfn)
61{
62 struct bfa_log_msgdef_s *msg;
63 u32 pre_mod_id = 0;
64 u32 cur_mod_id = 0;
65 u32 i, pre_idx, idx, msg_id;
66
67 /*
68 * set instance name
69 */
70 if (log_mod) {
71 strncpy(log_mod->instance_info, instance_name,
72 sizeof(log_mod->instance_info));
73 log_mod->cbfn = cbfn;
74 for (i = 0; i <= BFA_LOG_MODULE_ID_MAX; i++)
75 log_mod->log_level[i] = BFA_LOG_WARNING;
76 }
77
78 if (bfa_log_initialized)
79 return 0;
80
81 for (i = 0; i <= BFA_LOG_MODULE_ID_MAX; i++) {
82 bfa_log_info[i].start_idx = 0;
83 bfa_log_info[i].total_count = 0;
84 bfa_log_info[i].level = BFA_LOG_WARNING;
85 bfa_log_info[i].cbfn = cbfn;
86 }
87
88 pre_idx = 0;
89 idx = 0;
90 msg = bfa_log_msg_array;
91 msg_id = BFA_LOG_GET_MSG_ID(msg);
92 pre_mod_id = BFA_LOG_GET_MOD_ID(msg_id);
93 while (msg_id != 0) {
94 cur_mod_id = BFA_LOG_GET_MOD_ID(msg_id);
95
96 if (cur_mod_id > BFA_LOG_MODULE_ID_MAX) {
97 cbfn(log_mod, msg_id,
98 "%s%s log: module id %u out of range\n",
99 BFA_LOG_CAT_NAME,
100 bfa_log_severity[BFA_LOG_ERROR],
101 cur_mod_id);
102 return -1;
103 }
104
105 if (pre_mod_id > BFA_LOG_MODULE_ID_MAX) {
106 cbfn(log_mod, msg_id,
107 "%s%s log: module id %u out of range\n",
108 BFA_LOG_CAT_NAME,
109 bfa_log_severity[BFA_LOG_ERROR],
110 pre_mod_id);
111 return -1;
112 }
113
114 if (cur_mod_id != pre_mod_id) {
115 bfa_log_info[pre_mod_id].start_idx = pre_idx;
116 bfa_log_info[pre_mod_id].total_count = idx - pre_idx;
117 pre_mod_id = cur_mod_id;
118 pre_idx = idx;
119 }
120
121 idx++;
122 msg++;
123 msg_id = BFA_LOG_GET_MSG_ID(msg);
124 }
125
126 bfa_log_info[cur_mod_id].start_idx = pre_idx;
127 bfa_log_info[cur_mod_id].total_count = idx - pre_idx;
128 bfa_log_msg_total_count = idx;
129
130 cbfn(log_mod, msg_id, "%s%s log: init OK, msg total count %u\n",
131 BFA_LOG_CAT_NAME,
132 bfa_log_severity[BFA_LOG_INFO], bfa_log_msg_total_count);
133
134 bfa_log_initialized = 1;
135
136 return 0;
137}
138
139/**
140 * BFA log set log level for a module
141 *
142 * @param[in] log_mod - log module info
143 * @param[in] mod_id - module id
144 * @param[in] log_level - log severity level
145 *
146 * It return BFA_STATUS_OK on success, or > 0 on failure
147 */
148bfa_status_t
149bfa_log_set_level(struct bfa_log_mod_s *log_mod, int mod_id,
150 enum bfa_log_severity log_level)
151{
152 if (mod_id <= BFA_LOG_UNUSED_ID || mod_id > BFA_LOG_MODULE_ID_MAX)
153 return BFA_STATUS_EINVAL;
154
155 if (log_level <= BFA_LOG_INVALID || log_level > BFA_LOG_LEVEL_MAX)
156 return BFA_STATUS_EINVAL;
157
158 if (log_mod)
159 log_mod->log_level[mod_id] = log_level;
160 else
161 bfa_log_info[mod_id].level = log_level;
162
163 return BFA_STATUS_OK;
164}
165
166/**
167 * BFA log set log level for all modules
168 *
169 * @param[in] log_mod - log module info
170 * @param[in] log_level - log severity level
171 *
172 * It return BFA_STATUS_OK on success, or > 0 on failure
173 */
174bfa_status_t
175bfa_log_set_level_all(struct bfa_log_mod_s *log_mod,
176 enum bfa_log_severity log_level)
177{
178 int mod_id = BFA_LOG_UNUSED_ID + 1;
179
180 if (log_level <= BFA_LOG_INVALID || log_level > BFA_LOG_LEVEL_MAX)
181 return BFA_STATUS_EINVAL;
182
183 if (log_mod) {
184 for (; mod_id <= BFA_LOG_MODULE_ID_MAX; mod_id++)
185 log_mod->log_level[mod_id] = log_level;
186 } else {
187 for (; mod_id <= BFA_LOG_MODULE_ID_MAX; mod_id++)
188 bfa_log_info[mod_id].level = log_level;
189 }
190
191 return BFA_STATUS_OK;
192}
193
194/**
195 * BFA log set log level for all aen sub-modules
196 *
197 * @param[in] log_mod - log module info
198 * @param[in] log_level - log severity level
199 *
200 * It return BFA_STATUS_OK on success, or > 0 on failure
201 */
202bfa_status_t
203bfa_log_set_level_aen(struct bfa_log_mod_s *log_mod,
204 enum bfa_log_severity log_level)
205{
206 int mod_id = BFA_LOG_AEN_MIN + 1;
207
208 if (log_mod) {
209 for (; mod_id <= BFA_LOG_AEN_MAX; mod_id++)
210 log_mod->log_level[mod_id] = log_level;
211 } else {
212 for (; mod_id <= BFA_LOG_AEN_MAX; mod_id++)
213 bfa_log_info[mod_id].level = log_level;
214 }
215
216 return BFA_STATUS_OK;
217}
218
219/**
220 * BFA log get log level for a module
221 *
222 * @param[in] log_mod - log module info
223 * @param[in] mod_id - module id
224 *
225 * It returns log level or -1 on error
226 */
227enum bfa_log_severity
228bfa_log_get_level(struct bfa_log_mod_s *log_mod, int mod_id)
229{
230 if (mod_id <= BFA_LOG_UNUSED_ID || mod_id > BFA_LOG_MODULE_ID_MAX)
231 return BFA_LOG_INVALID;
232
233 if (log_mod)
234 return log_mod->log_level[mod_id];
235 else
236 return bfa_log_info[mod_id].level;
237}
238
239enum bfa_log_severity
240bfa_log_get_msg_level(struct bfa_log_mod_s *log_mod, u32 msg_id)
241{
242 struct bfa_log_msgdef_s *msg;
243 u32 mod = BFA_LOG_GET_MOD_ID(msg_id);
244 u32 idx = BFA_LOG_GET_MSG_IDX(msg_id) - 1;
245
246 if (!bfa_log_initialized)
247 return BFA_LOG_INVALID;
248
249 if (mod > BFA_LOG_MODULE_ID_MAX)
250 return BFA_LOG_INVALID;
251
252 if (idx >= bfa_log_info[mod].total_count) {
253 bfa_log_info[mod].cbfn(log_mod, msg_id,
254 "%s%s log: inconsistent idx %u vs. total count %u\n",
255 BFA_LOG_CAT_NAME, bfa_log_severity[BFA_LOG_ERROR], idx,
256 bfa_log_info[mod].total_count);
257 return BFA_LOG_INVALID;
258 }
259
260 msg = bfa_log_msg_array + bfa_log_info[mod].start_idx + idx;
261 if (msg_id != BFA_LOG_GET_MSG_ID(msg)) {
262 bfa_log_info[mod].cbfn(log_mod, msg_id,
263 "%s%s log: inconsistent msg id %u array msg id %u\n",
264 BFA_LOG_CAT_NAME, bfa_log_severity[BFA_LOG_ERROR],
265 msg_id, BFA_LOG_GET_MSG_ID(msg));
266 return BFA_LOG_INVALID;
267 }
268
269 return BFA_LOG_GET_SEVERITY(msg);
270}
271
272/**
273 * BFA log message handling
274 *
275 * BFA log message handling finds the message based on message id and prints
276 * out the message based on its format and arguments. It also does prefix
277 * the severity etc.
278 *
279 * @param[in] log_mod - log module info
280 * @param[in] msg_id - message id
281 * @param[in] ... - message arguments
282 *
283 * It return 0 on success, or -1 on errors
284 */
285int
286bfa_log(struct bfa_log_mod_s *log_mod, u32 msg_id, ...)
287{
288 va_list ap;
289 char buf[256];
290 struct bfa_log_msgdef_s *msg;
291 int log_level;
292 u32 mod = BFA_LOG_GET_MOD_ID(msg_id);
293 u32 idx = BFA_LOG_GET_MSG_IDX(msg_id) - 1;
294
295 if (!bfa_log_initialized)
296 return -1;
297
298 if (mod > BFA_LOG_MODULE_ID_MAX)
299 return -1;
300
301 if (idx >= bfa_log_info[mod].total_count) {
302 bfa_log_info[mod].
303 cbfn
304 (log_mod, msg_id,
305 "%s%s log: inconsistent idx %u vs. total count %u\n",
306 BFA_LOG_CAT_NAME, bfa_log_severity[BFA_LOG_ERROR], idx,
307 bfa_log_info[mod].total_count);
308 return -1;
309 }
310
311 msg = bfa_log_msg_array + bfa_log_info[mod].start_idx + idx;
312 if (msg_id != BFA_LOG_GET_MSG_ID(msg)) {
313 bfa_log_info[mod].
314 cbfn
315 (log_mod, msg_id,
316 "%s%s log: inconsistent msg id %u array msg id %u\n",
317 BFA_LOG_CAT_NAME, bfa_log_severity[BFA_LOG_ERROR],
318 msg_id, BFA_LOG_GET_MSG_ID(msg));
319 return -1;
320 }
321
322 log_level = log_mod ? log_mod->log_level[mod] : bfa_log_info[mod].level;
323 if ((BFA_LOG_GET_SEVERITY(msg) > log_level) &&
324 (msg->attributes != BFA_LOG_ATTR_NONE))
325 return 0;
326
327 va_start(ap, msg_id);
328 bfa_os_vsprintf(buf, BFA_LOG_GET_MSG_FMT_STRING(msg), ap);
329 va_end(ap);
330
331 if (log_mod)
332 log_mod->cbfn(log_mod, msg_id, "%s[%s]%s%s %s: %s\n",
333 BFA_LOG_CAT_NAME, log_mod->instance_info,
334 bfa_log_severity[BFA_LOG_GET_SEVERITY(msg)],
335 (msg->attributes & BFA_LOG_ATTR_AUDIT)
336 ? " (audit) " : "", msg->msg_value, buf);
337 else
338 bfa_log_info[mod].cbfn(log_mod, msg_id, "%s%s%s %s: %s\n",
339 BFA_LOG_CAT_NAME,
340 bfa_log_severity[BFA_LOG_GET_SEVERITY(msg)],
341 (msg->attributes & BFA_LOG_ATTR_AUDIT) ?
342 " (audit) " : "", msg->msg_value, buf);
343
344 return 0;
345}
346
diff --git a/drivers/scsi/bfa/bfa_log_module.c b/drivers/scsi/bfa/bfa_log_module.c
deleted file mode 100644
index cf577ef7cb97..000000000000
--- a/drivers/scsi/bfa/bfa_log_module.c
+++ /dev/null
@@ -1,537 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <cs/bfa_log.h>
19#include <aen/bfa_aen_adapter.h>
20#include <aen/bfa_aen_audit.h>
21#include <aen/bfa_aen_ethport.h>
22#include <aen/bfa_aen_ioc.h>
23#include <aen/bfa_aen_itnim.h>
24#include <aen/bfa_aen_lport.h>
25#include <aen/bfa_aen_port.h>
26#include <aen/bfa_aen_rport.h>
27#include <log/bfa_log_fcs.h>
28#include <log/bfa_log_hal.h>
29#include <log/bfa_log_linux.h>
30#include <log/bfa_log_wdrv.h>
31
32struct bfa_log_msgdef_s bfa_log_msg_array[] = {
33
34
35/* messages define for BFA_AEN_CAT_ADAPTER Module */
36{BFA_AEN_ADAPTER_ADD, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
37 "BFA_AEN_ADAPTER_ADD",
38 "New adapter found: SN = %s, base port WWN = %s.",
39 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
40
41{BFA_AEN_ADAPTER_REMOVE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
42 BFA_LOG_WARNING, "BFA_AEN_ADAPTER_REMOVE",
43 "Adapter removed: SN = %s.",
44 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
45
46
47
48
49/* messages define for BFA_AEN_CAT_AUDIT Module */
50{BFA_AEN_AUDIT_AUTH_ENABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
51 BFA_LOG_INFO, "BFA_AEN_AUDIT_AUTH_ENABLE",
52 "Authentication enabled for base port: WWN = %s.",
53 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
54
55{BFA_AEN_AUDIT_AUTH_DISABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
56 BFA_LOG_INFO, "BFA_AEN_AUDIT_AUTH_DISABLE",
57 "Authentication disabled for base port: WWN = %s.",
58 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
59
60
61
62
63/* messages define for BFA_AEN_CAT_ETHPORT Module */
64{BFA_AEN_ETHPORT_LINKUP, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
65 "BFA_AEN_ETHPORT_LINKUP",
66 "Base port ethernet linkup: mac = %s.",
67 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
68
69{BFA_AEN_ETHPORT_LINKDOWN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
70 "BFA_AEN_ETHPORT_LINKDOWN",
71 "Base port ethernet linkdown: mac = %s.",
72 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
73
74{BFA_AEN_ETHPORT_ENABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
75 "BFA_AEN_ETHPORT_ENABLE",
76 "Base port ethernet interface enabled: mac = %s.",
77 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
78
79{BFA_AEN_ETHPORT_DISABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
80 "BFA_AEN_ETHPORT_DISABLE",
81 "Base port ethernet interface disabled: mac = %s.",
82 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
83
84
85
86
87/* messages define for BFA_AEN_CAT_IOC Module */
88{BFA_AEN_IOC_HBGOOD, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
89 "BFA_AEN_IOC_HBGOOD",
90 "Heart Beat of IOC %d is good.",
91 ((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1},
92
93{BFA_AEN_IOC_HBFAIL, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_CRITICAL,
94 "BFA_AEN_IOC_HBFAIL",
95 "Heart Beat of IOC %d has failed.",
96 ((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1},
97
98{BFA_AEN_IOC_ENABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
99 "BFA_AEN_IOC_ENABLE",
100 "IOC %d is enabled.",
101 ((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1},
102
103{BFA_AEN_IOC_DISABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
104 "BFA_AEN_IOC_DISABLE",
105 "IOC %d is disabled.",
106 ((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1},
107
108{BFA_AEN_IOC_FWMISMATCH, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
109 BFA_LOG_CRITICAL, "BFA_AEN_IOC_FWMISMATCH",
110 "Running firmware version is incompatible with the driver version.",
111 (0), 0},
112
113{BFA_AEN_IOC_FWCFG_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
114 BFA_LOG_CRITICAL, "BFA_AEN_IOC_FWCFG_ERROR",
115 "Link initialization failed due to firmware configuration read error:"
116 " WWN = %s.",
117 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
118
119{BFA_AEN_IOC_INVALID_VENDOR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
120 BFA_LOG_ERROR, "BFA_AEN_IOC_INVALID_VENDOR",
121 "Unsupported switch vendor. Link initialization failed: WWN = %s.",
122 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
123
124{BFA_AEN_IOC_INVALID_NWWN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
125 BFA_LOG_ERROR, "BFA_AEN_IOC_INVALID_NWWN",
126 "Invalid NWWN. Link initialization failed: NWWN = 00:00:00:00:00:00:00:00.",
127 (0), 0},
128
129{BFA_AEN_IOC_INVALID_PWWN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
130 BFA_LOG_ERROR, "BFA_AEN_IOC_INVALID_PWWN",
131 "Invalid PWWN. Link initialization failed: PWWN = 00:00:00:00:00:00:00:00.",
132 (0), 0},
133
134
135
136
137/* messages define for BFA_AEN_CAT_ITNIM Module */
138{BFA_AEN_ITNIM_ONLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
139 "BFA_AEN_ITNIM_ONLINE",
140 "Target (WWN = %s) is online for initiator (WWN = %s).",
141 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
142
143{BFA_AEN_ITNIM_OFFLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
144 "BFA_AEN_ITNIM_OFFLINE",
145 "Target (WWN = %s) offlined by initiator (WWN = %s).",
146 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
147
148{BFA_AEN_ITNIM_DISCONNECT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
149 BFA_LOG_ERROR, "BFA_AEN_ITNIM_DISCONNECT",
150 "Target (WWN = %s) connectivity lost for initiator (WWN = %s).",
151 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
152
153
154
155
156/* messages define for BFA_AEN_CAT_LPORT Module */
157{BFA_AEN_LPORT_NEW, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
158 "BFA_AEN_LPORT_NEW",
159 "New logical port created: WWN = %s, Role = %s.",
160 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
161
162{BFA_AEN_LPORT_DELETE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
163 "BFA_AEN_LPORT_DELETE",
164 "Logical port deleted: WWN = %s, Role = %s.",
165 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
166
167{BFA_AEN_LPORT_ONLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
168 "BFA_AEN_LPORT_ONLINE",
169 "Logical port online: WWN = %s, Role = %s.",
170 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
171
172{BFA_AEN_LPORT_OFFLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
173 "BFA_AEN_LPORT_OFFLINE",
174 "Logical port taken offline: WWN = %s, Role = %s.",
175 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
176
177{BFA_AEN_LPORT_DISCONNECT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
178 BFA_LOG_ERROR, "BFA_AEN_LPORT_DISCONNECT",
179 "Logical port lost fabric connectivity: WWN = %s, Role = %s.",
180 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
181
182{BFA_AEN_LPORT_NEW_PROP, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
183 "BFA_AEN_LPORT_NEW_PROP",
184 "New virtual port created using proprietary interface: WWN = %s, Role = %s.",
185 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
186
187{BFA_AEN_LPORT_DELETE_PROP, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
188 BFA_LOG_INFO, "BFA_AEN_LPORT_DELETE_PROP",
189 "Virtual port deleted using proprietary interface: WWN = %s, Role = %s.",
190 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
191
192{BFA_AEN_LPORT_NEW_STANDARD, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
193 BFA_LOG_INFO, "BFA_AEN_LPORT_NEW_STANDARD",
194 "New virtual port created using standard interface: WWN = %s, Role = %s.",
195 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
196
197{BFA_AEN_LPORT_DELETE_STANDARD, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
198 BFA_LOG_INFO, "BFA_AEN_LPORT_DELETE_STANDARD",
199 "Virtual port deleted using standard interface: WWN = %s, Role = %s.",
200 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
201
202{BFA_AEN_LPORT_NPIV_DUP_WWN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
203 BFA_LOG_WARNING, "BFA_AEN_LPORT_NPIV_DUP_WWN",
204 "Virtual port login failed. Duplicate WWN = %s reported by fabric.",
205 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
206
207{BFA_AEN_LPORT_NPIV_FABRIC_MAX, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
208 BFA_LOG_WARNING, "BFA_AEN_LPORT_NPIV_FABRIC_MAX",
209 "Virtual port (WWN = %s) login failed. Max NPIV ports already exist in"
210 " fabric/fport.",
211 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
212
213{BFA_AEN_LPORT_NPIV_UNKNOWN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
214 BFA_LOG_WARNING, "BFA_AEN_LPORT_NPIV_UNKNOWN",
215 "Virtual port (WWN = %s) login failed.",
216 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
217
218
219
220
221/* messages define for BFA_AEN_CAT_PORT Module */
222{BFA_AEN_PORT_ONLINE, BFA_LOG_ATTR_NONE, BFA_LOG_INFO, "BFA_AEN_PORT_ONLINE",
223 "Base port online: WWN = %s.",
224 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
225
226{BFA_AEN_PORT_OFFLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_WARNING,
227 "BFA_AEN_PORT_OFFLINE",
228 "Base port offline: WWN = %s.",
229 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
230
231{BFA_AEN_PORT_RLIR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
232 "BFA_AEN_PORT_RLIR",
233 "RLIR event not supported.",
234 (0), 0},
235
236{BFA_AEN_PORT_SFP_INSERT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
237 "BFA_AEN_PORT_SFP_INSERT",
238 "New SFP found: WWN/MAC = %s.",
239 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
240
241{BFA_AEN_PORT_SFP_REMOVE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
242 BFA_LOG_WARNING, "BFA_AEN_PORT_SFP_REMOVE",
243 "SFP removed: WWN/MAC = %s.",
244 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
245
246{BFA_AEN_PORT_SFP_POM, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_WARNING,
247 "BFA_AEN_PORT_SFP_POM",
248 "SFP POM level to %s: WWN/MAC = %s.",
249 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
250
251{BFA_AEN_PORT_ENABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
252 "BFA_AEN_PORT_ENABLE",
253 "Base port enabled: WWN = %s.",
254 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
255
256{BFA_AEN_PORT_DISABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
257 "BFA_AEN_PORT_DISABLE",
258 "Base port disabled: WWN = %s.",
259 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
260
261{BFA_AEN_PORT_AUTH_ON, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
262 "BFA_AEN_PORT_AUTH_ON",
263 "Authentication successful for base port: WWN = %s.",
264 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
265
266{BFA_AEN_PORT_AUTH_OFF, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_ERROR,
267 "BFA_AEN_PORT_AUTH_OFF",
268 "Authentication unsuccessful for base port: WWN = %s.",
269 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
270
271{BFA_AEN_PORT_DISCONNECT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_ERROR,
272 "BFA_AEN_PORT_DISCONNECT",
273 "Base port (WWN = %s) lost fabric connectivity.",
274 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
275
276{BFA_AEN_PORT_QOS_NEG, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_WARNING,
277 "BFA_AEN_PORT_QOS_NEG",
278 "QOS negotiation failed for base port: WWN = %s.",
279 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
280
281{BFA_AEN_PORT_FABRIC_NAME_CHANGE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
282 BFA_LOG_WARNING, "BFA_AEN_PORT_FABRIC_NAME_CHANGE",
283 "Base port WWN = %s, Fabric WWN = %s.",
284 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
285
286{BFA_AEN_PORT_SFP_ACCESS_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
287 BFA_LOG_WARNING, "BFA_AEN_PORT_SFP_ACCESS_ERROR",
288 "SFP access error: WWN/MAC = %s.",
289 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
290
291{BFA_AEN_PORT_SFP_UNSUPPORT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
292 BFA_LOG_WARNING, "BFA_AEN_PORT_SFP_UNSUPPORT",
293 "Unsupported SFP found: WWN/MAC = %s.",
294 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
295
296
297
298
299/* messages define for BFA_AEN_CAT_RPORT Module */
300{BFA_AEN_RPORT_ONLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
301 "BFA_AEN_RPORT_ONLINE",
302 "Remote port (WWN = %s) online for logical port (WWN = %s).",
303 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
304
305{BFA_AEN_RPORT_OFFLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
306 "BFA_AEN_RPORT_OFFLINE",
307 "Remote port (WWN = %s) offlined by logical port (WWN = %s).",
308 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
309
310{BFA_AEN_RPORT_DISCONNECT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
311 BFA_LOG_ERROR, "BFA_AEN_RPORT_DISCONNECT",
312 "Remote port (WWN = %s) connectivity lost for logical port (WWN = %s).",
313 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
314
315{BFA_AEN_RPORT_QOS_PRIO, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
316 "BFA_AEN_RPORT_QOS_PRIO",
317 "QOS priority changed to %s: RPWWN = %s and LPWWN = %s.",
318 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) |
319 (BFA_LOG_S << BFA_LOG_ARG2) | 0), 3},
320
321{BFA_AEN_RPORT_QOS_FLOWID, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
322 "BFA_AEN_RPORT_QOS_FLOWID",
323 "QOS flow ID changed to %d: RPWWN = %s and LPWWN = %s.",
324 ((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) |
325 (BFA_LOG_S << BFA_LOG_ARG2) | 0), 3},
326
327
328
329
330/* messages define for FCS Module */
331{BFA_LOG_FCS_FABRIC_NOSWITCH, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
332 BFA_LOG_INFO, "FCS_FABRIC_NOSWITCH",
333 "No switched fabric presence is detected.",
334 (0), 0},
335
336{BFA_LOG_FCS_FABRIC_ISOLATED, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
337 BFA_LOG_INFO, "FCS_FABRIC_ISOLATED",
338 "Port is isolated due to VF_ID mismatch. PWWN: %s, Port VF_ID: %04x and"
339 " switch port VF_ID: %04x.",
340 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_X << BFA_LOG_ARG1) |
341 (BFA_LOG_X << BFA_LOG_ARG2) | 0), 3},
342
343
344
345
346/* messages define for HAL Module */
347{BFA_LOG_HAL_ASSERT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_ERROR,
348 "HAL_ASSERT",
349 "Assertion failure: %s:%d: %s",
350 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_D << BFA_LOG_ARG1) |
351 (BFA_LOG_S << BFA_LOG_ARG2) | 0), 3},
352
353{BFA_LOG_HAL_HEARTBEAT_FAILURE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
354 BFA_LOG_CRITICAL, "HAL_HEARTBEAT_FAILURE",
355 "Firmware heartbeat failure at %d",
356 ((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1},
357
358{BFA_LOG_HAL_FCPIM_PARM_INVALID, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
359 BFA_LOG_INFO, "HAL_FCPIM_PARM_INVALID",
360 "Driver configuration %s value %d is invalid. Value should be within"
361 " %d and %d.",
362 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_D << BFA_LOG_ARG1) |
363 (BFA_LOG_D << BFA_LOG_ARG2) | (BFA_LOG_D << BFA_LOG_ARG3) | 0), 4},
364
365{BFA_LOG_HAL_SM_ASSERT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_ERROR,
366 "HAL_SM_ASSERT",
367 "SM Assertion failure: %s:%d: event = %d",
368 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_D << BFA_LOG_ARG1) |
369 (BFA_LOG_D << BFA_LOG_ARG2) | 0), 3},
370
371{BFA_LOG_HAL_DRIVER_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
372 BFA_LOG_INFO, "HAL_DRIVER_ERROR",
373 "%s",
374 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
375
376{BFA_LOG_HAL_DRIVER_CONFIG_ERROR,
377 BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
378 "HAL_DRIVER_CONFIG_ERROR",
379 "%s",
380 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
381
382{BFA_LOG_HAL_MBOX_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
383 BFA_LOG_INFO, "HAL_MBOX_ERROR",
384 "%s",
385 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
386
387
388
389
390/* messages define for LINUX Module */
391{BFA_LOG_LINUX_DEVICE_CLAIMED, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
392 BFA_LOG_INFO, "LINUX_DEVICE_CLAIMED",
393 "bfa device at %s claimed.",
394 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
395
396{BFA_LOG_LINUX_HASH_INIT_FAILED, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
397 BFA_LOG_INFO, "LINUX_HASH_INIT_FAILED",
398 "Hash table initialization failure for the port %s.",
399 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
400
401{BFA_LOG_LINUX_SYSFS_FAILED, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
402 BFA_LOG_INFO, "LINUX_SYSFS_FAILED",
403 "sysfs file creation failure for the port %s.",
404 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
405
406{BFA_LOG_LINUX_MEM_ALLOC_FAILED, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
407 BFA_LOG_INFO, "LINUX_MEM_ALLOC_FAILED",
408 "Memory allocation failed: %s. ",
409 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
410
411{BFA_LOG_LINUX_DRIVER_REGISTRATION_FAILED,
412 BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
413 "LINUX_DRIVER_REGISTRATION_FAILED",
414 "%s. ",
415 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
416
417{BFA_LOG_LINUX_ITNIM_FREE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
418 "LINUX_ITNIM_FREE",
419 "scsi%d: FCID: %s WWPN: %s",
420 ((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) |
421 (BFA_LOG_S << BFA_LOG_ARG2) | 0), 3},
422
423{BFA_LOG_LINUX_ITNIM_ONLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
424 BFA_LOG_INFO, "LINUX_ITNIM_ONLINE",
425 "Target: %d:0:%d FCID: %s WWPN: %s",
426 ((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_D << BFA_LOG_ARG1) |
427 (BFA_LOG_S << BFA_LOG_ARG2) | (BFA_LOG_S << BFA_LOG_ARG3) | 0), 4},
428
429{BFA_LOG_LINUX_ITNIM_OFFLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
430 BFA_LOG_INFO, "LINUX_ITNIM_OFFLINE",
431 "Target: %d:0:%d FCID: %s WWPN: %s",
432 ((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_D << BFA_LOG_ARG1) |
433 (BFA_LOG_S << BFA_LOG_ARG2) | (BFA_LOG_S << BFA_LOG_ARG3) | 0), 4},
434
435{BFA_LOG_LINUX_SCSI_HOST_FREE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
436 BFA_LOG_INFO, "LINUX_SCSI_HOST_FREE",
437 "Free scsi%d",
438 ((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1},
439
440{BFA_LOG_LINUX_SCSI_ABORT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
441 "LINUX_SCSI_ABORT",
442 "scsi%d: abort cmnd %p, iotag %x",
443 ((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_P << BFA_LOG_ARG1) |
444 (BFA_LOG_X << BFA_LOG_ARG2) | 0), 3},
445
446{BFA_LOG_LINUX_SCSI_ABORT_COMP, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
447 BFA_LOG_INFO, "LINUX_SCSI_ABORT_COMP",
448 "scsi%d: complete abort 0x%p, iotag 0x%x",
449 ((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_P << BFA_LOG_ARG1) |
450 (BFA_LOG_X << BFA_LOG_ARG2) | 0), 3},
451
452{BFA_LOG_LINUX_DRIVER_CONFIG_ERROR,
453 BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
454 "LINUX_DRIVER_CONFIG_ERROR",
455 "%s",
456 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
457
458{BFA_LOG_LINUX_BNA_STATE_MACHINE,
459 BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
460 "LINUX_BNA_STATE_MACHINE",
461 "%s",
462 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
463
464{BFA_LOG_LINUX_IOC_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
465 BFA_LOG_INFO, "LINUX_IOC_ERROR",
466 "%s",
467 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
468
469{BFA_LOG_LINUX_RESOURCE_ALLOC_ERROR,
470 BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
471 "LINUX_RESOURCE_ALLOC_ERROR",
472 "%s",
473 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
474
475{BFA_LOG_LINUX_RING_BUFFER_ERROR,
476 BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
477 "LINUX_RING_BUFFER_ERROR",
478 "%s",
479 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
480
481{BFA_LOG_LINUX_DRIVER_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
482 BFA_LOG_ERROR, "LINUX_DRIVER_ERROR",
483 "%s",
484 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
485
486{BFA_LOG_LINUX_DRIVER_INFO, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
487 BFA_LOG_INFO, "LINUX_DRIVER_INFO",
488 "%s",
489 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
490
491{BFA_LOG_LINUX_DRIVER_DIAG, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
492 BFA_LOG_INFO, "LINUX_DRIVER_DIAG",
493 "%s",
494 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
495
496{BFA_LOG_LINUX_DRIVER_AEN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
497 BFA_LOG_INFO, "LINUX_DRIVER_AEN",
498 "%s",
499 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
500
501
502
503
504/* messages define for WDRV Module */
505{BFA_LOG_WDRV_IOC_INIT_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
506 BFA_LOG_INFO, "WDRV_IOC_INIT_ERROR",
507 "IOC initialization has failed.",
508 (0), 0},
509
510{BFA_LOG_WDRV_IOC_INTERNAL_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
511 BFA_LOG_INFO, "WDRV_IOC_INTERNAL_ERROR",
512 "IOC internal error. ",
513 (0), 0},
514
515{BFA_LOG_WDRV_IOC_START_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
516 BFA_LOG_INFO, "WDRV_IOC_START_ERROR",
517 "IOC could not be started. ",
518 (0), 0},
519
520{BFA_LOG_WDRV_IOC_STOP_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
521 BFA_LOG_INFO, "WDRV_IOC_STOP_ERROR",
522 "IOC could not be stopped. ",
523 (0), 0},
524
525{BFA_LOG_WDRV_INSUFFICIENT_RESOURCES, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
526 BFA_LOG_INFO, "WDRV_INSUFFICIENT_RESOURCES",
527 "Insufficient memory. ",
528 (0), 0},
529
530{BFA_LOG_WDRV_BASE_ADDRESS_MAP_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
531 BFA_LOG_INFO, "WDRV_BASE_ADDRESS_MAP_ERROR",
532 "Unable to map the IOC onto the system address space. ",
533 (0), 0},
534
535
536{0, 0, 0, "", "", 0, 0},
537};
diff --git a/drivers/scsi/bfa/bfa_lps.c b/drivers/scsi/bfa/bfa_lps.c
deleted file mode 100644
index acabb44f092f..000000000000
--- a/drivers/scsi/bfa/bfa_lps.c
+++ /dev/null
@@ -1,892 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <bfi/bfi_lps.h>
20#include <cs/bfa_debug.h>
21#include <defs/bfa_defs_pci.h>
22
23BFA_TRC_FILE(HAL, LPS);
24BFA_MODULE(lps);
25
26#define BFA_LPS_MIN_LPORTS (1)
27#define BFA_LPS_MAX_LPORTS (256)
28
29/*
30 * Maximum Vports supported per physical port or vf.
31 */
32#define BFA_LPS_MAX_VPORTS_SUPP_CB 255
33#define BFA_LPS_MAX_VPORTS_SUPP_CT 190
34
35/**
36 * forward declarations
37 */
38static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
39 u32 *dm_len);
40static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
41 struct bfa_iocfc_cfg_s *cfg,
42 struct bfa_meminfo_s *meminfo,
43 struct bfa_pcidev_s *pcidev);
44static void bfa_lps_detach(struct bfa_s *bfa);
45static void bfa_lps_start(struct bfa_s *bfa);
46static void bfa_lps_stop(struct bfa_s *bfa);
47static void bfa_lps_iocdisable(struct bfa_s *bfa);
48static void bfa_lps_login_rsp(struct bfa_s *bfa,
49 struct bfi_lps_login_rsp_s *rsp);
50static void bfa_lps_logout_rsp(struct bfa_s *bfa,
51 struct bfi_lps_logout_rsp_s *rsp);
52static void bfa_lps_reqq_resume(void *lps_arg);
53static void bfa_lps_free(struct bfa_lps_s *lps);
54static void bfa_lps_send_login(struct bfa_lps_s *lps);
55static void bfa_lps_send_logout(struct bfa_lps_s *lps);
56static void bfa_lps_login_comp(struct bfa_lps_s *lps);
57static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
58static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
59
60/**
61 * lps_pvt BFA LPS private functions
62 */
63
64enum bfa_lps_event {
65 BFA_LPS_SM_LOGIN = 1, /* login request from user */
66 BFA_LPS_SM_LOGOUT = 2, /* logout request from user */
67 BFA_LPS_SM_FWRSP = 3, /* f/w response to login/logout */
68 BFA_LPS_SM_RESUME = 4, /* space present in reqq queue */
69 BFA_LPS_SM_DELETE = 5, /* lps delete from user */
70 BFA_LPS_SM_OFFLINE = 6, /* Link is offline */
71 BFA_LPS_SM_RX_CVL = 7, /* Rx clear virtual link */
72};
73
74static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
75static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
76static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps,
77 enum bfa_lps_event event);
78static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
79static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
80static void bfa_lps_sm_logowait(struct bfa_lps_s *lps,
81 enum bfa_lps_event event);
82
83/**
84 * Init state -- no login
85 */
86static void
87bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
88{
89 bfa_trc(lps->bfa, lps->lp_tag);
90 bfa_trc(lps->bfa, event);
91
92 switch (event) {
93 case BFA_LPS_SM_LOGIN:
94 if (bfa_reqq_full(lps->bfa, lps->reqq)) {
95 bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
96 bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
97 } else {
98 bfa_sm_set_state(lps, bfa_lps_sm_login);
99 bfa_lps_send_login(lps);
100 }
101 if (lps->fdisc)
102 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
103 BFA_PL_EID_LOGIN, 0, "FDISC Request");
104 else
105 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
106 BFA_PL_EID_LOGIN, 0, "FLOGI Request");
107 break;
108
109 case BFA_LPS_SM_LOGOUT:
110 bfa_lps_logout_comp(lps);
111 break;
112
113 case BFA_LPS_SM_DELETE:
114 bfa_lps_free(lps);
115 break;
116
117 case BFA_LPS_SM_RX_CVL:
118 case BFA_LPS_SM_OFFLINE:
119 break;
120
121 case BFA_LPS_SM_FWRSP:
122 /* Could happen when fabric detects loopback and discards
123 * the lps request. Fw will eventually sent out the timeout
124 * Just ignore
125 */
126 break;
127
128 default:
129 bfa_sm_fault(lps->bfa, event);
130 }
131}
132
133/**
134 * login is in progress -- awaiting response from firmware
135 */
136static void
137bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
138{
139 bfa_trc(lps->bfa, lps->lp_tag);
140 bfa_trc(lps->bfa, event);
141
142 switch (event) {
143 case BFA_LPS_SM_FWRSP:
144 if (lps->status == BFA_STATUS_OK) {
145 bfa_sm_set_state(lps, bfa_lps_sm_online);
146 if (lps->fdisc)
147 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
148 BFA_PL_EID_LOGIN, 0, "FDISC Accept");
149 else
150 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
151 BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
152 } else {
153 bfa_sm_set_state(lps, bfa_lps_sm_init);
154 if (lps->fdisc)
155 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
156 BFA_PL_EID_LOGIN, 0,
157 "FDISC Fail (RJT or timeout)");
158 else
159 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
160 BFA_PL_EID_LOGIN, 0,
161 "FLOGI Fail (RJT or timeout)");
162 }
163 bfa_lps_login_comp(lps);
164 break;
165
166 case BFA_LPS_SM_OFFLINE:
167 bfa_sm_set_state(lps, bfa_lps_sm_init);
168 break;
169
170 default:
171 bfa_sm_fault(lps->bfa, event);
172 }
173}
174
175/**
176 * login pending - awaiting space in request queue
177 */
178static void
179bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
180{
181 bfa_trc(lps->bfa, lps->lp_tag);
182 bfa_trc(lps->bfa, event);
183
184 switch (event) {
185 case BFA_LPS_SM_RESUME:
186 bfa_sm_set_state(lps, bfa_lps_sm_login);
187 break;
188
189 case BFA_LPS_SM_OFFLINE:
190 bfa_sm_set_state(lps, bfa_lps_sm_init);
191 bfa_reqq_wcancel(&lps->wqe);
192 break;
193
194 case BFA_LPS_SM_RX_CVL:
195 /*
196 * Login was not even sent out; so when getting out
197 * of this state, it will appear like a login retry
198 * after Clear virtual link
199 */
200 break;
201
202 default:
203 bfa_sm_fault(lps->bfa, event);
204 }
205}
206
207/**
208 * login complete
209 */
210static void
211bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
212{
213 bfa_trc(lps->bfa, lps->lp_tag);
214 bfa_trc(lps->bfa, event);
215
216 switch (event) {
217 case BFA_LPS_SM_LOGOUT:
218 if (bfa_reqq_full(lps->bfa, lps->reqq)) {
219 bfa_sm_set_state(lps, bfa_lps_sm_logowait);
220 bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
221 } else {
222 bfa_sm_set_state(lps, bfa_lps_sm_logout);
223 bfa_lps_send_logout(lps);
224 }
225 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
226 BFA_PL_EID_LOGO, 0, "Logout");
227 break;
228
229 case BFA_LPS_SM_RX_CVL:
230 bfa_sm_set_state(lps, bfa_lps_sm_init);
231
232 /* Let the vport module know about this event */
233 bfa_lps_cvl_event(lps);
234 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
235 BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
236 break;
237
238 case BFA_LPS_SM_OFFLINE:
239 case BFA_LPS_SM_DELETE:
240 bfa_sm_set_state(lps, bfa_lps_sm_init);
241 break;
242
243 default:
244 bfa_sm_fault(lps->bfa, event);
245 }
246}
247
248/**
249 * logout in progress - awaiting firmware response
250 */
251static void
252bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
253{
254 bfa_trc(lps->bfa, lps->lp_tag);
255 bfa_trc(lps->bfa, event);
256
257 switch (event) {
258 case BFA_LPS_SM_FWRSP:
259 bfa_sm_set_state(lps, bfa_lps_sm_init);
260 bfa_lps_logout_comp(lps);
261 break;
262
263 case BFA_LPS_SM_OFFLINE:
264 bfa_sm_set_state(lps, bfa_lps_sm_init);
265 break;
266
267 default:
268 bfa_sm_fault(lps->bfa, event);
269 }
270}
271
272/**
273 * logout pending -- awaiting space in request queue
274 */
275static void
276bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
277{
278 bfa_trc(lps->bfa, lps->lp_tag);
279 bfa_trc(lps->bfa, event);
280
281 switch (event) {
282 case BFA_LPS_SM_RESUME:
283 bfa_sm_set_state(lps, bfa_lps_sm_logout);
284 bfa_lps_send_logout(lps);
285 break;
286
287 case BFA_LPS_SM_OFFLINE:
288 bfa_sm_set_state(lps, bfa_lps_sm_init);
289 bfa_reqq_wcancel(&lps->wqe);
290 break;
291
292 default:
293 bfa_sm_fault(lps->bfa, event);
294 }
295}
296
297
298
299/**
300 * lps_pvt BFA LPS private functions
301 */
302
303/**
304 * return memory requirement
305 */
306static void
307bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len)
308{
309 if (cfg->drvcfg.min_cfg)
310 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS;
311 else
312 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS;
313}
314
315/**
316 * bfa module attach at initialization time
317 */
318static void
319bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
320 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
321{
322 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
323 struct bfa_lps_s *lps;
324 int i;
325
326 bfa_os_memset(mod, 0, sizeof(struct bfa_lps_mod_s));
327 mod->num_lps = BFA_LPS_MAX_LPORTS;
328 if (cfg->drvcfg.min_cfg)
329 mod->num_lps = BFA_LPS_MIN_LPORTS;
330 else
331 mod->num_lps = BFA_LPS_MAX_LPORTS;
332 mod->lps_arr = lps = (struct bfa_lps_s *) bfa_meminfo_kva(meminfo);
333
334 bfa_meminfo_kva(meminfo) += mod->num_lps * sizeof(struct bfa_lps_s);
335
336 INIT_LIST_HEAD(&mod->lps_free_q);
337 INIT_LIST_HEAD(&mod->lps_active_q);
338
339 for (i = 0; i < mod->num_lps; i++, lps++) {
340 lps->bfa = bfa;
341 lps->lp_tag = (u8) i;
342 lps->reqq = BFA_REQQ_LPS;
343 bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
344 list_add_tail(&lps->qe, &mod->lps_free_q);
345 }
346}
347
348static void
349bfa_lps_detach(struct bfa_s *bfa)
350{
351}
352
353static void
354bfa_lps_start(struct bfa_s *bfa)
355{
356}
357
358static void
359bfa_lps_stop(struct bfa_s *bfa)
360{
361}
362
363/**
364 * IOC in disabled state -- consider all lps offline
365 */
366static void
367bfa_lps_iocdisable(struct bfa_s *bfa)
368{
369 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
370 struct bfa_lps_s *lps;
371 struct list_head *qe, *qen;
372
373 list_for_each_safe(qe, qen, &mod->lps_active_q) {
374 lps = (struct bfa_lps_s *) qe;
375 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
376 }
377}
378
379/**
380 * Firmware login response
381 */
382static void
383bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
384{
385 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
386 struct bfa_lps_s *lps;
387
388 bfa_assert(rsp->lp_tag < mod->num_lps);
389 lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
390
391 lps->status = rsp->status;
392 switch (rsp->status) {
393 case BFA_STATUS_OK:
394 lps->fport = rsp->f_port;
395 lps->npiv_en = rsp->npiv_en;
396 lps->lp_pid = rsp->lp_pid;
397 lps->pr_bbcred = bfa_os_ntohs(rsp->bb_credit);
398 lps->pr_pwwn = rsp->port_name;
399 lps->pr_nwwn = rsp->node_name;
400 lps->auth_req = rsp->auth_req;
401 lps->lp_mac = rsp->lp_mac;
402 lps->brcd_switch = rsp->brcd_switch;
403 lps->fcf_mac = rsp->fcf_mac;
404
405 break;
406
407 case BFA_STATUS_FABRIC_RJT:
408 lps->lsrjt_rsn = rsp->lsrjt_rsn;
409 lps->lsrjt_expl = rsp->lsrjt_expl;
410
411 break;
412
413 case BFA_STATUS_EPROTOCOL:
414 lps->ext_status = rsp->ext_status;
415
416 break;
417
418 default:
419 /* Nothing to do with other status */
420 break;
421 }
422
423 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
424}
425
426/**
427 * Firmware logout response
428 */
429static void
430bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
431{
432 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
433 struct bfa_lps_s *lps;
434
435 bfa_assert(rsp->lp_tag < mod->num_lps);
436 lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
437
438 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
439}
440
441/**
442 * Firmware received a Clear virtual link request (for FCoE)
443 */
444static void
445bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
446{
447 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
448 struct bfa_lps_s *lps;
449
450 lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag);
451
452 bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
453}
454
455/**
456 * Space is available in request queue, resume queueing request to firmware.
457 */
458static void
459bfa_lps_reqq_resume(void *lps_arg)
460{
461 struct bfa_lps_s *lps = lps_arg;
462
463 bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
464}
465
466/**
467 * lps is freed -- triggered by vport delete
468 */
469static void
470bfa_lps_free(struct bfa_lps_s *lps)
471{
472 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa);
473
474 list_del(&lps->qe);
475 list_add_tail(&lps->qe, &mod->lps_free_q);
476}
477
478/**
479 * send login request to firmware
480 */
481static void
482bfa_lps_send_login(struct bfa_lps_s *lps)
483{
484 struct bfi_lps_login_req_s *m;
485
486 m = bfa_reqq_next(lps->bfa, lps->reqq);
487 bfa_assert(m);
488
489 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
490 bfa_lpuid(lps->bfa));
491
492 m->lp_tag = lps->lp_tag;
493 m->alpa = lps->alpa;
494 m->pdu_size = bfa_os_htons(lps->pdusz);
495 m->pwwn = lps->pwwn;
496 m->nwwn = lps->nwwn;
497 m->fdisc = lps->fdisc;
498 m->auth_en = lps->auth_en;
499
500 bfa_reqq_produce(lps->bfa, lps->reqq);
501}
502
503/**
504 * send logout request to firmware
505 */
506static void
507bfa_lps_send_logout(struct bfa_lps_s *lps)
508{
509 struct bfi_lps_logout_req_s *m;
510
511 m = bfa_reqq_next(lps->bfa, lps->reqq);
512 bfa_assert(m);
513
514 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
515 bfa_lpuid(lps->bfa));
516
517 m->lp_tag = lps->lp_tag;
518 m->port_name = lps->pwwn;
519 bfa_reqq_produce(lps->bfa, lps->reqq);
520}
521
522/**
523 * Indirect login completion handler for non-fcs
524 */
525static void
526bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
527{
528 struct bfa_lps_s *lps = arg;
529
530 if (!complete)
531 return;
532
533 if (lps->fdisc)
534 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
535 else
536 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
537}
538
539/**
540 * Login completion handler -- direct call for fcs, queue for others
541 */
542static void
543bfa_lps_login_comp(struct bfa_lps_s *lps)
544{
545 if (!lps->bfa->fcs) {
546 bfa_cb_queue(lps->bfa, &lps->hcb_qe,
547 bfa_lps_login_comp_cb, lps);
548 return;
549 }
550
551 if (lps->fdisc)
552 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
553 else
554 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
555}
556
557/**
558 * Indirect logout completion handler for non-fcs
559 */
560static void
561bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
562{
563 struct bfa_lps_s *lps = arg;
564
565 if (!complete)
566 return;
567
568 if (lps->fdisc)
569 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
570 else
571 bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
572}
573
574/**
575 * Logout completion handler -- direct call for fcs, queue for others
576 */
577static void
578bfa_lps_logout_comp(struct bfa_lps_s *lps)
579{
580 if (!lps->bfa->fcs) {
581 bfa_cb_queue(lps->bfa, &lps->hcb_qe,
582 bfa_lps_logout_comp_cb, lps);
583 return;
584 }
585 if (lps->fdisc)
586 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
587 else
588 bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
589}
590
591/**
592 * Clear virtual link completion handler for non-fcs
593 */
594static void
595bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
596{
597 struct bfa_lps_s *lps = arg;
598
599 if (!complete)
600 return;
601
602 /* Clear virtual link to base port will result in link down */
603 if (lps->fdisc)
604 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
605}
606
607/**
608 * Received Clear virtual link event --direct call for fcs,
609 * queue for others
610 */
611static void
612bfa_lps_cvl_event(struct bfa_lps_s *lps)
613{
614 if (!lps->bfa->fcs) {
615 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
616 lps);
617 return;
618 }
619
620 /* Clear virtual link to base port will result in link down */
621 if (lps->fdisc)
622 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
623}
624
625u32
626bfa_lps_get_max_vport(struct bfa_s *bfa)
627{
628 if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
629 return BFA_LPS_MAX_VPORTS_SUPP_CT;
630 else
631 return BFA_LPS_MAX_VPORTS_SUPP_CB;
632}
633
634/**
635 * lps_public BFA LPS public functions
636 */
637
638/**
639 * Allocate a lport srvice tag.
640 */
641struct bfa_lps_s *
642bfa_lps_alloc(struct bfa_s *bfa)
643{
644 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
645 struct bfa_lps_s *lps = NULL;
646
647 bfa_q_deq(&mod->lps_free_q, &lps);
648
649 if (lps == NULL)
650 return NULL;
651
652 list_add_tail(&lps->qe, &mod->lps_active_q);
653
654 bfa_sm_set_state(lps, bfa_lps_sm_init);
655 return lps;
656}
657
658/**
659 * Free lport service tag. This can be called anytime after an alloc.
660 * No need to wait for any pending login/logout completions.
661 */
662void
663bfa_lps_delete(struct bfa_lps_s *lps)
664{
665 bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
666}
667
668/**
669 * Initiate a lport login.
670 */
671void
672bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
673 wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
674{
675 lps->uarg = uarg;
676 lps->alpa = alpa;
677 lps->pdusz = pdusz;
678 lps->pwwn = pwwn;
679 lps->nwwn = nwwn;
680 lps->fdisc = BFA_FALSE;
681 lps->auth_en = auth_en;
682 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
683}
684
685/**
686 * Initiate a lport fdisc login.
687 */
688void
689bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
690 wwn_t nwwn)
691{
692 lps->uarg = uarg;
693 lps->alpa = 0;
694 lps->pdusz = pdusz;
695 lps->pwwn = pwwn;
696 lps->nwwn = nwwn;
697 lps->fdisc = BFA_TRUE;
698 lps->auth_en = BFA_FALSE;
699 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
700}
701
702/**
703 * Initiate a lport logout (flogi).
704 */
705void
706bfa_lps_flogo(struct bfa_lps_s *lps)
707{
708 bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
709}
710
711/**
712 * Initiate a lport FDSIC logout.
713 */
714void
715bfa_lps_fdisclogo(struct bfa_lps_s *lps)
716{
717 bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
718}
719
720/**
721 * Discard a pending login request -- should be called only for
722 * link down handling.
723 */
724void
725bfa_lps_discard(struct bfa_lps_s *lps)
726{
727 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
728}
729
730/**
731 * Return lport services tag
732 */
733u8
734bfa_lps_get_tag(struct bfa_lps_s *lps)
735{
736 return lps->lp_tag;
737}
738
739/**
740 * Return lport services tag given the pid
741 */
742u8
743bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
744{
745 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
746 struct bfa_lps_s *lps;
747 int i;
748
749 for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
750 if (lps->lp_pid == pid)
751 return lps->lp_tag;
752 }
753
754 /* Return base port tag anyway */
755 return 0;
756}
757
758/**
759 * return if fabric login indicates support for NPIV
760 */
761bfa_boolean_t
762bfa_lps_is_npiv_en(struct bfa_lps_s *lps)
763{
764 return lps->npiv_en;
765}
766
767/**
768 * Return TRUE if attached to F-Port, else return FALSE
769 */
770bfa_boolean_t
771bfa_lps_is_fport(struct bfa_lps_s *lps)
772{
773 return lps->fport;
774}
775
776/**
777 * Return TRUE if attached to a Brocade Fabric
778 */
779bfa_boolean_t
780bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps)
781{
782 return lps->brcd_switch;
783}
784/**
785 * return TRUE if authentication is required
786 */
787bfa_boolean_t
788bfa_lps_is_authreq(struct bfa_lps_s *lps)
789{
790 return lps->auth_req;
791}
792
793bfa_eproto_status_t
794bfa_lps_get_extstatus(struct bfa_lps_s *lps)
795{
796 return lps->ext_status;
797}
798
799/**
800 * return port id assigned to the lport
801 */
802u32
803bfa_lps_get_pid(struct bfa_lps_s *lps)
804{
805 return lps->lp_pid;
806}
807
808/**
809 * Return bb_credit assigned in FLOGI response
810 */
811u16
812bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps)
813{
814 return lps->pr_bbcred;
815}
816
817/**
818 * Return peer port name
819 */
820wwn_t
821bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps)
822{
823 return lps->pr_pwwn;
824}
825
826/**
827 * Return peer node name
828 */
829wwn_t
830bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps)
831{
832 return lps->pr_nwwn;
833}
834
835/**
836 * return reason code if login request is rejected
837 */
838u8
839bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps)
840{
841 return lps->lsrjt_rsn;
842}
843
844/**
845 * return explanation code if login request is rejected
846 */
847u8
848bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps)
849{
850 return lps->lsrjt_expl;
851}
852
853/**
854 * Return fpma/spma MAC for lport
855 */
856struct mac_s
857bfa_lps_get_lp_mac(struct bfa_lps_s *lps)
858{
859 return lps->lp_mac;
860}
861
862/**
863 * LPS firmware message class handler.
864 */
865void
866bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
867{
868 union bfi_lps_i2h_msg_u msg;
869
870 bfa_trc(bfa, m->mhdr.msg_id);
871 msg.msg = m;
872
873 switch (m->mhdr.msg_id) {
874 case BFI_LPS_H2I_LOGIN_RSP:
875 bfa_lps_login_rsp(bfa, msg.login_rsp);
876 break;
877
878 case BFI_LPS_H2I_LOGOUT_RSP:
879 bfa_lps_logout_rsp(bfa, msg.logout_rsp);
880 break;
881
882 case BFI_LPS_H2I_CVL_EVENT:
883 bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
884 break;
885
886 default:
887 bfa_trc(bfa, m->mhdr.msg_id);
888 bfa_assert(0);
889 }
890}
891
892
diff --git a/drivers/scsi/bfa/bfa_lps_priv.h b/drivers/scsi/bfa/bfa_lps_priv.h
deleted file mode 100644
index d16c6ce995df..000000000000
--- a/drivers/scsi/bfa/bfa_lps_priv.h
+++ /dev/null
@@ -1,38 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_LPS_PRIV_H__
19#define __BFA_LPS_PRIV_H__
20
21#include <bfa_svc.h>
22
23struct bfa_lps_mod_s {
24 struct list_head lps_free_q;
25 struct list_head lps_active_q;
26 struct bfa_lps_s *lps_arr;
27 int num_lps;
28};
29
30#define BFA_LPS_MOD(__bfa) (&(__bfa)->modules.lps_mod)
31#define BFA_LPS_FROM_TAG(__mod, __tag) (&(__mod)->lps_arr[__tag])
32
33/*
34 * external functions
35 */
36void bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
37
38#endif /* __BFA_LPS_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_priv.h b/drivers/scsi/bfa/bfa_modules.h
index bf4939b1676c..2cd527338677 100644
--- a/drivers/scsi/bfa/bfa_priv.h
+++ b/drivers/scsi/bfa/bfa_modules.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -15,26 +15,52 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18#ifndef __BFA_PRIV_H__ 18/**
19#define __BFA_PRIV_H__ 19 * bfa_modules.h BFA modules
20 */
21
22#ifndef __BFA_MODULES_H__
23#define __BFA_MODULES_H__
24
25#include "bfa_cs.h"
26#include "bfa.h"
27#include "bfa_svc.h"
28#include "bfa_fcpim.h"
29#include "bfa_port.h"
30
31struct bfa_modules_s {
32 struct bfa_fcport_s fcport; /* fc port module */
33 struct bfa_fcxp_mod_s fcxp_mod; /* fcxp module */
34 struct bfa_lps_mod_s lps_mod; /* fcxp module */
35 struct bfa_uf_mod_s uf_mod; /* unsolicited frame module */
36 struct bfa_rport_mod_s rport_mod; /* remote port module */
37 struct bfa_fcpim_mod_s fcpim_mod; /* FCP initiator module */
38 struct bfa_sgpg_mod_s sgpg_mod; /* SG page module */
39 struct bfa_port_s port; /* Physical port module */
40};
41
42/*
43 * !!! Only append to the enums defined here to avoid any versioning
44 * !!! needed between trace utility and driver version
45 */
46enum {
47 BFA_TRC_HAL_CORE = 1,
48 BFA_TRC_HAL_FCXP = 2,
49 BFA_TRC_HAL_FCPIM = 3,
50 BFA_TRC_HAL_IOCFC_CT = 4,
51 BFA_TRC_HAL_IOCFC_CB = 5,
52};
20 53
21#include "bfa_iocfc.h"
22#include "bfa_intr_priv.h"
23#include "bfa_trcmod_priv.h"
24#include "bfa_modules_priv.h"
25#include "bfa_fwimg_priv.h"
26#include <cs/bfa_log.h>
27#include <bfa_timer.h>
28 54
29/** 55/**
30 * Macro to define a new BFA module 56 * Macro to define a new BFA module
31 */ 57 */
32#define BFA_MODULE(__mod) \ 58#define BFA_MODULE(__mod) \
33 static void bfa_ ## __mod ## _meminfo( \ 59 static void bfa_ ## __mod ## _meminfo( \
34 struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, \ 60 struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, \
35 u32 *dm_len); \ 61 u32 *dm_len); \
36 static void bfa_ ## __mod ## _attach(struct bfa_s *bfa, \ 62 static void bfa_ ## __mod ## _attach(struct bfa_s *bfa, \
37 void *bfad, struct bfa_iocfc_cfg_s *cfg, \ 63 void *bfad, struct bfa_iocfc_cfg_s *cfg, \
38 struct bfa_meminfo_s *meminfo, \ 64 struct bfa_meminfo_s *meminfo, \
39 struct bfa_pcidev_s *pcidev); \ 65 struct bfa_pcidev_s *pcidev); \
40 static void bfa_ ## __mod ## _detach(struct bfa_s *bfa); \ 66 static void bfa_ ## __mod ## _detach(struct bfa_s *bfa); \
@@ -77,17 +103,15 @@ extern struct bfa_module_s *hal_mods[];
77 103
78struct bfa_s { 104struct bfa_s {
79 void *bfad; /* BFA driver instance */ 105 void *bfad; /* BFA driver instance */
80 struct bfa_aen_s *aen; /* AEN module */
81 struct bfa_plog_s *plog; /* portlog buffer */ 106 struct bfa_plog_s *plog; /* portlog buffer */
82 struct bfa_log_mod_s *logm; /* driver logging modulen */
83 struct bfa_trc_mod_s *trcmod; /* driver tracing */ 107 struct bfa_trc_mod_s *trcmod; /* driver tracing */
84 struct bfa_ioc_s ioc; /* IOC module */ 108 struct bfa_ioc_s ioc; /* IOC module */
85 struct bfa_iocfc_s iocfc; /* IOCFC module */ 109 struct bfa_iocfc_s iocfc; /* IOCFC module */
86 struct bfa_timer_mod_s timer_mod; /* timer module */ 110 struct bfa_timer_mod_s timer_mod; /* timer module */
87 struct bfa_modules_s modules; /* BFA modules */ 111 struct bfa_modules_s modules; /* BFA modules */
88 struct list_head comp_q; /* pending completions */ 112 struct list_head comp_q; /* pending completions */
89 bfa_boolean_t rme_process; /* RME processing enabled */ 113 bfa_boolean_t rme_process; /* RME processing enabled */
90 struct list_head reqq_waitq[BFI_IOC_MAX_CQS]; 114 struct list_head reqq_waitq[BFI_IOC_MAX_CQS];
91 bfa_boolean_t fcs; /* FCS is attached to BFA */ 115 bfa_boolean_t fcs; /* FCS is attached to BFA */
92 struct bfa_msix_s msix; 116 struct bfa_msix_s msix;
93}; 117};
@@ -95,8 +119,6 @@ struct bfa_s {
95extern bfa_isr_func_t bfa_isrs[BFI_MC_MAX]; 119extern bfa_isr_func_t bfa_isrs[BFI_MC_MAX];
96extern bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[]; 120extern bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[];
97extern bfa_boolean_t bfa_auto_recover; 121extern bfa_boolean_t bfa_auto_recover;
98extern struct bfa_module_s hal_mod_flash;
99extern struct bfa_module_s hal_mod_fcdiag;
100extern struct bfa_module_s hal_mod_sgpg; 122extern struct bfa_module_s hal_mod_sgpg;
101extern struct bfa_module_s hal_mod_fcport; 123extern struct bfa_module_s hal_mod_fcport;
102extern struct bfa_module_s hal_mod_fcxp; 124extern struct bfa_module_s hal_mod_fcxp;
@@ -104,7 +126,5 @@ extern struct bfa_module_s hal_mod_lps;
104extern struct bfa_module_s hal_mod_uf; 126extern struct bfa_module_s hal_mod_uf;
105extern struct bfa_module_s hal_mod_rport; 127extern struct bfa_module_s hal_mod_rport;
106extern struct bfa_module_s hal_mod_fcpim; 128extern struct bfa_module_s hal_mod_fcpim;
107extern struct bfa_module_s hal_mod_pbind;
108
109#endif /* __BFA_PRIV_H__ */
110 129
130#endif /* __BFA_MODULES_H__ */
diff --git a/drivers/scsi/bfa/bfa_modules_priv.h b/drivers/scsi/bfa/bfa_modules_priv.h
deleted file mode 100644
index f554c2fad6a9..000000000000
--- a/drivers/scsi/bfa/bfa_modules_priv.h
+++ /dev/null
@@ -1,43 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_MODULES_PRIV_H__
19#define __BFA_MODULES_PRIV_H__
20
21#include "bfa_uf_priv.h"
22#include "bfa_port_priv.h"
23#include "bfa_rport_priv.h"
24#include "bfa_fcxp_priv.h"
25#include "bfa_lps_priv.h"
26#include "bfa_fcpim_priv.h"
27#include <cee/bfa_cee.h>
28#include <port/bfa_port.h>
29
30
31struct bfa_modules_s {
32 struct bfa_fcport_s fcport; /* fc port module */
33 struct bfa_fcxp_mod_s fcxp_mod; /* fcxp module */
34 struct bfa_lps_mod_s lps_mod; /* fcxp module */
35 struct bfa_uf_mod_s uf_mod; /* unsolicited frame module */
36 struct bfa_rport_mod_s rport_mod; /* remote port module */
37 struct bfa_fcpim_mod_s fcpim_mod; /* FCP initiator module */
38 struct bfa_sgpg_mod_s sgpg_mod; /* SG page module */
39 struct bfa_cee_s cee; /* CEE Module */
40 struct bfa_port_s port; /* Physical port module */
41};
42
43#endif /* __BFA_MODULES_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_os_inc.h b/drivers/scsi/bfa/bfa_os_inc.h
index bd1cd3ee3022..788a250ffb8a 100644
--- a/drivers/scsi/bfa/bfa_os_inc.h
+++ b/drivers/scsi/bfa/bfa_os_inc.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -22,30 +22,20 @@
22#ifndef __BFA_OS_INC_H__ 22#ifndef __BFA_OS_INC_H__
23#define __BFA_OS_INC_H__ 23#define __BFA_OS_INC_H__
24 24
25#ifndef __KERNEL__
26#include <stdint.h>
27#else
28#include <linux/types.h> 25#include <linux/types.h>
29
30#include <linux/version.h> 26#include <linux/version.h>
31#include <linux/pci.h> 27#include <linux/pci.h>
32
33#include <linux/dma-mapping.h> 28#include <linux/dma-mapping.h>
34#define SET_MODULE_VERSION(VER)
35
36#include <linux/idr.h> 29#include <linux/idr.h>
37
38#include <linux/interrupt.h> 30#include <linux/interrupt.h>
39#include <linux/cdev.h> 31#include <linux/cdev.h>
40#include <linux/fs.h> 32#include <linux/fs.h>
41#include <linux/delay.h> 33#include <linux/delay.h>
42#include <linux/vmalloc.h> 34#include <linux/vmalloc.h>
43
44#include <linux/workqueue.h> 35#include <linux/workqueue.h>
45 36#include <linux/bitops.h>
46#include <scsi/scsi.h> 37#include <scsi/scsi.h>
47#include <scsi/scsi_host.h> 38#include <scsi/scsi_host.h>
48
49#include <scsi/scsi_tcq.h> 39#include <scsi/scsi_tcq.h>
50#include <scsi/scsi_transport_fc.h> 40#include <scsi/scsi_transport_fc.h>
51#include <scsi/scsi_transport.h> 41#include <scsi/scsi_transport.h>
@@ -54,97 +44,75 @@
54#define __BIGENDIAN 44#define __BIGENDIAN
55#endif 45#endif
56 46
57#define BFA_ERR KERN_ERR 47static inline u64 bfa_os_get_clock(void)
58#define BFA_WARNING KERN_WARNING 48{
59#define BFA_NOTICE KERN_NOTICE 49 return jiffies;
60#define BFA_INFO KERN_INFO 50}
61#define BFA_DEBUG KERN_DEBUG 51
62 52static inline u64 bfa_os_get_log_time(void)
63#define LOG_BFAD_INIT 0x00000001 53{
64#define LOG_FCP_IO 0x00000002 54 u64 system_time = 0;
65 55 struct timeval tv;
66#ifdef DEBUG 56 do_gettimeofday(&tv);
67#define BFA_LOG_TRACE(bfad, level, mask, fmt, arg...) \ 57
68 BFA_LOG(bfad, level, mask, fmt, ## arg) 58 /* We are interested in seconds only. */
69#define BFA_DEV_TRACE(bfad, level, fmt, arg...) \ 59 system_time = tv.tv_sec;
70 BFA_DEV_PRINTF(bfad, level, fmt, ## arg) 60 return system_time;
71#define BFA_TRACE(level, fmt, arg...) \ 61}
72 BFA_PRINTF(level, fmt, ## arg) 62
73#else 63#define bfa_io_lat_clock_res_div HZ
74#define BFA_LOG_TRACE(bfad, level, mask, fmt, arg...) 64#define bfa_io_lat_clock_res_mul 1000
75#define BFA_DEV_TRACE(bfad, level, fmt, arg...)
76#define BFA_TRACE(level, fmt, arg...)
77#endif
78 65
79#define BFA_ASSERT(p) do { \ 66#define BFA_ASSERT(p) do { \
80 if (!(p)) { \ 67 if (!(p)) { \
81 printk(KERN_ERR "assert(%s) failed at %s:%d\n", \ 68 printk(KERN_ERR "assert(%s) failed at %s:%d\n", \
82 #p, __FILE__, __LINE__); \ 69 #p, __FILE__, __LINE__); \
83 BUG(); \
84 } \ 70 } \
85} while (0) 71} while (0)
86 72
87 73#define BFA_LOG(level, bfad, mask, fmt, arg...) \
88#define BFA_LOG(bfad, level, mask, fmt, arg...) \ 74do { \
89do { \ 75 if (((mask) == 4) || (level[1] <= '4')) \
90 if (((mask) & (((struct bfad_s *)(bfad))-> \ 76 dev_printk(level, &((bfad)->pcidev)->dev, fmt, ##arg); \
91 cfg_data[cfg_log_mask])) || (level[1] <= '3')) \
92 dev_printk(level, &(((struct bfad_s *) \
93 (bfad))->pcidev->dev), fmt, ##arg); \
94} while (0) 77} while (0)
95 78
96#ifndef BFA_DEV_PRINTF
97#define BFA_DEV_PRINTF(bfad, level, fmt, arg...) \
98 dev_printk(level, &(((struct bfad_s *) \
99 (bfad))->pcidev->dev), fmt, ##arg);
100#endif
101
102#define BFA_PRINTF(level, fmt, arg...) \
103 printk(level fmt, ##arg);
104
105int bfa_os_MWB(void *);
106
107#define bfa_os_mmiowb() mmiowb()
108
109#define bfa_swap_3b(_x) \ 79#define bfa_swap_3b(_x) \
110 ((((_x) & 0xff) << 16) | \ 80 ((((_x) & 0xff) << 16) | \
111 ((_x) & 0x00ff00) | \ 81 ((_x) & 0x00ff00) | \
112 (((_x) & 0xff0000) >> 16)) 82 (((_x) & 0xff0000) >> 16))
113 83
114#define bfa_swap_8b(_x) \ 84#define bfa_swap_8b(_x) \
115 ((((_x) & 0xff00000000000000ull) >> 56) \ 85 ((((_x) & 0xff00000000000000ull) >> 56) \
116 | (((_x) & 0x00ff000000000000ull) >> 40) \ 86 | (((_x) & 0x00ff000000000000ull) >> 40) \
117 | (((_x) & 0x0000ff0000000000ull) >> 24) \ 87 | (((_x) & 0x0000ff0000000000ull) >> 24) \
118 | (((_x) & 0x000000ff00000000ull) >> 8) \ 88 | (((_x) & 0x000000ff00000000ull) >> 8) \
119 | (((_x) & 0x00000000ff000000ull) << 8) \ 89 | (((_x) & 0x00000000ff000000ull) << 8) \
120 | (((_x) & 0x0000000000ff0000ull) << 24) \ 90 | (((_x) & 0x0000000000ff0000ull) << 24) \
121 | (((_x) & 0x000000000000ff00ull) << 40) \ 91 | (((_x) & 0x000000000000ff00ull) << 40) \
122 | (((_x) & 0x00000000000000ffull) << 56)) 92 | (((_x) & 0x00000000000000ffull) << 56))
123 93
124#define bfa_os_swap32(_x) \ 94#define bfa_os_swap32(_x) \
125 ((((_x) & 0xff) << 24) | \ 95 ((((_x) & 0xff) << 24) | \
126 (((_x) & 0x0000ff00) << 8) | \ 96 (((_x) & 0x0000ff00) << 8) | \
127 (((_x) & 0x00ff0000) >> 8) | \ 97 (((_x) & 0x00ff0000) >> 8) | \
128 (((_x) & 0xff000000) >> 24)) 98 (((_x) & 0xff000000) >> 24))
129 99
130#define bfa_os_swap_sgaddr(_x) ((u64)( \ 100#define bfa_os_swap_sgaddr(_x) ((u64)( \
131 (((u64)(_x) & (u64)0x00000000000000ffull) << 32) | \ 101 (((u64)(_x) & (u64)0x00000000000000ffull) << 32) | \
132 (((u64)(_x) & (u64)0x000000000000ff00ull) << 32) | \ 102 (((u64)(_x) & (u64)0x000000000000ff00ull) << 32) | \
133 (((u64)(_x) & (u64)0x0000000000ff0000ull) << 32) | \ 103 (((u64)(_x) & (u64)0x0000000000ff0000ull) << 32) | \
134 (((u64)(_x) & (u64)0x00000000ff000000ull) << 32) | \ 104 (((u64)(_x) & (u64)0x00000000ff000000ull) << 32) | \
135 (((u64)(_x) & (u64)0x000000ff00000000ull) >> 32) | \ 105 (((u64)(_x) & (u64)0x000000ff00000000ull) >> 32) | \
136 (((u64)(_x) & (u64)0x0000ff0000000000ull) >> 32) | \ 106 (((u64)(_x) & (u64)0x0000ff0000000000ull) >> 32) | \
137 (((u64)(_x) & (u64)0x00ff000000000000ull) >> 32) | \ 107 (((u64)(_x) & (u64)0x00ff000000000000ull) >> 32) | \
138 (((u64)(_x) & (u64)0xff00000000000000ull) >> 32))) 108 (((u64)(_x) & (u64)0xff00000000000000ull) >> 32)))
139 109
140#ifndef __BIGENDIAN 110#ifndef __BIGENDIAN
141#define bfa_os_htons(_x) ((u16)((((_x) & 0xff00) >> 8) | \ 111#define bfa_os_htons(_x) ((u16)((((_x) & 0xff00) >> 8) | \
142 (((_x) & 0x00ff) << 8))) 112 (((_x) & 0x00ff) << 8)))
143
144#define bfa_os_htonl(_x) bfa_os_swap32(_x) 113#define bfa_os_htonl(_x) bfa_os_swap32(_x)
145#define bfa_os_htonll(_x) bfa_swap_8b(_x) 114#define bfa_os_htonll(_x) bfa_swap_8b(_x)
146#define bfa_os_hton3b(_x) bfa_swap_3b(_x) 115#define bfa_os_hton3b(_x) bfa_swap_3b(_x)
147
148#define bfa_os_wtole(_x) (_x) 116#define bfa_os_wtole(_x) (_x)
149#define bfa_os_sgaddr(_x) (_x) 117#define bfa_os_sgaddr(_x) (_x)
150 118
@@ -170,17 +138,16 @@ int bfa_os_MWB(void *);
170#define bfa_os_memcpy memcpy 138#define bfa_os_memcpy memcpy
171#define bfa_os_udelay udelay 139#define bfa_os_udelay udelay
172#define bfa_os_vsprintf vsprintf 140#define bfa_os_vsprintf vsprintf
141#define bfa_os_snprintf snprintf
173 142
174#define bfa_os_assign(__t, __s) __t = __s 143#define bfa_os_assign(__t, __s) __t = __s
175 144#define bfa_os_addr_t void __iomem *
176#define bfa_os_addr_t char __iomem *
177#define bfa_os_panic()
178 145
179#define bfa_os_reg_read(_raddr) readl(_raddr) 146#define bfa_os_reg_read(_raddr) readl(_raddr)
180#define bfa_os_reg_write(_raddr, _val) writel((_val), (_raddr)) 147#define bfa_os_reg_write(_raddr, _val) writel((_val), (_raddr))
181#define bfa_os_mem_read(_raddr, _off) \ 148#define bfa_os_mem_read(_raddr, _off) \
182 bfa_os_swap32(readl(((_raddr) + (_off)))) 149 bfa_os_swap32(readl(((_raddr) + (_off))))
183#define bfa_os_mem_write(_raddr, _off, _val) \ 150#define bfa_os_mem_write(_raddr, _off, _val) \
184 writel(bfa_os_swap32((_val)), ((_raddr) + (_off))) 151 writel(bfa_os_swap32((_val)), ((_raddr) + (_off)))
185 152
186#define BFA_TRC_TS(_trcm) \ 153#define BFA_TRC_TS(_trcm) \
@@ -191,11 +158,6 @@ int bfa_os_MWB(void *);
191 (tv.tv_sec*1000000+tv.tv_usec); \ 158 (tv.tv_sec*1000000+tv.tv_usec); \
192 }) 159 })
193 160
194struct bfa_log_mod_s;
195void bfa_os_printf(struct bfa_log_mod_s *log_mod, u32 msg_id,
196 const char *fmt, ...);
197#endif
198
199#define boolean_t int 161#define boolean_t int
200 162
201/** 163/**
@@ -206,7 +168,15 @@ struct bfa_timeval_s {
206 u32 tv_usec; /* microseconds */ 168 u32 tv_usec; /* microseconds */
207}; 169};
208 170
209void bfa_os_gettimeofday(struct bfa_timeval_s *tv); 171static inline void
172bfa_os_gettimeofday(struct bfa_timeval_s *tv)
173{
174 struct timeval tmp_tv;
175
176 do_gettimeofday(&tmp_tv);
177 tv->tv_sec = (u32) tmp_tv.tv_sec;
178 tv->tv_usec = (u32) tmp_tv.tv_usec;
179}
210 180
211static inline void 181static inline void
212wwn2str(char *wwn_str, u64 wwn) 182wwn2str(char *wwn_str, u64 wwn)
diff --git a/drivers/scsi/bfa/include/cs/bfa_plog.h b/drivers/scsi/bfa/bfa_plog.h
index f5bef63b5877..501f0ed35cf0 100644
--- a/drivers/scsi/bfa/include/cs/bfa_plog.h
+++ b/drivers/scsi/bfa/bfa_plog.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -17,8 +17,8 @@
17#ifndef __BFA_PORTLOG_H__ 17#ifndef __BFA_PORTLOG_H__
18#define __BFA_PORTLOG_H__ 18#define __BFA_PORTLOG_H__
19 19
20#include "protocol/fc.h" 20#include "bfa_fc.h"
21#include <defs/bfa_defs_types.h> 21#include "bfa_defs.h"
22 22
23#define BFA_PL_NLOG_ENTS 256 23#define BFA_PL_NLOG_ENTS 256
24#define BFA_PL_LOG_REC_INCR(_x) ((_x)++, (_x) %= BFA_PL_NLOG_ENTS) 24#define BFA_PL_LOG_REC_INCR(_x) ((_x)++, (_x) %= BFA_PL_NLOG_ENTS)
@@ -27,38 +27,30 @@
27#define BFA_PL_INT_LOG_SZ 8 /* number of integers in the integer log */ 27#define BFA_PL_INT_LOG_SZ 8 /* number of integers in the integer log */
28 28
29enum bfa_plog_log_type { 29enum bfa_plog_log_type {
30 BFA_PL_LOG_TYPE_INVALID = 0, 30 BFA_PL_LOG_TYPE_INVALID = 0,
31 BFA_PL_LOG_TYPE_INT = 1, 31 BFA_PL_LOG_TYPE_INT = 1,
32 BFA_PL_LOG_TYPE_STRING = 2, 32 BFA_PL_LOG_TYPE_STRING = 2,
33}; 33};
34 34
35/* 35/*
36 * the (fixed size) record format for each entry in the portlog 36 * the (fixed size) record format for each entry in the portlog
37 */ 37 */
38struct bfa_plog_rec_s { 38struct bfa_plog_rec_s {
39 u32 tv; /* Filled by the portlog driver when the * 39 u64 tv; /* timestamp */
40 * entry is added to the circular log. */ 40 u8 port; /* Source port that logged this entry */
41 u8 port; /* Source port that logged this entry. CM 41 u8 mid; /* module id */
42 * entities will use 0xFF */ 42 u8 eid; /* indicates Rx, Tx, IOCTL, etc. bfa_plog_eid */
43 u8 mid; /* Integer value to be used by all entities * 43 u8 log_type; /* string/integer log, bfa_plog_log_type_t */
44 * while logging. The module id to string * 44 u8 log_num_ints;
45 * conversion will be done by BFAL. See
46 * enum bfa_plog_mid */
47 u8 eid; /* indicates Rx, Tx, IOCTL, etc. See
48 * enum bfa_plog_eid */
49 u8 log_type; /* indicates string log or integer log.
50 * see bfa_plog_log_type_t */
51 u8 log_num_ints;
52 /* 45 /*
53 * interpreted only if log_type is INT_LOG. indicates number of 46 * interpreted only if log_type is INT_LOG. indicates number of
54 * integers in the int_log[] (0-PL_INT_LOG_SZ). 47 * integers in the int_log[] (0-PL_INT_LOG_SZ).
55 */ 48 */
56 u8 rsvd; 49 u8 rsvd;
57 u16 misc; /* can be used to indicate fc frame length, 50 u16 misc; /* can be used to indicate fc frame length */
58 *etc.. */
59 union { 51 union {
60 char string_log[BFA_PL_STRING_LOG_SZ]; 52 char string_log[BFA_PL_STRING_LOG_SZ];
61 u32 int_log[BFA_PL_INT_LOG_SZ]; 53 u32 int_log[BFA_PL_INT_LOG_SZ];
62 } log_entry; 54 } log_entry;
63 55
64}; 56};
@@ -73,20 +65,20 @@ struct bfa_plog_rec_s {
73 * - Do not remove any entry or rearrange the order. 65 * - Do not remove any entry or rearrange the order.
74 */ 66 */
75enum bfa_plog_mid { 67enum bfa_plog_mid {
76 BFA_PL_MID_INVALID = 0, 68 BFA_PL_MID_INVALID = 0,
77 BFA_PL_MID_DEBUG = 1, 69 BFA_PL_MID_DEBUG = 1,
78 BFA_PL_MID_DRVR = 2, 70 BFA_PL_MID_DRVR = 2,
79 BFA_PL_MID_HAL = 3, 71 BFA_PL_MID_HAL = 3,
80 BFA_PL_MID_HAL_FCXP = 4, 72 BFA_PL_MID_HAL_FCXP = 4,
81 BFA_PL_MID_HAL_UF = 5, 73 BFA_PL_MID_HAL_UF = 5,
82 BFA_PL_MID_FCS = 6, 74 BFA_PL_MID_FCS = 6,
83 BFA_PL_MID_LPS = 7, 75 BFA_PL_MID_LPS = 7,
84 BFA_PL_MID_MAX = 8 76 BFA_PL_MID_MAX = 8
85}; 77};
86 78
87#define BFA_PL_MID_STRLEN 8 79#define BFA_PL_MID_STRLEN 8
88struct bfa_plog_mid_strings_s { 80struct bfa_plog_mid_strings_s {
89 char m_str[BFA_PL_MID_STRLEN]; 81 char m_str[BFA_PL_MID_STRLEN];
90}; 82};
91 83
92/* 84/*
@@ -99,36 +91,37 @@ struct bfa_plog_mid_strings_s {
99 * - Do not remove any entry or rearrange the order. 91 * - Do not remove any entry or rearrange the order.
100 */ 92 */
101enum bfa_plog_eid { 93enum bfa_plog_eid {
102 BFA_PL_EID_INVALID = 0, 94 BFA_PL_EID_INVALID = 0,
103 BFA_PL_EID_IOC_DISABLE = 1, 95 BFA_PL_EID_IOC_DISABLE = 1,
104 BFA_PL_EID_IOC_ENABLE = 2, 96 BFA_PL_EID_IOC_ENABLE = 2,
105 BFA_PL_EID_PORT_DISABLE = 3, 97 BFA_PL_EID_PORT_DISABLE = 3,
106 BFA_PL_EID_PORT_ENABLE = 4, 98 BFA_PL_EID_PORT_ENABLE = 4,
107 BFA_PL_EID_PORT_ST_CHANGE = 5, 99 BFA_PL_EID_PORT_ST_CHANGE = 5,
108 BFA_PL_EID_TX = 6, 100 BFA_PL_EID_TX = 6,
109 BFA_PL_EID_TX_ACK1 = 7, 101 BFA_PL_EID_TX_ACK1 = 7,
110 BFA_PL_EID_TX_RJT = 8, 102 BFA_PL_EID_TX_RJT = 8,
111 BFA_PL_EID_TX_BSY = 9, 103 BFA_PL_EID_TX_BSY = 9,
112 BFA_PL_EID_RX = 10, 104 BFA_PL_EID_RX = 10,
113 BFA_PL_EID_RX_ACK1 = 11, 105 BFA_PL_EID_RX_ACK1 = 11,
114 BFA_PL_EID_RX_RJT = 12, 106 BFA_PL_EID_RX_RJT = 12,
115 BFA_PL_EID_RX_BSY = 13, 107 BFA_PL_EID_RX_BSY = 13,
116 BFA_PL_EID_CT_IN = 14, 108 BFA_PL_EID_CT_IN = 14,
117 BFA_PL_EID_CT_OUT = 15, 109 BFA_PL_EID_CT_OUT = 15,
118 BFA_PL_EID_DRIVER_START = 16, 110 BFA_PL_EID_DRIVER_START = 16,
119 BFA_PL_EID_RSCN = 17, 111 BFA_PL_EID_RSCN = 17,
120 BFA_PL_EID_DEBUG = 18, 112 BFA_PL_EID_DEBUG = 18,
121 BFA_PL_EID_MISC = 19, 113 BFA_PL_EID_MISC = 19,
122 BFA_PL_EID_FIP_FCF_DISC = 20, 114 BFA_PL_EID_FIP_FCF_DISC = 20,
123 BFA_PL_EID_FIP_FCF_CVL = 21, 115 BFA_PL_EID_FIP_FCF_CVL = 21,
124 BFA_PL_EID_LOGIN = 22, 116 BFA_PL_EID_LOGIN = 22,
125 BFA_PL_EID_LOGO = 23, 117 BFA_PL_EID_LOGO = 23,
126 BFA_PL_EID_MAX = 24 118 BFA_PL_EID_TRUNK_SCN = 24,
119 BFA_PL_EID_MAX
127}; 120};
128 121
129#define BFA_PL_ENAME_STRLEN 8 122#define BFA_PL_ENAME_STRLEN 8
130struct bfa_plog_eid_strings_s { 123struct bfa_plog_eid_strings_s {
131 char e_str[BFA_PL_ENAME_STRLEN]; 124 char e_str[BFA_PL_ENAME_STRLEN];
132}; 125};
133 126
134#define BFA_PL_SIG_LEN 8 127#define BFA_PL_SIG_LEN 8
@@ -138,12 +131,12 @@ struct bfa_plog_eid_strings_s {
138 * per port circular log buffer 131 * per port circular log buffer
139 */ 132 */
140struct bfa_plog_s { 133struct bfa_plog_s {
141 char plog_sig[BFA_PL_SIG_LEN]; /* Start signature */ 134 char plog_sig[BFA_PL_SIG_LEN]; /* Start signature */
142 u8 plog_enabled; 135 u8 plog_enabled;
143 u8 rsvd[7]; 136 u8 rsvd[7];
144 u32 ticks; 137 u32 ticks;
145 u16 head; 138 u16 head;
146 u16 tail; 139 u16 tail;
147 struct bfa_plog_rec_s plog_recs[BFA_PL_NLOG_ENTS]; 140 struct bfa_plog_rec_s plog_recs[BFA_PL_NLOG_ENTS];
148}; 141};
149 142
@@ -154,8 +147,7 @@ void bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
154 enum bfa_plog_eid event, u16 misc, 147 enum bfa_plog_eid event, u16 misc,
155 u32 *intarr, u32 num_ints); 148 u32 *intarr, u32 num_ints);
156void bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid, 149void bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
157 enum bfa_plog_eid event, u16 misc, 150 enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr);
158 struct fchs_s *fchdr);
159void bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid, 151void bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
160 enum bfa_plog_eid event, u16 misc, 152 enum bfa_plog_eid event, u16 misc,
161 struct fchs_s *fchdr, u32 pld_w0); 153 struct fchs_s *fchdr, u32 pld_w0);
diff --git a/drivers/scsi/bfa/bfa_port.c b/drivers/scsi/bfa/bfa_port.c
index c7e69f1e56e3..b6d170a13bea 100644
--- a/drivers/scsi/bfa/bfa_port.c
+++ b/drivers/scsi/bfa/bfa_port.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -15,30 +15,25 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18#include <defs/bfa_defs_port.h> 18#include "bfa_defs_svc.h"
19#include <cs/bfa_trc.h> 19#include "bfa_port.h"
20#include <cs/bfa_log.h> 20#include "bfi.h"
21#include <cs/bfa_debug.h> 21#include "bfa_ioc.h"
22#include <port/bfa_port.h> 22
23#include <bfi/bfi.h>
24#include <bfi/bfi_port.h>
25#include <bfa_ioc.h>
26#include <cna/bfa_cna_trcmod.h>
27 23
28BFA_TRC_FILE(CNA, PORT); 24BFA_TRC_FILE(CNA, PORT);
29 25
30#define bfa_ioc_portid(__ioc) ((__ioc)->port_id) 26#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
31#define bfa_lpuid(__arg) bfa_ioc_portid(&(__arg)->ioc)
32 27
33static void 28static void
34bfa_port_stats_swap(struct bfa_port_s *port, union bfa_pport_stats_u *stats) 29bfa_port_stats_swap(struct bfa_port_s *port, union bfa_port_stats_u *stats)
35{ 30{
36 u32 *dip = (u32 *) stats; 31 u32 *dip = (u32 *) stats;
37 u32 t0, t1; 32 u32 t0, t1;
38 int i; 33 int i;
39 34
40 for (i = 0; i < sizeof(union bfa_pport_stats_u) / sizeof(u32); 35 for (i = 0; i < sizeof(union bfa_port_stats_u)/sizeof(u32);
41 i += 2) { 36 i += 2) {
42 t0 = dip[i]; 37 t0 = dip[i];
43 t1 = dip[i + 1]; 38 t1 = dip[i + 1];
44#ifdef __BIGENDIAN 39#ifdef __BIGENDIAN
@@ -49,11 +44,6 @@ bfa_port_stats_swap(struct bfa_port_s *port, union bfa_pport_stats_u *stats)
49 dip[i + 1] = bfa_os_ntohl(t0); 44 dip[i + 1] = bfa_os_ntohl(t0);
50#endif 45#endif
51 } 46 }
52
53 /** todo
54 * QoS stats r also swapped as 64bit; that structure also
55 * has to use 64 bit counters
56 */
57} 47}
58 48
59/** 49/**
@@ -68,7 +58,9 @@ bfa_port_stats_swap(struct bfa_port_s *port, union bfa_pport_stats_u *stats)
68static void 58static void
69bfa_port_enable_isr(struct bfa_port_s *port, bfa_status_t status) 59bfa_port_enable_isr(struct bfa_port_s *port, bfa_status_t status)
70{ 60{
71 bfa_assert(0); 61 bfa_trc(port, status);
62 port->endis_pending = BFA_FALSE;
63 port->endis_cbfn(port->endis_cbarg, status);
72} 64}
73 65
74/** 66/**
@@ -83,7 +75,9 @@ bfa_port_enable_isr(struct bfa_port_s *port, bfa_status_t status)
83static void 75static void
84bfa_port_disable_isr(struct bfa_port_s *port, bfa_status_t status) 76bfa_port_disable_isr(struct bfa_port_s *port, bfa_status_t status)
85{ 77{
86 bfa_assert(0); 78 bfa_trc(port, status);
79 port->endis_pending = BFA_FALSE;
80 port->endis_cbfn(port->endis_cbarg, status);
87} 81}
88 82
89/** 83/**
@@ -105,7 +99,7 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
105 struct bfa_timeval_s tv; 99 struct bfa_timeval_s tv;
106 100
107 memcpy(port->stats, port->stats_dma.kva, 101 memcpy(port->stats, port->stats_dma.kva,
108 sizeof(union bfa_pport_stats_u)); 102 sizeof(union bfa_port_stats_u));
109 bfa_port_stats_swap(port, port->stats); 103 bfa_port_stats_swap(port, port->stats);
110 104
111 bfa_os_gettimeofday(&tv); 105 bfa_os_gettimeofday(&tv);
@@ -133,11 +127,11 @@ bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
133 struct bfa_timeval_s tv; 127 struct bfa_timeval_s tv;
134 128
135 port->stats_status = status; 129 port->stats_status = status;
136 port->stats_busy = BFA_FALSE; 130 port->stats_busy = BFA_FALSE;
137 131
138 /** 132 /**
139 * re-initialize time stamp for stats reset 133 * re-initialize time stamp for stats reset
140 */ 134 */
141 bfa_os_gettimeofday(&tv); 135 bfa_os_gettimeofday(&tv);
142 port->stats_reset_time = tv.tv_sec; 136 port->stats_reset_time = tv.tv_sec;
143 137
@@ -158,10 +152,10 @@ bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
158static void 152static void
159bfa_port_isr(void *cbarg, struct bfi_mbmsg_s *m) 153bfa_port_isr(void *cbarg, struct bfi_mbmsg_s *m)
160{ 154{
161 struct bfa_port_s *port = (struct bfa_port_s *)cbarg; 155 struct bfa_port_s *port = (struct bfa_port_s *) cbarg;
162 union bfi_port_i2h_msg_u *i2hmsg; 156 union bfi_port_i2h_msg_u *i2hmsg;
163 157
164 i2hmsg = (union bfi_port_i2h_msg_u *)m; 158 i2hmsg = (union bfi_port_i2h_msg_u *) m;
165 bfa_trc(port, m->mh.msg_id); 159 bfa_trc(port, m->mh.msg_id);
166 160
167 switch (m->mh.msg_id) { 161 switch (m->mh.msg_id) {
@@ -178,9 +172,7 @@ bfa_port_isr(void *cbarg, struct bfi_mbmsg_s *m)
178 break; 172 break;
179 173
180 case BFI_PORT_I2H_GET_STATS_RSP: 174 case BFI_PORT_I2H_GET_STATS_RSP:
181 /* 175 /* Stats busy flag is still set? (may be cmd timed out) */
182 * Stats busy flag is still set? (may be cmd timed out)
183 */
184 if (port->stats_busy == BFA_FALSE) 176 if (port->stats_busy == BFA_FALSE)
185 break; 177 break;
186 bfa_port_get_stats_isr(port, i2hmsg->getstats_rsp.status); 178 bfa_port_get_stats_isr(port, i2hmsg->getstats_rsp.status);
@@ -208,7 +200,7 @@ bfa_port_isr(void *cbarg, struct bfi_mbmsg_s *m)
208u32 200u32
209bfa_port_meminfo(void) 201bfa_port_meminfo(void)
210{ 202{
211 return BFA_ROUNDUP(sizeof(union bfa_pport_stats_u), BFA_DMA_ALIGN_SZ); 203 return BFA_ROUNDUP(sizeof(union bfa_port_stats_u), BFA_DMA_ALIGN_SZ);
212} 204}
213 205
214/** 206/**
@@ -216,8 +208,8 @@ bfa_port_meminfo(void)
216 * 208 *
217 * 209 *
218 * @param[in] port Port module pointer 210 * @param[in] port Port module pointer
219 * dma_kva Kernel Virtual Address of Port DMA Memory 211 * dma_kva Kernel Virtual Address of Port DMA Memory
220 * dma_pa Physical Address of Port DMA Memory 212 * dma_pa Physical Address of Port DMA Memory
221 * 213 *
222 * @return void 214 * @return void
223 */ 215 */
@@ -225,7 +217,7 @@ void
225bfa_port_mem_claim(struct bfa_port_s *port, u8 *dma_kva, u64 dma_pa) 217bfa_port_mem_claim(struct bfa_port_s *port, u8 *dma_kva, u64 dma_pa)
226{ 218{
227 port->stats_dma.kva = dma_kva; 219 port->stats_dma.kva = dma_kva;
228 port->stats_dma.pa = dma_pa; 220 port->stats_dma.pa = dma_pa;
229} 221}
230 222
231/** 223/**
@@ -239,12 +231,14 @@ bfa_port_mem_claim(struct bfa_port_s *port, u8 *dma_kva, u64 dma_pa)
239 */ 231 */
240bfa_status_t 232bfa_status_t
241bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn, 233bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
242 void *cbarg) 234 void *cbarg)
243{ 235{
244 struct bfi_port_generic_req_s *m; 236 struct bfi_port_generic_req_s *m;
245 237
246 /** todo Not implemented */ 238 if (bfa_ioc_is_disabled(port->ioc)) {
247 bfa_assert(0); 239 bfa_trc(port, BFA_STATUS_IOC_DISABLED);
240 return BFA_STATUS_IOC_DISABLED;
241 }
248 242
249 if (!bfa_ioc_is_operational(port->ioc)) { 243 if (!bfa_ioc_is_operational(port->ioc)) {
250 bfa_trc(port, BFA_STATUS_IOC_FAILURE); 244 bfa_trc(port, BFA_STATUS_IOC_FAILURE);
@@ -256,11 +250,11 @@ bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
256 return BFA_STATUS_DEVBUSY; 250 return BFA_STATUS_DEVBUSY;
257 } 251 }
258 252
259 m = (struct bfi_port_generic_req_s *)port->endis_mb.msg; 253 m = (struct bfi_port_generic_req_s *) port->endis_mb.msg;
260 254
261 port->msgtag++; 255 port->msgtag++;
262 port->endis_cbfn = cbfn; 256 port->endis_cbfn = cbfn;
263 port->endis_cbarg = cbarg; 257 port->endis_cbarg = cbarg;
264 port->endis_pending = BFA_TRUE; 258 port->endis_pending = BFA_TRUE;
265 259
266 bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_ENABLE_REQ, 260 bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_ENABLE_REQ,
@@ -281,12 +275,14 @@ bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
281 */ 275 */
282bfa_status_t 276bfa_status_t
283bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn, 277bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
284 void *cbarg) 278 void *cbarg)
285{ 279{
286 struct bfi_port_generic_req_s *m; 280 struct bfi_port_generic_req_s *m;
287 281
288 /** todo Not implemented */ 282 if (bfa_ioc_is_disabled(port->ioc)) {
289 bfa_assert(0); 283 bfa_trc(port, BFA_STATUS_IOC_DISABLED);
284 return BFA_STATUS_IOC_DISABLED;
285 }
290 286
291 if (!bfa_ioc_is_operational(port->ioc)) { 287 if (!bfa_ioc_is_operational(port->ioc)) {
292 bfa_trc(port, BFA_STATUS_IOC_FAILURE); 288 bfa_trc(port, BFA_STATUS_IOC_FAILURE);
@@ -298,11 +294,11 @@ bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
298 return BFA_STATUS_DEVBUSY; 294 return BFA_STATUS_DEVBUSY;
299 } 295 }
300 296
301 m = (struct bfi_port_generic_req_s *)port->endis_mb.msg; 297 m = (struct bfi_port_generic_req_s *) port->endis_mb.msg;
302 298
303 port->msgtag++; 299 port->msgtag++;
304 port->endis_cbfn = cbfn; 300 port->endis_cbfn = cbfn;
305 port->endis_cbarg = cbarg; 301 port->endis_cbarg = cbarg;
306 port->endis_pending = BFA_TRUE; 302 port->endis_pending = BFA_TRUE;
307 303
308 bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_DISABLE_REQ, 304 bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_DISABLE_REQ,
@@ -322,8 +318,8 @@ bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
322 * @return Status 318 * @return Status
323 */ 319 */
324bfa_status_t 320bfa_status_t
325bfa_port_get_stats(struct bfa_port_s *port, union bfa_pport_stats_u *stats, 321bfa_port_get_stats(struct bfa_port_s *port, union bfa_port_stats_u *stats,
326 bfa_port_stats_cbfn_t cbfn, void *cbarg) 322 bfa_port_stats_cbfn_t cbfn, void *cbarg)
327{ 323{
328 struct bfi_port_get_stats_req_s *m; 324 struct bfi_port_get_stats_req_s *m;
329 325
@@ -337,12 +333,12 @@ bfa_port_get_stats(struct bfa_port_s *port, union bfa_pport_stats_u *stats,
337 return BFA_STATUS_DEVBUSY; 333 return BFA_STATUS_DEVBUSY;
338 } 334 }
339 335
340 m = (struct bfi_port_get_stats_req_s *)port->stats_mb.msg; 336 m = (struct bfi_port_get_stats_req_s *) port->stats_mb.msg;
341 337
342 port->stats = stats; 338 port->stats = stats;
343 port->stats_cbfn = cbfn; 339 port->stats_cbfn = cbfn;
344 port->stats_cbarg = cbarg; 340 port->stats_cbarg = cbarg;
345 port->stats_busy = BFA_TRUE; 341 port->stats_busy = BFA_TRUE;
346 bfa_dma_be_addr_set(m->dma_addr, port->stats_dma.pa); 342 bfa_dma_be_addr_set(m->dma_addr, port->stats_dma.pa);
347 343
348 bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_GET_STATS_REQ, 344 bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_GET_STATS_REQ,
@@ -362,7 +358,7 @@ bfa_port_get_stats(struct bfa_port_s *port, union bfa_pport_stats_u *stats,
362 */ 358 */
363bfa_status_t 359bfa_status_t
364bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn, 360bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn,
365 void *cbarg) 361 void *cbarg)
366{ 362{
367 struct bfi_port_generic_req_s *m; 363 struct bfi_port_generic_req_s *m;
368 364
@@ -376,11 +372,11 @@ bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn,
376 return BFA_STATUS_DEVBUSY; 372 return BFA_STATUS_DEVBUSY;
377 } 373 }
378 374
379 m = (struct bfi_port_generic_req_s *)port->stats_mb.msg; 375 m = (struct bfi_port_generic_req_s *) port->stats_mb.msg;
380 376
381 port->stats_cbfn = cbfn; 377 port->stats_cbfn = cbfn;
382 port->stats_cbarg = cbarg; 378 port->stats_cbarg = cbarg;
383 port->stats_busy = BFA_TRUE; 379 port->stats_busy = BFA_TRUE;
384 380
385 bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_CLEAR_STATS_REQ, 381 bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_CLEAR_STATS_REQ,
386 bfa_ioc_portid(port->ioc)); 382 bfa_ioc_portid(port->ioc));
@@ -400,11 +396,9 @@ bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn,
400void 396void
401bfa_port_hbfail(void *arg) 397bfa_port_hbfail(void *arg)
402{ 398{
403 struct bfa_port_s *port = (struct bfa_port_s *)arg; 399 struct bfa_port_s *port = (struct bfa_port_s *) arg;
404 400
405 /* 401 /* Fail any pending get_stats/clear_stats requests */
406 * Fail any pending get_stats/clear_stats requests
407 */
408 if (port->stats_busy) { 402 if (port->stats_busy) {
409 if (port->stats_cbfn) 403 if (port->stats_cbfn)
410 port->stats_cbfn(port->stats_cbarg, BFA_STATUS_FAILED); 404 port->stats_cbfn(port->stats_cbarg, BFA_STATUS_FAILED);
@@ -412,9 +406,7 @@ bfa_port_hbfail(void *arg)
412 port->stats_busy = BFA_FALSE; 406 port->stats_busy = BFA_FALSE;
413 } 407 }
414 408
415 /* 409 /* Clear any enable/disable is pending */
416 * Clear any enable/disable is pending
417 */
418 if (port->endis_pending) { 410 if (port->endis_pending) {
419 if (port->endis_cbfn) 411 if (port->endis_cbfn)
420 port->endis_cbfn(port->endis_cbarg, BFA_STATUS_FAILED); 412 port->endis_cbfn(port->endis_cbarg, BFA_STATUS_FAILED);
@@ -433,22 +425,20 @@ bfa_port_hbfail(void *arg)
433 * The device driver specific mbox ISR functions have 425 * The device driver specific mbox ISR functions have
434 * this pointer as one of the parameters. 426 * this pointer as one of the parameters.
435 * trcmod - 427 * trcmod -
436 * logmod -
437 * 428 *
438 * @return void 429 * @return void
439 */ 430 */
440void 431void
441bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, void *dev, 432bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
442 struct bfa_trc_mod_s *trcmod, struct bfa_log_mod_s *logmod) 433 void *dev, struct bfa_trc_mod_s *trcmod)
443{ 434{
444 struct bfa_timeval_s tv; 435 struct bfa_timeval_s tv;
445 436
446 bfa_assert(port); 437 bfa_assert(port);
447 438
448 port->dev = dev; 439 port->dev = dev;
449 port->ioc = ioc; 440 port->ioc = ioc;
450 port->trcmod = trcmod; 441 port->trcmod = trcmod;
451 port->logmod = logmod;
452 442
453 port->stats_busy = BFA_FALSE; 443 port->stats_busy = BFA_FALSE;
454 port->endis_pending = BFA_FALSE; 444 port->endis_pending = BFA_FALSE;
diff --git a/drivers/scsi/bfa/bfa_port.h b/drivers/scsi/bfa/bfa_port.h
new file mode 100644
index 000000000000..dbce9dfd056b
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_port.h
@@ -0,0 +1,66 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_PORT_H__
19#define __BFA_PORT_H__
20
21#include "bfa_defs_svc.h"
22#include "bfa_ioc.h"
23#include "bfa_cs.h"
24
25typedef void (*bfa_port_stats_cbfn_t) (void *dev, bfa_status_t status);
26typedef void (*bfa_port_endis_cbfn_t) (void *dev, bfa_status_t status);
27
28struct bfa_port_s {
29 void *dev;
30 struct bfa_ioc_s *ioc;
31 struct bfa_trc_mod_s *trcmod;
32 u32 msgtag;
33 bfa_boolean_t stats_busy;
34 struct bfa_mbox_cmd_s stats_mb;
35 bfa_port_stats_cbfn_t stats_cbfn;
36 void *stats_cbarg;
37 bfa_status_t stats_status;
38 u32 stats_reset_time;
39 union bfa_port_stats_u *stats;
40 struct bfa_dma_s stats_dma;
41 bfa_boolean_t endis_pending;
42 struct bfa_mbox_cmd_s endis_mb;
43 bfa_port_endis_cbfn_t endis_cbfn;
44 void *endis_cbarg;
45 bfa_status_t endis_status;
46 struct bfa_ioc_hbfail_notify_s hbfail;
47};
48
49void bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
50 void *dev, struct bfa_trc_mod_s *trcmod);
51void bfa_port_detach(struct bfa_port_s *port);
52void bfa_port_hbfail(void *arg);
53
54bfa_status_t bfa_port_get_stats(struct bfa_port_s *port,
55 union bfa_port_stats_u *stats,
56 bfa_port_stats_cbfn_t cbfn, void *cbarg);
57bfa_status_t bfa_port_clear_stats(struct bfa_port_s *port,
58 bfa_port_stats_cbfn_t cbfn, void *cbarg);
59bfa_status_t bfa_port_enable(struct bfa_port_s *port,
60 bfa_port_endis_cbfn_t cbfn, void *cbarg);
61bfa_status_t bfa_port_disable(struct bfa_port_s *port,
62 bfa_port_endis_cbfn_t cbfn, void *cbarg);
63u32 bfa_port_meminfo(void);
64void bfa_port_mem_claim(struct bfa_port_s *port,
65 u8 *dma_kva, u64 dma_pa);
66#endif /* __BFA_PORT_H__ */
diff --git a/drivers/scsi/bfa/bfa_port_priv.h b/drivers/scsi/bfa/bfa_port_priv.h
deleted file mode 100644
index c9ebe0426fa6..000000000000
--- a/drivers/scsi/bfa/bfa_port_priv.h
+++ /dev/null
@@ -1,94 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_PORT_PRIV_H__
19#define __BFA_PORT_PRIV_H__
20
21#include <defs/bfa_defs_pport.h>
22#include <bfi/bfi_pport.h>
23#include "bfa_intr_priv.h"
24
25/**
26 * Link notification data structure
27 */
28struct bfa_fcport_ln_s {
29 struct bfa_fcport_s *fcport;
30 bfa_sm_t sm;
31 struct bfa_cb_qe_s ln_qe; /* BFA callback queue elem for ln */
32 enum bfa_pport_linkstate ln_event; /* ln event for callback */
33};
34
35/**
36 * BFA FC port data structure
37 */
38struct bfa_fcport_s {
39 struct bfa_s *bfa; /* parent BFA instance */
40 bfa_sm_t sm; /* port state machine */
41 wwn_t nwwn; /* node wwn of physical port */
42 wwn_t pwwn; /* port wwn of physical oprt */
43 enum bfa_pport_speed speed_sup;
44 /* supported speeds */
45 enum bfa_pport_speed speed; /* current speed */
46 enum bfa_pport_topology topology; /* current topology */
47 u8 myalpa; /* my ALPA in LOOP topology */
48 u8 rsvd[3];
49 u32 mypid:24;
50 u32 rsvd_b:8;
51 struct bfa_pport_cfg_s cfg; /* current port configuration */
52 struct bfa_qos_attr_s qos_attr; /* QoS Attributes */
53 struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */
54 struct bfa_reqq_wait_s reqq_wait;
55 /* to wait for room in reqq */
56 struct bfa_reqq_wait_s svcreq_wait;
57 /* to wait for room in reqq */
58 struct bfa_reqq_wait_s stats_reqq_wait;
59 /* to wait for room in reqq (stats) */
60 void *event_cbarg;
61 void (*event_cbfn) (void *cbarg,
62 bfa_pport_event_t event);
63 union {
64 union bfi_fcport_i2h_msg_u i2hmsg;
65 } event_arg;
66 void *bfad; /* BFA driver handle */
67 struct bfa_fcport_ln_s ln; /* Link Notification */
68 struct bfa_cb_qe_s hcb_qe; /* BFA callback queue elem */
69 struct bfa_timer_s timer; /* timer */
70 u32 msgtag; /* fimrware msg tag for reply */
71 u8 *stats_kva;
72 u64 stats_pa;
73 union bfa_fcport_stats_u *stats;
74 union bfa_fcport_stats_u *stats_ret; /* driver stats location */
75 bfa_status_t stats_status; /* stats/statsclr status */
76 bfa_boolean_t stats_busy; /* outstanding stats/statsclr */
77 bfa_boolean_t stats_qfull;
78 u32 stats_reset_time; /* stats reset time stamp */
79 bfa_cb_pport_t stats_cbfn; /* driver callback function */
80 void *stats_cbarg; /* user callback arg */
81 bfa_boolean_t diag_busy; /* diag busy status */
82 bfa_boolean_t beacon; /* port beacon status */
83 bfa_boolean_t link_e2e_beacon; /* link beacon status */
84};
85
86#define BFA_FCPORT_MOD(__bfa) (&(__bfa)->modules.fcport)
87
88/*
89 * public functions
90 */
91void bfa_fcport_init(struct bfa_s *bfa);
92void bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
93
94#endif /* __BFA_PORT_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_rport.c b/drivers/scsi/bfa/bfa_rport.c
deleted file mode 100644
index ccd0680f6f16..000000000000
--- a/drivers/scsi/bfa/bfa_rport.c
+++ /dev/null
@@ -1,906 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <bfa_svc.h>
20#include <cs/bfa_debug.h>
21#include <bfi/bfi_rport.h>
22#include "bfa_intr_priv.h"
23
24BFA_TRC_FILE(HAL, RPORT);
25BFA_MODULE(rport);
26
/*
 * Deliver the rport offline completion: synchronously when running in
 * FCS context ((__rp)->bfa->fcs set), otherwise deferred via the BFA
 * callback queue.
 */
 27#define bfa_rport_offline_cb(__rp) do { \
 28 if ((__rp)->bfa->fcs) \
 29 bfa_cb_rport_offline((__rp)->rport_drv); \
 30 else { \
 31 bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \
 32 __bfa_cb_rport_offline, (__rp)); \
 33 } \
 34} while (0)
 35
/*
 * Deliver the rport online completion: synchronous in FCS context,
 * otherwise deferred via the BFA callback queue.
 */
 36#define bfa_rport_online_cb(__rp) do { \
 37 if ((__rp)->bfa->fcs) \
 38 bfa_cb_rport_online((__rp)->rport_drv); \
 39 else { \
 40 bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \
 41 __bfa_cb_rport_online, (__rp)); \
 42 } \
 43} while (0)
44
45/*
46 * forward declarations
47 */
48static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
49static void bfa_rport_free(struct bfa_rport_s *rport);
50static bfa_boolean_t bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
51static bfa_boolean_t bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
52static bfa_boolean_t bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
53static void __bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete);
54static void __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete);
55
56/**
57 * bfa_rport_sm BFA rport state machine
58 */
59
60
61enum bfa_rport_event {
62 BFA_RPORT_SM_CREATE = 1, /* rport create event */
63 BFA_RPORT_SM_DELETE = 2, /* deleting an existing rport */
64 BFA_RPORT_SM_ONLINE = 3, /* rport is online */
65 BFA_RPORT_SM_OFFLINE = 4, /* rport is offline */
66 BFA_RPORT_SM_FWRSP = 5, /* firmware response */
67 BFA_RPORT_SM_HWFAIL = 6, /* IOC h/w failure */
68 BFA_RPORT_SM_QOS_SCN = 7, /* QoS SCN from firmware */
69 BFA_RPORT_SM_SET_SPEED = 8, /* Set Rport Speed */
70 BFA_RPORT_SM_QRESUME = 9, /* space in requeue queue */
71};
72
73static void bfa_rport_sm_uninit(struct bfa_rport_s *rp,
74 enum bfa_rport_event event);
75static void bfa_rport_sm_created(struct bfa_rport_s *rp,
76 enum bfa_rport_event event);
77static void bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
78 enum bfa_rport_event event);
79static void bfa_rport_sm_online(struct bfa_rport_s *rp,
80 enum bfa_rport_event event);
81static void bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
82 enum bfa_rport_event event);
83static void bfa_rport_sm_offline(struct bfa_rport_s *rp,
84 enum bfa_rport_event event);
85static void bfa_rport_sm_deleting(struct bfa_rport_s *rp,
86 enum bfa_rport_event event);
87static void bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
88 enum bfa_rport_event event);
89static void bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
90 enum bfa_rport_event event);
91static void bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
92 enum bfa_rport_event event);
93static void bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
94 enum bfa_rport_event event);
95static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
96 enum bfa_rport_event event);
97static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
98 enum bfa_rport_event event);
99
100/**
101 * Beginning state, only online event expected.
102 */
103static void
104bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
105{
106 bfa_trc(rp->bfa, rp->rport_tag);
107 bfa_trc(rp->bfa, event);
108
109 switch (event) {
110 case BFA_RPORT_SM_CREATE:
111 bfa_stats(rp, sm_un_cr);
112 bfa_sm_set_state(rp, bfa_rport_sm_created);
113 break;
114
115 default:
116 bfa_stats(rp, sm_un_unexp);
117 bfa_sm_fault(rp->bfa, event);
118 }
119}
120
121static void
122bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
123{
124 bfa_trc(rp->bfa, rp->rport_tag);
125 bfa_trc(rp->bfa, event);
126
127 switch (event) {
128 case BFA_RPORT_SM_ONLINE:
129 bfa_stats(rp, sm_cr_on);
130 if (bfa_rport_send_fwcreate(rp))
131 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
132 else
133 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
134 break;
135
136 case BFA_RPORT_SM_DELETE:
137 bfa_stats(rp, sm_cr_del);
138 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
139 bfa_rport_free(rp);
140 break;
141
142 case BFA_RPORT_SM_HWFAIL:
143 bfa_stats(rp, sm_cr_hwf);
144 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
145 break;
146
147 default:
148 bfa_stats(rp, sm_cr_unexp);
149 bfa_sm_fault(rp->bfa, event);
150 }
151}
152
153/**
154 * Waiting for rport create response from firmware.
155 */
156static void
157bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
158{
159 bfa_trc(rp->bfa, rp->rport_tag);
160 bfa_trc(rp->bfa, event);
161
162 switch (event) {
163 case BFA_RPORT_SM_FWRSP:
164 bfa_stats(rp, sm_fwc_rsp);
165 bfa_sm_set_state(rp, bfa_rport_sm_online);
166 bfa_rport_online_cb(rp);
167 break;
168
169 case BFA_RPORT_SM_DELETE:
170 bfa_stats(rp, sm_fwc_del);
171 bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
172 break;
173
174 case BFA_RPORT_SM_OFFLINE:
175 bfa_stats(rp, sm_fwc_off);
176 bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
177 break;
178
179 case BFA_RPORT_SM_HWFAIL:
180 bfa_stats(rp, sm_fwc_hwf);
181 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
182 break;
183
184 default:
185 bfa_stats(rp, sm_fwc_unexp);
186 bfa_sm_fault(rp->bfa, event);
187 }
188}
189
190/**
191 * Request queue is full, awaiting queue resume to send create request.
192 */
193static void
194bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
195{
196 bfa_trc(rp->bfa, rp->rport_tag);
197 bfa_trc(rp->bfa, event);
198
199 switch (event) {
200 case BFA_RPORT_SM_QRESUME:
201 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
202 bfa_rport_send_fwcreate(rp);
203 break;
204
205 case BFA_RPORT_SM_DELETE:
206 bfa_stats(rp, sm_fwc_del);
207 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
208 bfa_reqq_wcancel(&rp->reqq_wait);
209 bfa_rport_free(rp);
210 break;
211
212 case BFA_RPORT_SM_OFFLINE:
213 bfa_stats(rp, sm_fwc_off);
214 bfa_sm_set_state(rp, bfa_rport_sm_offline);
215 bfa_reqq_wcancel(&rp->reqq_wait);
216 bfa_rport_offline_cb(rp);
217 break;
218
219 case BFA_RPORT_SM_HWFAIL:
220 bfa_stats(rp, sm_fwc_hwf);
221 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
222 bfa_reqq_wcancel(&rp->reqq_wait);
223 break;
224
225 default:
226 bfa_stats(rp, sm_fwc_unexp);
227 bfa_sm_fault(rp->bfa, event);
228 }
229}
230
231/**
232 * Online state - normal parking state.
233 */
234static void
235bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
236{
237 struct bfi_rport_qos_scn_s *qos_scn;
238
239 bfa_trc(rp->bfa, rp->rport_tag);
240 bfa_trc(rp->bfa, event);
241
242 switch (event) {
243 case BFA_RPORT_SM_OFFLINE:
244 bfa_stats(rp, sm_on_off);
245 if (bfa_rport_send_fwdelete(rp))
246 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
247 else
248 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
249 break;
250
251 case BFA_RPORT_SM_DELETE:
252 bfa_stats(rp, sm_on_del);
253 if (bfa_rport_send_fwdelete(rp))
254 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
255 else
256 bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
257 break;
258
259 case BFA_RPORT_SM_HWFAIL:
260 bfa_stats(rp, sm_on_hwf);
261 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
262 break;
263
264 case BFA_RPORT_SM_SET_SPEED:
265 bfa_rport_send_fwspeed(rp);
266 break;
267
268 case BFA_RPORT_SM_QOS_SCN:
269 qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
270 rp->qos_attr = qos_scn->new_qos_attr;
271 bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
272 bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
273 bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
274 bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);
275
276 qos_scn->old_qos_attr.qos_flow_id =
277 bfa_os_ntohl(qos_scn->old_qos_attr.qos_flow_id);
278 qos_scn->new_qos_attr.qos_flow_id =
279 bfa_os_ntohl(qos_scn->new_qos_attr.qos_flow_id);
280 qos_scn->old_qos_attr.qos_priority =
281 bfa_os_ntohl(qos_scn->old_qos_attr.qos_priority);
282 qos_scn->new_qos_attr.qos_priority =
283 bfa_os_ntohl(qos_scn->new_qos_attr.qos_priority);
284
285 if (qos_scn->old_qos_attr.qos_flow_id !=
286 qos_scn->new_qos_attr.qos_flow_id)
287 bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
288 qos_scn->old_qos_attr,
289 qos_scn->new_qos_attr);
290 if (qos_scn->old_qos_attr.qos_priority !=
291 qos_scn->new_qos_attr.qos_priority)
292 bfa_cb_rport_qos_scn_prio(rp->rport_drv,
293 qos_scn->old_qos_attr,
294 qos_scn->new_qos_attr);
295 break;
296
297 default:
298 bfa_stats(rp, sm_on_unexp);
299 bfa_sm_fault(rp->bfa, event);
300 }
301}
302
303/**
304 * Firmware rport is being deleted - awaiting f/w response.
305 */
306static void
307bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
308{
309 bfa_trc(rp->bfa, rp->rport_tag);
310 bfa_trc(rp->bfa, event);
311
312 switch (event) {
313 case BFA_RPORT_SM_FWRSP:
314 bfa_stats(rp, sm_fwd_rsp);
315 bfa_sm_set_state(rp, bfa_rport_sm_offline);
316 bfa_rport_offline_cb(rp);
317 break;
318
319 case BFA_RPORT_SM_DELETE:
320 bfa_stats(rp, sm_fwd_del);
321 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
322 break;
323
324 case BFA_RPORT_SM_HWFAIL:
325 bfa_stats(rp, sm_fwd_hwf);
326 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
327 bfa_rport_offline_cb(rp);
328 break;
329
330 default:
331 bfa_stats(rp, sm_fwd_unexp);
332 bfa_sm_fault(rp->bfa, event);
333 }
334}
335
336static void
337bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
338{
339 bfa_trc(rp->bfa, rp->rport_tag);
340 bfa_trc(rp->bfa, event);
341
342 switch (event) {
343 case BFA_RPORT_SM_QRESUME:
344 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
345 bfa_rport_send_fwdelete(rp);
346 break;
347
348 case BFA_RPORT_SM_DELETE:
349 bfa_stats(rp, sm_fwd_del);
350 bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
351 break;
352
353 case BFA_RPORT_SM_HWFAIL:
354 bfa_stats(rp, sm_fwd_hwf);
355 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
356 bfa_reqq_wcancel(&rp->reqq_wait);
357 bfa_rport_offline_cb(rp);
358 break;
359
360 default:
361 bfa_stats(rp, sm_fwd_unexp);
362 bfa_sm_fault(rp->bfa, event);
363 }
364}
365
366/**
367 * Offline state.
368 */
369static void
370bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
371{
372 bfa_trc(rp->bfa, rp->rport_tag);
373 bfa_trc(rp->bfa, event);
374
375 switch (event) {
376 case BFA_RPORT_SM_DELETE:
377 bfa_stats(rp, sm_off_del);
378 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
379 bfa_rport_free(rp);
380 break;
381
382 case BFA_RPORT_SM_ONLINE:
383 bfa_stats(rp, sm_off_on);
384 if (bfa_rport_send_fwcreate(rp))
385 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
386 else
387 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
388 break;
389
390 case BFA_RPORT_SM_HWFAIL:
391 bfa_stats(rp, sm_off_hwf);
392 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
393 break;
394
395 default:
396 bfa_stats(rp, sm_off_unexp);
397 bfa_sm_fault(rp->bfa, event);
398 }
399}
400
401/**
402 * Rport is deleted, waiting for firmware response to delete.
403 */
404static void
405bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
406{
407 bfa_trc(rp->bfa, rp->rport_tag);
408 bfa_trc(rp->bfa, event);
409
410 switch (event) {
411 case BFA_RPORT_SM_FWRSP:
412 bfa_stats(rp, sm_del_fwrsp);
413 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
414 bfa_rport_free(rp);
415 break;
416
417 case BFA_RPORT_SM_HWFAIL:
418 bfa_stats(rp, sm_del_hwf);
419 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
420 bfa_rport_free(rp);
421 break;
422
423 default:
424 bfa_sm_fault(rp->bfa, event);
425 }
426}
427
428static void
429bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
430{
431 bfa_trc(rp->bfa, rp->rport_tag);
432 bfa_trc(rp->bfa, event);
433
434 switch (event) {
435 case BFA_RPORT_SM_QRESUME:
436 bfa_stats(rp, sm_del_fwrsp);
437 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
438 bfa_rport_send_fwdelete(rp);
439 break;
440
441 case BFA_RPORT_SM_HWFAIL:
442 bfa_stats(rp, sm_del_hwf);
443 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
444 bfa_reqq_wcancel(&rp->reqq_wait);
445 bfa_rport_free(rp);
446 break;
447
448 default:
449 bfa_sm_fault(rp->bfa, event);
450 }
451}
452
453/**
454 * Waiting for rport create response from firmware. A delete is pending.
455 */
456static void
457bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
458 enum bfa_rport_event event)
459{
460 bfa_trc(rp->bfa, rp->rport_tag);
461 bfa_trc(rp->bfa, event);
462
463 switch (event) {
464 case BFA_RPORT_SM_FWRSP:
465 bfa_stats(rp, sm_delp_fwrsp);
466 if (bfa_rport_send_fwdelete(rp))
467 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
468 else
469 bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
470 break;
471
472 case BFA_RPORT_SM_HWFAIL:
473 bfa_stats(rp, sm_delp_hwf);
474 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
475 bfa_rport_free(rp);
476 break;
477
478 default:
479 bfa_stats(rp, sm_delp_unexp);
480 bfa_sm_fault(rp->bfa, event);
481 }
482}
483
484/**
485 * Waiting for rport create response from firmware. Rport offline is pending.
486 */
487static void
488bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
489 enum bfa_rport_event event)
490{
491 bfa_trc(rp->bfa, rp->rport_tag);
492 bfa_trc(rp->bfa, event);
493
494 switch (event) {
495 case BFA_RPORT_SM_FWRSP:
496 bfa_stats(rp, sm_offp_fwrsp);
497 if (bfa_rport_send_fwdelete(rp))
498 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
499 else
500 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
501 break;
502
503 case BFA_RPORT_SM_DELETE:
504 bfa_stats(rp, sm_offp_del);
505 bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
506 break;
507
508 case BFA_RPORT_SM_HWFAIL:
509 bfa_stats(rp, sm_offp_hwf);
510 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
511 break;
512
513 default:
514 bfa_stats(rp, sm_offp_unexp);
515 bfa_sm_fault(rp->bfa, event);
516 }
517}
518
519/**
520 * IOC h/w failed.
521 */
522static void
523bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
524{
525 bfa_trc(rp->bfa, rp->rport_tag);
526 bfa_trc(rp->bfa, event);
527
528 switch (event) {
529 case BFA_RPORT_SM_OFFLINE:
530 bfa_stats(rp, sm_iocd_off);
531 bfa_rport_offline_cb(rp);
532 break;
533
534 case BFA_RPORT_SM_DELETE:
535 bfa_stats(rp, sm_iocd_del);
536 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
537 bfa_rport_free(rp);
538 break;
539
540 case BFA_RPORT_SM_ONLINE:
541 bfa_stats(rp, sm_iocd_on);
542 if (bfa_rport_send_fwcreate(rp))
543 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
544 else
545 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
546 break;
547
548 case BFA_RPORT_SM_HWFAIL:
549 break;
550
551 default:
552 bfa_stats(rp, sm_iocd_unexp);
553 bfa_sm_fault(rp->bfa, event);
554 }
555}
556
557
558
559/**
560 * bfa_rport_private BFA rport private functions
561 */
562
563static void
564__bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
565{
566 struct bfa_rport_s *rp = cbarg;
567
568 if (complete)
569 bfa_cb_rport_online(rp->rport_drv);
570}
571
572static void
573__bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
574{
575 struct bfa_rport_s *rp = cbarg;
576
577 if (complete)
578 bfa_cb_rport_offline(rp->rport_drv);
579}
580
581static void
582bfa_rport_qresume(void *cbarg)
583{
584 struct bfa_rport_s *rp = cbarg;
585
586 bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
587}
588
589static void
590bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
591 u32 *dm_len)
592{
593 if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
594 cfg->fwcfg.num_rports = BFA_RPORT_MIN;
595
596 *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s);
597}
598
599static void
600bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
601 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
602{
603 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
604 struct bfa_rport_s *rp;
605 u16 i;
606
607 INIT_LIST_HEAD(&mod->rp_free_q);
608 INIT_LIST_HEAD(&mod->rp_active_q);
609
610 rp = (struct bfa_rport_s *) bfa_meminfo_kva(meminfo);
611 mod->rps_list = rp;
612 mod->num_rports = cfg->fwcfg.num_rports;
613
614 bfa_assert(mod->num_rports
615 && !(mod->num_rports & (mod->num_rports - 1)));
616
617 for (i = 0; i < mod->num_rports; i++, rp++) {
618 bfa_os_memset(rp, 0, sizeof(struct bfa_rport_s));
619 rp->bfa = bfa;
620 rp->rport_tag = i;
621 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
622
623 /**
624 * - is unused
625 */
626 if (i)
627 list_add_tail(&rp->qe, &mod->rp_free_q);
628
629 bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
630 }
631
632 /**
633 * consume memory
634 */
635 bfa_meminfo_kva(meminfo) = (u8 *) rp;
636}
637
/* Module detach hook - nothing to tear down. */
static void
bfa_rport_detach(struct bfa_s *bfa)
{
}
642
/* Module start hook - nothing to do. */
static void
bfa_rport_start(struct bfa_s *bfa)
{
}
647
/* Module stop hook - nothing to do. */
static void
bfa_rport_stop(struct bfa_s *bfa)
{
}
652
653static void
654bfa_rport_iocdisable(struct bfa_s *bfa)
655{
656 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
657 struct bfa_rport_s *rport;
658 struct list_head *qe, *qen;
659
660 list_for_each_safe(qe, qen, &mod->rp_active_q) {
661 rport = (struct bfa_rport_s *) qe;
662 bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
663 }
664}
665
666static struct bfa_rport_s *
667bfa_rport_alloc(struct bfa_rport_mod_s *mod)
668{
669 struct bfa_rport_s *rport;
670
671 bfa_q_deq(&mod->rp_free_q, &rport);
672 if (rport)
673 list_add_tail(&rport->qe, &mod->rp_active_q);
674
675 return rport;
676}
677
678static void
679bfa_rport_free(struct bfa_rport_s *rport)
680{
681 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
682
683 bfa_assert(bfa_q_is_on_q(&mod->rp_active_q, rport));
684 list_del(&rport->qe);
685 list_add_tail(&rport->qe, &mod->rp_free_q);
686}
687
688static bfa_boolean_t
689bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
690{
691 struct bfi_rport_create_req_s *m;
692
693 /**
694 * check for room in queue to send request now
695 */
696 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
697 if (!m) {
698 bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
699 return BFA_FALSE;
700 }
701
702 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
703 bfa_lpuid(rp->bfa));
704 m->bfa_handle = rp->rport_tag;
705 m->max_frmsz = bfa_os_htons(rp->rport_info.max_frmsz);
706 m->pid = rp->rport_info.pid;
707 m->lp_tag = rp->rport_info.lp_tag;
708 m->local_pid = rp->rport_info.local_pid;
709 m->fc_class = rp->rport_info.fc_class;
710 m->vf_en = rp->rport_info.vf_en;
711 m->vf_id = rp->rport_info.vf_id;
712 m->cisc = rp->rport_info.cisc;
713
714 /**
715 * queue I/O message to firmware
716 */
717 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
718 return BFA_TRUE;
719}
720
721static bfa_boolean_t
722bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
723{
724 struct bfi_rport_delete_req_s *m;
725
726 /**
727 * check for room in queue to send request now
728 */
729 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
730 if (!m) {
731 bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
732 return BFA_FALSE;
733 }
734
735 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
736 bfa_lpuid(rp->bfa));
737 m->fw_handle = rp->fw_handle;
738
739 /**
740 * queue I/O message to firmware
741 */
742 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
743 return BFA_TRUE;
744}
745
746static bfa_boolean_t
747bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
748{
749 struct bfa_rport_speed_req_s *m;
750
751 /**
752 * check for room in queue to send request now
753 */
754 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
755 if (!m) {
756 bfa_trc(rp->bfa, rp->rport_info.speed);
757 return BFA_FALSE;
758 }
759
760 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
761 bfa_lpuid(rp->bfa));
762 m->fw_handle = rp->fw_handle;
763 m->speed = (u8)rp->rport_info.speed;
764
765 /**
766 * queue I/O message to firmware
767 */
768 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
769 return BFA_TRUE;
770}
771
772
773
774/**
775 * bfa_rport_public
776 */
777
778/**
779 * Rport interrupt processing.
780 */
781void
782bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
783{
784 union bfi_rport_i2h_msg_u msg;
785 struct bfa_rport_s *rp;
786
787 bfa_trc(bfa, m->mhdr.msg_id);
788
789 msg.msg = m;
790
791 switch (m->mhdr.msg_id) {
792 case BFI_RPORT_I2H_CREATE_RSP:
793 rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
794 rp->fw_handle = msg.create_rsp->fw_handle;
795 rp->qos_attr = msg.create_rsp->qos_attr;
796 bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
797 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
798 break;
799
800 case BFI_RPORT_I2H_DELETE_RSP:
801 rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
802 bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
803 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
804 break;
805
806 case BFI_RPORT_I2H_QOS_SCN:
807 rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
808 rp->event_arg.fw_msg = msg.qos_scn_evt;
809 bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
810 break;
811
812 default:
813 bfa_trc(bfa, m->mhdr.msg_id);
814 bfa_assert(0);
815 }
816}
817
818
819
820/**
821 * bfa_rport_api
822 */
823
824struct bfa_rport_s *
825bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
826{
827 struct bfa_rport_s *rp;
828
829 rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
830
831 if (rp == NULL)
832 return NULL;
833
834 rp->bfa = bfa;
835 rp->rport_drv = rport_drv;
836 bfa_rport_clear_stats(rp);
837
838 bfa_assert(bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
839 bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
840
841 return rp;
842}
843
844void
845bfa_rport_delete(struct bfa_rport_s *rport)
846{
847 bfa_sm_send_event(rport, BFA_RPORT_SM_DELETE);
848}
849
850void
851bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
852{
853 bfa_assert(rport_info->max_frmsz != 0);
854
855 /**
856 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
857 * responses. Default to minimum size.
858 */
859 if (rport_info->max_frmsz == 0) {
860 bfa_trc(rport->bfa, rport->rport_tag);
861 rport_info->max_frmsz = FC_MIN_PDUSZ;
862 }
863
864 bfa_os_assign(rport->rport_info, *rport_info);
865 bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
866}
867
868void
869bfa_rport_offline(struct bfa_rport_s *rport)
870{
871 bfa_sm_send_event(rport, BFA_RPORT_SM_OFFLINE);
872}
873
874void
875bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_pport_speed speed)
876{
877 bfa_assert(speed != 0);
878 bfa_assert(speed != BFA_PPORT_SPEED_AUTO);
879
880 rport->rport_info.speed = speed;
881 bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
882}
883
884void
885bfa_rport_get_stats(struct bfa_rport_s *rport,
886 struct bfa_rport_hal_stats_s *stats)
887{
888 *stats = rport->stats;
889}
890
891void
892bfa_rport_get_qos_attr(struct bfa_rport_s *rport,
893 struct bfa_rport_qos_attr_s *qos_attr)
894{
895 qos_attr->qos_priority = bfa_os_ntohl(rport->qos_attr.qos_priority);
896 qos_attr->qos_flow_id = bfa_os_ntohl(rport->qos_attr.qos_flow_id);
897
898}
899
900void
901bfa_rport_clear_stats(struct bfa_rport_s *rport)
902{
903 bfa_os_memset(&rport->stats, 0, sizeof(rport->stats));
904}
905
906
diff --git a/drivers/scsi/bfa/bfa_rport_priv.h b/drivers/scsi/bfa/bfa_rport_priv.h
deleted file mode 100644
index 6490ce2e990d..000000000000
--- a/drivers/scsi/bfa/bfa_rport_priv.h
+++ /dev/null
@@ -1,45 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_RPORT_PRIV_H__
19#define __BFA_RPORT_PRIV_H__
20
21#include <bfa_svc.h>
22
23#define BFA_RPORT_MIN 4
24
 25struct bfa_rport_mod_s {
 26 struct bfa_rport_s *rps_list; /* rport array, indexed by tag */
 27 struct list_head rp_free_q; /* free bfa_rports */
 28 struct list_head rp_active_q; /* allocated (in-use) bfa_rports */
 29 u16 num_rports; /* number of rports */
 30};
31
32#define BFA_RPORT_MOD(__bfa) (&(__bfa)->modules.rport_mod)
33
34/**
35 * Convert rport tag to RPORT
36 */
37#define BFA_RPORT_FROM_TAG(__bfa, _tag) \
38 (BFA_RPORT_MOD(__bfa)->rps_list + \
39 ((_tag) & (BFA_RPORT_MOD(__bfa)->num_rports - 1)))
40
41/*
42 * external functions
43 */
44void bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
45#endif /* __BFA_RPORT_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_sgpg.c b/drivers/scsi/bfa/bfa_sgpg.c
deleted file mode 100644
index ae452c42e40e..000000000000
--- a/drivers/scsi/bfa/bfa_sgpg.c
+++ /dev/null
@@ -1,226 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19
20BFA_TRC_FILE(HAL, SGPG);
21BFA_MODULE(sgpg);
22
23/**
24 * bfa_sgpg_mod BFA SGPG Mode module
25 */
26
27/**
28 * Compute and return memory needed by FCP(im) module.
29 */
30static void
31bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
32 u32 *dm_len)
33{
34 if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
35 cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
36
37 *km_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfa_sgpg_s);
38 *dm_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfi_sgpg_s);
39}
40
41
42static void
43bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
44 struct bfa_meminfo_s *minfo, struct bfa_pcidev_s *pcidev)
45{
46 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
47 int i;
48 struct bfa_sgpg_s *hsgpg;
49 struct bfi_sgpg_s *sgpg;
50 u64 align_len;
51
52 union {
53 u64 pa;
54 union bfi_addr_u addr;
55 } sgpg_pa;
56
57 INIT_LIST_HEAD(&mod->sgpg_q);
58 INIT_LIST_HEAD(&mod->sgpg_wait_q);
59
60 bfa_trc(bfa, cfg->drvcfg.num_sgpgs);
61
62 mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
63 mod->sgpg_arr_pa = bfa_meminfo_dma_phys(minfo);
64 align_len = (BFA_SGPG_ROUNDUP(mod->sgpg_arr_pa) - mod->sgpg_arr_pa);
65 mod->sgpg_arr_pa += align_len;
66 mod->hsgpg_arr = (struct bfa_sgpg_s *) (bfa_meminfo_kva(minfo) +
67 align_len);
68 mod->sgpg_arr = (struct bfi_sgpg_s *) (bfa_meminfo_dma_virt(minfo) +
69 align_len);
70
71 hsgpg = mod->hsgpg_arr;
72 sgpg = mod->sgpg_arr;
73 sgpg_pa.pa = mod->sgpg_arr_pa;
74 mod->free_sgpgs = mod->num_sgpgs;
75
76 bfa_assert(!(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1)));
77
78 for (i = 0; i < mod->num_sgpgs; i++) {
79 bfa_os_memset(hsgpg, 0, sizeof(*hsgpg));
80 bfa_os_memset(sgpg, 0, sizeof(*sgpg));
81
82 hsgpg->sgpg = sgpg;
83 hsgpg->sgpg_pa = sgpg_pa.addr;
84 list_add_tail(&hsgpg->qe, &mod->sgpg_q);
85
86 hsgpg++;
87 sgpg++;
88 sgpg_pa.pa += sizeof(struct bfi_sgpg_s);
89 }
90
91 bfa_meminfo_kva(minfo) = (u8 *) hsgpg;
92 bfa_meminfo_dma_virt(minfo) = (u8 *) sgpg;
93 bfa_meminfo_dma_phys(minfo) = sgpg_pa.pa;
94}
95
/* Module detach hook - nothing to tear down. */
static void
bfa_sgpg_detach(struct bfa_s *bfa)
{
}
100
/* Module start hook - nothing to do. */
static void
bfa_sgpg_start(struct bfa_s *bfa)
{
}
105
/* Module stop hook - nothing to do. */
static void
bfa_sgpg_stop(struct bfa_s *bfa)
{
}
110
/* IOC disable hook - SG pages need no cleanup. */
static void
bfa_sgpg_iocdisable(struct bfa_s *bfa)
{
}
115
116
117
118/**
119 * bfa_sgpg_public BFA SGPG public functions
120 */
121
122bfa_status_t
123bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
124{
125 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
126 struct bfa_sgpg_s *hsgpg;
127 int i;
128
129 bfa_trc_fp(bfa, nsgpgs);
130
131 if (mod->free_sgpgs < nsgpgs)
132 return BFA_STATUS_ENOMEM;
133
134 for (i = 0; i < nsgpgs; i++) {
135 bfa_q_deq(&mod->sgpg_q, &hsgpg);
136 bfa_assert(hsgpg);
137 list_add_tail(&hsgpg->qe, sgpg_q);
138 }
139
140 mod->free_sgpgs -= nsgpgs;
141 return BFA_STATUS_OK;
142}
143
144void
145bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
146{
147 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
148 struct bfa_sgpg_wqe_s *wqe;
149
150 bfa_trc_fp(bfa, nsgpg);
151
152 mod->free_sgpgs += nsgpg;
153 bfa_assert(mod->free_sgpgs <= mod->num_sgpgs);
154
155 list_splice_tail_init(sgpg_q, &mod->sgpg_q);
156
157 if (list_empty(&mod->sgpg_wait_q))
158 return;
159
160 /**
161 * satisfy as many waiting requests as possible
162 */
163 do {
164 wqe = bfa_q_first(&mod->sgpg_wait_q);
165 if (mod->free_sgpgs < wqe->nsgpg)
166 nsgpg = mod->free_sgpgs;
167 else
168 nsgpg = wqe->nsgpg;
169 bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
170 wqe->nsgpg -= nsgpg;
171 if (wqe->nsgpg == 0) {
172 list_del(&wqe->qe);
173 wqe->cbfn(wqe->cbarg);
174 }
175 } while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
176}
177
178void
179bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
180{
181 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
182
183 bfa_assert(nsgpg > 0);
184 bfa_assert(nsgpg > mod->free_sgpgs);
185
186 wqe->nsgpg_total = wqe->nsgpg = nsgpg;
187
188 /**
189 * allocate any left to this one first
190 */
191 if (mod->free_sgpgs) {
192 /**
193 * no one else is waiting for SGPG
194 */
195 bfa_assert(list_empty(&mod->sgpg_wait_q));
196 list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
197 wqe->nsgpg -= mod->free_sgpgs;
198 mod->free_sgpgs = 0;
199 }
200
201 list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
202}
203
204void
205bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
206{
207 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
208
209 bfa_assert(bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
210 list_del(&wqe->qe);
211
212 if (wqe->nsgpg_total != wqe->nsgpg)
213 bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
214 wqe->nsgpg_total - wqe->nsgpg);
215}
216
217void
218bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
219 void *cbarg)
220{
221 INIT_LIST_HEAD(&wqe->sgpg_q);
222 wqe->cbfn = cbfn;
223 wqe->cbarg = cbarg;
224}
225
226
diff --git a/drivers/scsi/bfa/bfa_sgpg_priv.h b/drivers/scsi/bfa/bfa_sgpg_priv.h
deleted file mode 100644
index 9c2a8cbe7522..000000000000
--- a/drivers/scsi/bfa/bfa_sgpg_priv.h
+++ /dev/null
@@ -1,79 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * hal_sgpg.h BFA SG page module
20 */
21
22#ifndef __BFA_SGPG_PRIV_H__
23#define __BFA_SGPG_PRIV_H__
24
25#include <cs/bfa_q.h>
26
27#define BFA_SGPG_MIN (16)
28
29/**
30 * Alignment macro for SG page allocation
31 */
32#define BFA_SGPG_ROUNDUP(_l) (((_l) + (sizeof(struct bfi_sgpg_s) - 1)) \
33 & ~(sizeof(struct bfi_sgpg_s) - 1))
34
35struct bfa_sgpg_wqe_s {
36 struct list_head qe; /* queue sg page element */
37 int nsgpg; /* pages to be allocated */
38 int nsgpg_total; /* total pages required */
39 void (*cbfn) (void *cbarg);
40 /* callback function */
41 void *cbarg; /* callback arg */
42 struct list_head sgpg_q; /* queue of alloced sgpgs */
43};
44
45struct bfa_sgpg_s {
46 struct list_head qe; /* queue sg page element */
47 struct bfi_sgpg_s *sgpg; /* va of SG page */
48 union bfi_addr_u sgpg_pa;/* pa of SG page */
49};
50
51/**
52 * Given number of SG elements, BFA_SGPG_NPAGE() returns the number of
53 * SG pages required.
54 */
55#define BFA_SGPG_NPAGE(_nsges) (((_nsges) / BFI_SGPG_DATA_SGES) + 1)
56
57struct bfa_sgpg_mod_s {
58 struct bfa_s *bfa;
59 int num_sgpgs; /* number of SG pages */
60 int free_sgpgs; /* number of free SG pages */
61 struct bfa_sgpg_s *hsgpg_arr; /* BFA SG page array */
62 struct bfi_sgpg_s *sgpg_arr; /* actual SG page array */
63 u64 sgpg_arr_pa; /* SG page array DMA addr */
64 struct list_head sgpg_q; /* queue of free SG pages */
65 struct list_head sgpg_wait_q; /* wait queue for SG pages */
66};
67#define BFA_SGPG_MOD(__bfa) (&(__bfa)->modules.sgpg_mod)
68
69bfa_status_t bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q,
70 int nsgpgs);
71void bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q,
72 int nsgpgs);
73void bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe,
74 void (*cbfn) (void *cbarg), void *cbarg);
75void bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe,
76 int nsgpgs);
77void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe);
78
79#endif /* __BFA_SGPG_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_sm.c b/drivers/scsi/bfa/bfa_sm.c
deleted file mode 100644
index 5420f4f45e58..000000000000
--- a/drivers/scsi/bfa/bfa_sm.c
+++ /dev/null
@@ -1,38 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfasm.c BFA State machine utility functions
20 */
21
22#include <cs/bfa_sm.h>
23
24/**
25 * cs_sm_api
26 */
27
/*
 * Map a state-machine function pointer to its enumerated state by linear
 * search of the table; if no entry matches, the state of the table's
 * NULL-terminator entry is returned.
 */
28int
29bfa_sm_to_state(struct bfa_sm_table_s *smt, bfa_sm_t sm)
30{
31	int i = 0;
32
33	while (smt[i].sm && smt[i].sm != sm)
34		i++;
35	return smt[i].state;
36}
37
38
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
new file mode 100644
index 000000000000..aa1dc749b281
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -0,0 +1,5423 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include "bfa_os_inc.h"
19#include "bfa_plog.h"
20#include "bfa_cs.h"
21#include "bfa_modules.h"
22#include "bfad_drv.h"
23
24BFA_TRC_FILE(HAL, FCXP);
25BFA_MODULE(fcxp);
26BFA_MODULE(sgpg);
27BFA_MODULE(lps);
28BFA_MODULE(fcport);
29BFA_MODULE(rport);
30BFA_MODULE(uf);
31
32/**
33 * LPS related definitions
34 */
35#define BFA_LPS_MIN_LPORTS (1)
36#define BFA_LPS_MAX_LPORTS (256)
37
38/*
39 * Maximum Vports supported per physical port or vf.
40 */
41#define BFA_LPS_MAX_VPORTS_SUPP_CB 255
42#define BFA_LPS_MAX_VPORTS_SUPP_CT 190
43
44/**
45 * lps_pvt BFA LPS private functions
46 */
47
48enum bfa_lps_event {
49 BFA_LPS_SM_LOGIN = 1, /* login request from user */
50 BFA_LPS_SM_LOGOUT = 2, /* logout request from user */
51 BFA_LPS_SM_FWRSP = 3, /* f/w response to login/logout */
52 BFA_LPS_SM_RESUME = 4, /* space present in reqq queue */
53 BFA_LPS_SM_DELETE = 5, /* lps delete from user */
54 BFA_LPS_SM_OFFLINE = 6, /* Link is offline */
55 BFA_LPS_SM_RX_CVL = 7, /* Rx clear virtual link */
56};
57
58/**
59 * FC PORT related definitions
60 */
61/*
62 * The port is considered disabled if corresponding physical port or IOC are
63 * disabled explicitly
64 */
65#define BFA_PORT_IS_DISABLED(bfa) \
66 ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
67 (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
68
69
70/**
71 * BFA port state machine events
72 */
73enum bfa_fcport_sm_event {
74 BFA_FCPORT_SM_START = 1, /* start port state machine */
75 BFA_FCPORT_SM_STOP = 2, /* stop port state machine */
76 BFA_FCPORT_SM_ENABLE = 3, /* enable port */
77 BFA_FCPORT_SM_DISABLE = 4, /* disable port state machine */
78 BFA_FCPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */
79 BFA_FCPORT_SM_LINKUP = 6, /* firmware linkup event */
80 BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkup down */
81 BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */
82 BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */
83};
84
85/**
86 * BFA port link notification state machine events
87 */
88
89enum bfa_fcport_ln_sm_event {
90 BFA_FCPORT_LN_SM_LINKUP = 1, /* linkup event */
91 BFA_FCPORT_LN_SM_LINKDOWN = 2, /* linkdown event */
92 BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */
93};
94
95/**
96 * RPORT related definitions
97 */
98#define bfa_rport_offline_cb(__rp) do { \
99 if ((__rp)->bfa->fcs) \
100 bfa_cb_rport_offline((__rp)->rport_drv); \
101 else { \
102 bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \
103 __bfa_cb_rport_offline, (__rp)); \
104 } \
105} while (0)
106
107#define bfa_rport_online_cb(__rp) do { \
108 if ((__rp)->bfa->fcs) \
109 bfa_cb_rport_online((__rp)->rport_drv); \
110 else { \
111 bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \
112 __bfa_cb_rport_online, (__rp)); \
113 } \
114} while (0)
115
116
117enum bfa_rport_event {
118 BFA_RPORT_SM_CREATE = 1, /* rport create event */
119 BFA_RPORT_SM_DELETE = 2, /* deleting an existing rport */
120 BFA_RPORT_SM_ONLINE = 3, /* rport is online */
121 BFA_RPORT_SM_OFFLINE = 4, /* rport is offline */
122 BFA_RPORT_SM_FWRSP = 5, /* firmware response */
123 BFA_RPORT_SM_HWFAIL = 6, /* IOC h/w failure */
124 BFA_RPORT_SM_QOS_SCN = 7, /* QoS SCN from firmware */
125 BFA_RPORT_SM_SET_SPEED = 8, /* Set Rport Speed */
126 BFA_RPORT_SM_QRESUME = 9, /* space in requeue queue */
127};
128
129/**
130 * forward declarations FCXP related functions
131 */
132static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
133static void hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
134 struct bfi_fcxp_send_rsp_s *fcxp_rsp);
135static void hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
136 struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
137static void bfa_fcxp_qresume(void *cbarg);
138static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
139 struct bfi_fcxp_send_req_s *send_req);
140
141/**
142 * forward declarations for LPS functions
143 */
144static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
145 u32 *dm_len);
146static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
147 struct bfa_iocfc_cfg_s *cfg,
148 struct bfa_meminfo_s *meminfo,
149 struct bfa_pcidev_s *pcidev);
150static void bfa_lps_detach(struct bfa_s *bfa);
151static void bfa_lps_start(struct bfa_s *bfa);
152static void bfa_lps_stop(struct bfa_s *bfa);
153static void bfa_lps_iocdisable(struct bfa_s *bfa);
154static void bfa_lps_login_rsp(struct bfa_s *bfa,
155 struct bfi_lps_login_rsp_s *rsp);
156static void bfa_lps_logout_rsp(struct bfa_s *bfa,
157 struct bfi_lps_logout_rsp_s *rsp);
158static void bfa_lps_reqq_resume(void *lps_arg);
159static void bfa_lps_free(struct bfa_lps_s *lps);
160static void bfa_lps_send_login(struct bfa_lps_s *lps);
161static void bfa_lps_send_logout(struct bfa_lps_s *lps);
162static void bfa_lps_login_comp(struct bfa_lps_s *lps);
163static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
164static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
165
166/**
167 * forward declaration for LPS state machine
168 */
169static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
170static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
171static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
172 event);
173static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
174static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
175static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
176 event);
177
178/**
179 * forward declaration for FC Port functions
180 */
181static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
182static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
183static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
184static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
185static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
186static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
187static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
188 enum bfa_port_linkstate event, bfa_boolean_t trunk);
189static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
190 enum bfa_port_linkstate event);
191static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
192static void bfa_fcport_stats_get_timeout(void *cbarg);
193static void bfa_fcport_stats_clr_timeout(void *cbarg);
194static void bfa_trunk_iocdisable(struct bfa_s *bfa);
195
196/**
197 * forward declaration for FC PORT state machine
198 */
199static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
200 enum bfa_fcport_sm_event event);
201static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
202 enum bfa_fcport_sm_event event);
203static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
204 enum bfa_fcport_sm_event event);
205static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
206 enum bfa_fcport_sm_event event);
207static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
208 enum bfa_fcport_sm_event event);
209static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
210 enum bfa_fcport_sm_event event);
211static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
212 enum bfa_fcport_sm_event event);
213static void bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
214 enum bfa_fcport_sm_event event);
215static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
216 enum bfa_fcport_sm_event event);
217static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
218 enum bfa_fcport_sm_event event);
219static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
220 enum bfa_fcport_sm_event event);
221static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
222 enum bfa_fcport_sm_event event);
223
224static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
225 enum bfa_fcport_ln_sm_event event);
226static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
227 enum bfa_fcport_ln_sm_event event);
228static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
229 enum bfa_fcport_ln_sm_event event);
230static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
231 enum bfa_fcport_ln_sm_event event);
232static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
233 enum bfa_fcport_ln_sm_event event);
234static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
235 enum bfa_fcport_ln_sm_event event);
236static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
237 enum bfa_fcport_ln_sm_event event);
238
239static struct bfa_sm_table_s hal_port_sm_table[] = {
240 {BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
241 {BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
242 {BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
243 {BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
244 {BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
245 {BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
246 {BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
247 {BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
248 {BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
249 {BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
250 {BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
251 {BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
252};
253
254
255/**
256 * forward declaration for RPORT related functions
257 */
258static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
259static void bfa_rport_free(struct bfa_rport_s *rport);
260static bfa_boolean_t bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
261static bfa_boolean_t bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
262static bfa_boolean_t bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
263static void __bfa_cb_rport_online(void *cbarg,
264 bfa_boolean_t complete);
265static void __bfa_cb_rport_offline(void *cbarg,
266 bfa_boolean_t complete);
267
268/**
269 * forward declaration for RPORT state machine
270 */
271static void bfa_rport_sm_uninit(struct bfa_rport_s *rp,
272 enum bfa_rport_event event);
273static void bfa_rport_sm_created(struct bfa_rport_s *rp,
274 enum bfa_rport_event event);
275static void bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
276 enum bfa_rport_event event);
277static void bfa_rport_sm_online(struct bfa_rport_s *rp,
278 enum bfa_rport_event event);
279static void bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
280 enum bfa_rport_event event);
281static void bfa_rport_sm_offline(struct bfa_rport_s *rp,
282 enum bfa_rport_event event);
283static void bfa_rport_sm_deleting(struct bfa_rport_s *rp,
284 enum bfa_rport_event event);
285static void bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
286 enum bfa_rport_event event);
287static void bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
288 enum bfa_rport_event event);
289static void bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
290 enum bfa_rport_event event);
291static void bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
292 enum bfa_rport_event event);
293static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
294 enum bfa_rport_event event);
295static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
296 enum bfa_rport_event event);
297
298/**
299 * PLOG related definitions
300 */
/*
 * Validate a port-log record before it is added to the ring.
 * Returns 1 (invalid) for an unknown log type or an out-of-range
 * integer count; 0 otherwise.
 */
301static int
302plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
303{
304	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
305	    (pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
306		return 1;
307
	/* NOTE(review): this bounds-checks log_num_ints only for non-INT
	 * records; "!=" looks like it was meant to be "==" — confirm. */
308	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
309	    (pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
310		return 1;
311
312	return 0;
313}
314
/*
 * Append a record to the circular port log, time-stamping it on entry.
 * When the ring is full the head is advanced too, overwriting the
 * oldest record.  No-op when logging is disabled or the record fails
 * validation.
 */
315static void
316bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
317{
318	u16 tail;
319	struct bfa_plog_rec_s *pl_recp;
320
321	if (plog->plog_enabled == 0)
322		return;
323
324	if (plkd_validate_logrec(pl_rec)) {
325		bfa_assert(0);
326		return;
327	}
328
329	tail = plog->tail;
330
331	pl_recp = &(plog->plog_recs[tail]);
332
333	bfa_os_memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
334
	/* stamp the stored copy, then advance the tail */
335	pl_recp->tv = bfa_os_get_log_time();
336	BFA_PL_LOG_REC_INCR(plog->tail);
337
	/* ring full: drop the oldest record by advancing the head */
338	if (plog->head == plog->tail)
339		BFA_PL_LOG_REC_INCR(plog->head);
340}
341
/*
 * Zero the port log, stamp its signature, and enable logging with an
 * empty ring (head == tail == 0).
 */
342void
343bfa_plog_init(struct bfa_plog_s *plog)
344{
345	bfa_os_memset((char *)plog, 0, sizeof(struct bfa_plog_s));
346
347	bfa_os_memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
348	plog->head = plog->tail = 0;
349	plog->plog_enabled = 1;
350}
351
/*
 * Add a STRING-type record to the port log.  The string is copied with a
 * bounded strncpy and explicitly NUL-terminated.  No-op when logging is
 * disabled.
 */
352void
353bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
354		enum bfa_plog_eid event,
355		u16 misc, char *log_str)
356{
357	struct bfa_plog_rec_s lp;
358
359	if (plog->plog_enabled) {
360		bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
361		lp.mid = mid;
362		lp.eid = event;
363		lp.log_type = BFA_PL_LOG_TYPE_STRING;
364		lp.misc = misc;
365		strncpy(lp.log_entry.string_log, log_str,
366			BFA_PL_STRING_LOG_SZ - 1);
		/* strncpy does not guarantee termination; force it */
367		lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
368		bfa_plog_add(plog, &lp);
369	}
370}
371
/*
 * Add an INT-type record to the port log, copying up to
 * BFA_PL_INT_LOG_SZ integers from intarr (excess is silently clamped).
 * No-op when logging is disabled.
 */
372void
373bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
374		enum bfa_plog_eid event,
375		u16 misc, u32 *intarr, u32 num_ints)
376{
377	struct bfa_plog_rec_s lp;
378	u32 i;
379
	/* clamp to the record's integer-log capacity */
380	if (num_ints > BFA_PL_INT_LOG_SZ)
381		num_ints = BFA_PL_INT_LOG_SZ;
382
383	if (plog->plog_enabled) {
384		bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
385		lp.mid = mid;
386		lp.eid = event;
387		lp.log_type = BFA_PL_LOG_TYPE_INT;
388		lp.misc = misc;
389
390		for (i = 0; i < num_ints; i++)
391			bfa_os_assign(lp.log_entry.int_log[i],
392					intarr[i]);
393
394		lp.log_num_ints = (u8) num_ints;
395
396		bfa_plog_add(plog, &lp);
397	}
398}
399
/*
 * Log selected words of an FC header (words 0, 1 and 4 — presumably
 * R_CTL/D_ID, CS_CTL/S_ID and OX_ID/RX_ID — TODO confirm against
 * struct fchs_s) as an INT record.
 */
400void
401bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
402		enum bfa_plog_eid event,
403		u16 misc, struct fchs_s *fchdr)
404{
	/* NOTE(review): lp is memset but otherwise unused here — dead local */
405	struct bfa_plog_rec_s lp;
406	u32 *tmp_int = (u32 *) fchdr;
407	u32 ints[BFA_PL_INT_LOG_SZ];
408
409	if (plog->plog_enabled) {
410		bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
411
412		ints[0] = tmp_int[0];
413		ints[1] = tmp_int[1];
414		ints[2] = tmp_int[4];
415
416		bfa_plog_intarr(plog, mid, event, misc, ints, 3);
417	}
418}
419
/*
 * Like bfa_plog_fchdr(), but additionally logs the first payload word
 * (pld_w0) as the fourth integer of the INT record.
 */
420void
421bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
422		      enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
423		      u32 pld_w0)
424{
	/* NOTE(review): lp is memset but otherwise unused here — dead local */
425	struct bfa_plog_rec_s lp;
426	u32 *tmp_int = (u32 *) fchdr;
427	u32 ints[BFA_PL_INT_LOG_SZ];
428
429	if (plog->plog_enabled) {
430		bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
431
432		ints[0] = tmp_int[0];
433		ints[1] = tmp_int[1];
434		ints[2] = tmp_int[4];
435		ints[3] = pld_w0;
436
437		bfa_plog_intarr(plog, mid, event, misc, ints, 4);
438	}
439}
440
/* Discard all log records by resetting the ring to empty. */
441void
442bfa_plog_clear(struct bfa_plog_s *plog)
443{
444	plog->head = plog->tail = 0;
445}
446
/* Turn port logging on. */
447void
448bfa_plog_enable(struct bfa_plog_s *plog)
449{
450	plog->plog_enabled = 1;
451}
452
/* Turn port logging off; subsequent add calls become no-ops. */
453void
454bfa_plog_disable(struct bfa_plog_s *plog)
455{
456	plog->plog_enabled = 0;
457}
458
/* Report whether port logging is currently enabled. */
459bfa_boolean_t
460bfa_plog_get_setting(struct bfa_plog_s *plog)
461{
462	return (bfa_boolean_t)plog->plog_enabled;
463}
464
465/**
466 * fcxp_pvt BFA FCXP private functions
467 */
468
/*
 * Carve the FCXP request and response payload pools out of the DMA-able
 * memory region described by mi, zeroing each pool and advancing the
 * region's kva/pa cursors past the claimed space.
 */
469static void
470claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
471{
472	u8 *dm_kva = NULL;
473	u64 dm_pa;
474	u32 buf_pool_sz;
475
476	dm_kva = bfa_meminfo_dma_virt(mi);
477	dm_pa = bfa_meminfo_dma_phys(mi);
478
479	buf_pool_sz = mod->req_pld_sz * mod->num_fcxps;
480
481	/*
482	 * Initialize the fcxp req payload list
483	 */
484	mod->req_pld_list_kva = dm_kva;
485	mod->req_pld_list_pa = dm_pa;
486	dm_kva += buf_pool_sz;
487	dm_pa += buf_pool_sz;
488	bfa_os_memset(mod->req_pld_list_kva, 0, buf_pool_sz);
489
490	/*
491	 * Initialize the fcxp rsp payload list
492	 */
493	buf_pool_sz = mod->rsp_pld_sz * mod->num_fcxps;
494	mod->rsp_pld_list_kva = dm_kva;
495	mod->rsp_pld_list_pa = dm_pa;
496	dm_kva += buf_pool_sz;
497	dm_pa += buf_pool_sz;
498	bfa_os_memset(mod->rsp_pld_list_kva, 0, buf_pool_sz);
499
	/* record how much DMA memory was consumed */
500	bfa_meminfo_dma_virt(mi) = dm_kva;
501	bfa_meminfo_dma_phys(mi) = dm_pa;
502}
503
/*
 * Carve the array of bfa_fcxp_s structures out of the kernel-virtual
 * region of mi, tag each entry, place them all on the free queue, and
 * advance the region's kva cursor past the claimed space.
 */
504static void
505claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
506{
507	u16 i;
508	struct bfa_fcxp_s *fcxp;
509
510	fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi);
511	bfa_os_memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
512
513	INIT_LIST_HEAD(&mod->fcxp_free_q);
514	INIT_LIST_HEAD(&mod->fcxp_active_q);
515
516	mod->fcxp_list = fcxp;
517
518	for (i = 0; i < mod->num_fcxps; i++) {
519		fcxp->fcxp_mod = mod;
520		fcxp->fcxp_tag = i;
521
522		list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
		/* arm the request-queue wait element for queue-full resume */
523		bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
524		fcxp->reqq_waiting = BFA_FALSE;
525
526		fcxp = fcxp + 1;
527	}
528
529	bfa_meminfo_kva(mi) = (void *)fcxp;
530}
531
/*
 * Report FCXP memory needs: adds DMA length (*dm_len) for req/rsp
 * payload buffers and non-DMA length (*ndm_len) for the fcxp structs.
 * Response buffers use the small (IBUF) size in min_cfg mode, the large
 * (LBUF) size otherwise.
 */
532static void
533bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
534		u32 *dm_len)
535{
536	u16 num_fcxp_reqs = cfg->fwcfg.num_fcxp_reqs;
537
538	if (num_fcxp_reqs == 0)
539		return;
540
541	/*
542	 * Account for req/rsp payload
543	 */
544	*dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
545	if (cfg->drvcfg.min_cfg)
546		*dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
547	else
548		*dm_len += BFA_FCXP_MAX_LBUF_SZ * num_fcxp_reqs;
549
550	/*
551	 * Account for fcxp structs
552	 */
553	*ndm_len += sizeof(struct bfa_fcxp_s) * num_fcxp_reqs;
554}
555
/*
 * BFA module attach for FCXP: initialize the module state, choose payload
 * sizes (mirroring bfa_fcxp_meminfo()), and claim the memory regions
 * reserved for payload pools and fcxp structures.
 */
556static void
557bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
558		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
559{
560	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
561
562	bfa_os_memset(mod, 0, sizeof(struct bfa_fcxp_mod_s));
563	mod->bfa = bfa;
564	mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
565
566	/**
567	 * Initialize FCXP request and response payload sizes.
568	 */
569	mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
570	if (!cfg->drvcfg.min_cfg)
571		mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
572
573	INIT_LIST_HEAD(&mod->wait_q);
574
575	claim_fcxp_req_rsp_mem(mod, meminfo);
576	claim_fcxps_mem(mod, meminfo);
577}
578
/* BFA module detach hook for FCXP — intentionally empty. */
579static void
580bfa_fcxp_detach(struct bfa_s *bfa)
581{
582}
583
/* BFA module start hook for FCXP — intentionally empty. */
584static void
585bfa_fcxp_start(struct bfa_s *bfa)
586{
587}
588
/* BFA module stop hook for FCXP — intentionally empty. */
589static void
590bfa_fcxp_stop(struct bfa_s *bfa)
591{
592}
593
/*
 * IOC disable handler: fail every active FCXP with IOC_FAILURE.
 * FCXPs with no caller context are completed and freed inline; the rest
 * have their completion deferred via the callback queue.
 */
594static void
595bfa_fcxp_iocdisable(struct bfa_s *bfa)
596{
597	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
598	struct bfa_fcxp_s *fcxp;
599	struct list_head	      *qe, *qen;
600
	/* _safe variant: entries may be unlinked while walking */
601	list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
602		fcxp = (struct bfa_fcxp_s *) qe;
603		if (fcxp->caller == NULL) {
604			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
605					BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
606			bfa_fcxp_free(fcxp);
607		} else {
608			fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
609			bfa_cb_queue(bfa, &fcxp->hcb_qe,
610				     __bfa_fcxp_send_cbfn, fcxp);
611		}
612	}
613}
614
/*
 * Pop an FCXP off the free queue and move it to the active queue.
 * Returns NULL when the pool is exhausted.
 */
615static struct bfa_fcxp_s *
616bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
617{
618	struct bfa_fcxp_s *fcxp;
619
620	bfa_q_deq(&fm->fcxp_free_q, &fcxp);
621
622	if (fcxp)
623		list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
624
625	return fcxp;
626}
627
/*
 * Initialize one direction (request or response) of an FCXP.  With
 * n_sgles == 0 the internal payload buffer is selected (*use_ibuf = 1);
 * otherwise caller-supplied SG address/length callbacks are recorded.
 * Only a single inline SGE is supported (asserts n_sgles <= BFI_SGE_INLINE).
 */
628static void
629bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
630	       struct bfa_s *bfa,
631	       u8 *use_ibuf,
632	       u32 *nr_sgles,
633	       bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
634	       bfa_fcxp_get_sglen_t *r_sglen_cbfn,
635	       struct list_head *r_sgpg_q,
636	       int n_sgles,
637	       bfa_fcxp_get_sgaddr_t sga_cbfn,
638	       bfa_fcxp_get_sglen_t sglen_cbfn)
639{
640
641	bfa_assert(bfa != NULL);
642
643	bfa_trc(bfa, fcxp->fcxp_tag);
644
645	if (n_sgles == 0) {
646		*use_ibuf = 1;
647	} else {
		/* NOTE(review): "*sga_cbfn" dereferences a function pointer
		 * passed by value — equivalent to "sga_cbfn != NULL". */
648		bfa_assert(*sga_cbfn != NULL);
649		bfa_assert(*sglen_cbfn != NULL);
650
651		*use_ibuf = 0;
652		*r_sga_cbfn = sga_cbfn;
653		*r_sglen_cbfn = sglen_cbfn;
654
655		*nr_sgles = n_sgles;
656
657		/*
658		 * alloc required sgpgs
659		 */
660		if (n_sgles > BFI_SGE_INLINE)
661			bfa_assert(0);
662	}
663
664}
665
/*
 * Initialize both directions of an FCXP by delegating to
 * bfa_fcxp_init_reqrsp() for the request and the response sides,
 * and record the caller context used to decide completion handling.
 */
666static void
667bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
668	      void *caller, struct bfa_s *bfa, int nreq_sgles,
669	      int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
670	      bfa_fcxp_get_sglen_t req_sglen_cbfn,
671	      bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
672	      bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
673{
674
675	bfa_assert(bfa != NULL);
676
677	bfa_trc(bfa, fcxp->fcxp_tag);
678
679	fcxp->caller = caller;
680
681	bfa_fcxp_init_reqrsp(fcxp, bfa,
682		&fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
683		&fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
684		nreq_sgles, req_sga_cbfn, req_sglen_cbfn);
685
686	bfa_fcxp_init_reqrsp(fcxp, bfa,
687		&fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
688		&fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
689		nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);
690
691}
692
/*
 * Release an FCXP.  If an allocation waiter is queued, the FCXP is
 * re-initialized from the waiter's saved parameters and handed over via
 * its alloc callback instead of being freed; otherwise it moves from the
 * active queue back to the free queue.
 */
693static void
694bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
695{
696	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
697	struct bfa_fcxp_wqe_s *wqe;
698
699	bfa_q_deq(&mod->wait_q, &wqe);
700	if (wqe) {
701		bfa_trc(mod->bfa, fcxp->fcxp_tag);
702
703		bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
704			wqe->nrsp_sgles, wqe->req_sga_cbfn,
705			wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
706			wqe->rsp_sglen_cbfn);
707
708		wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
709		return;
710	}
711
712	bfa_assert(bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
713	list_del(&fcxp->qe);
714	list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
715}
716
/*
 * No-op completion callback installed for FCXPs whose completion is to
 * be discarded.
 */
717static void
718bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
719		   bfa_status_t req_status, u32 rsp_len,
720		   u32 resid_len, struct fchs_s *rsp_fchs)
721{
722	/* discarded fcxp completion */
723}
724
/*
 * Deferred send-completion callback.  On the "complete" pass, invoke the
 * caller's completion with the response status cached on the fcxp; on
 * the cancel pass, just return the fcxp to the pool.
 */
725static void
726__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
727{
728	struct bfa_fcxp_s *fcxp = cbarg;
729
730	if (complete) {
731		fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
732				fcxp->rsp_status, fcxp->rsp_len,
733				fcxp->residue_len, &fcxp->rsp_fchs);
734	} else {
735		bfa_fcxp_free(fcxp);
736	}
737}
738
/*
 * Firmware send-response handler: byte-swap the response fields, look up
 * the fcxp by tag, log the received frame, then either complete inline
 * (no caller context) or cache the result and defer completion through
 * the callback queue.
 */
739static void
740hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
741{
742	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
743	struct bfa_fcxp_s *fcxp;
744	u16	fcxp_tag = bfa_os_ntohs(fcxp_rsp->fcxp_tag);
745
746	bfa_trc(bfa, fcxp_tag);
747
748	fcxp_rsp->rsp_len = bfa_os_ntohl(fcxp_rsp->rsp_len);
749
750	/**
751	 * @todo f/w should not set residue to non-0 when everything
752	 * is received.
753	 */
754	if (fcxp_rsp->req_status == BFA_STATUS_OK)
755		fcxp_rsp->residue_len = 0;
756	else
757		fcxp_rsp->residue_len = bfa_os_ntohl(fcxp_rsp->residue_len);
758
759	fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);
760
761	bfa_assert(fcxp->send_cbfn != NULL);
762
763	hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);
764
	/* NOTE(review): redundant after the assert above, but kept as a
	 * defensive check for non-assert builds */
765	if (fcxp->send_cbfn != NULL) {
766		bfa_trc(mod->bfa, (NULL == fcxp->caller));
767		if (fcxp->caller == NULL) {
768			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
769					fcxp_rsp->req_status, fcxp_rsp->rsp_len,
770					fcxp_rsp->residue_len, &fcxp_rsp->fchs);
771			/*
772			 * fcxp automatically freed on return from the callback
773			 */
774			bfa_fcxp_free(fcxp);
775		} else {
776			fcxp->rsp_status = fcxp_rsp->req_status;
777			fcxp->rsp_len = fcxp_rsp->rsp_len;
778			fcxp->residue_len = fcxp_rsp->residue_len;
779			fcxp->rsp_fchs = fcxp_rsp->fchs;
780
781			bfa_cb_queue(bfa, &fcxp->hcb_qe,
782					__bfa_fcxp_send_cbfn, fcxp);
783		}
784	} else {
785		bfa_trc(bfa, (NULL == fcxp->send_cbfn));
786	}
787}
788
/*
 * Fill an inline SGE pair for a local buffer: first entry carries the
 * data address/length (DATA_LAST), second is the zero-address PGDLEN
 * terminator.  Both are converted to big-endian for the firmware.
 */
789static void
790hal_fcxp_set_local_sges(struct bfi_sge_s *sge, u32 reqlen, u64 req_pa)
791{
792	union bfi_addr_u      sga_zero = { {0} };
793
794	sge->sg_len = reqlen;
795	sge->flags = BFI_SGE_DATA_LAST;
796	bfa_dma_addr_set(sge[0].sga, req_pa);
797	bfa_sge_to_be(sge);
798	sge++;
799
800	sge->sga = sga_zero;
801	sge->sg_len = reqlen;
802	sge->flags = BFI_SGE_PGDLEN;
803	bfa_sge_to_be(sge);
804}
805
/*
 * Port-log a transmitted FCXP frame.  When the internal request buffer
 * is in use, the first payload word is logged along with the FC header;
 * otherwise only the header is logged.
 */
806static void
807hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
808		 struct fchs_s *fchs)
809{
810	/*
811	 * TODO: TX ox_id
812	 */
813	if (reqlen > 0) {
814		if (fcxp->use_ireqbuf) {
815			u32	pld_w0 =
816				*((u32 *) BFA_FCXP_REQ_PLD(fcxp));
817
818			bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
819					BFA_PL_EID_TX,
820					reqlen + sizeof(struct fchs_s), fchs,
821					pld_w0);
822		} else {
823			bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
824					BFA_PL_EID_TX,
825					reqlen + sizeof(struct fchs_s),
826					fchs);
827		}
828	} else {
829		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
830				reqlen + sizeof(struct fchs_s), fchs);
831	}
832}
833
/*
 * Port-log a received FCXP response frame; mirrors hal_fcxp_tx_plog()
 * but keys off the internal response buffer and the firmware-reported
 * response length.
 */
834static void
835hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
836		 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
837{
838	if (fcxp_rsp->rsp_len > 0) {
839		if (fcxp->use_irspbuf) {
840			u32	pld_w0 =
841				*((u32 *) BFA_FCXP_RSP_PLD(fcxp));
842
843			bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
844					      BFA_PL_EID_RX,
845					      (u16) fcxp_rsp->rsp_len,
846					      &fcxp_rsp->fchs, pld_w0);
847		} else {
848			bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
849				       BFA_PL_EID_RX,
850				       (u16) fcxp_rsp->rsp_len,
851				       &fcxp_rsp->fchs);
852		}
853	} else {
854		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
855			       (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
856	}
857}
858
859/**
860 * Handler to resume sending fcxp when space is available in cpe queue.
861 */
/*
 * Request-queue resume callback: retry queuing a send request that
 * previously found the CPE request queue full.
 */
862static void
863bfa_fcxp_qresume(void *cbarg)
864{
865	struct bfa_fcxp_s		*fcxp = cbarg;
866	struct bfa_s			*bfa = fcxp->fcxp_mod->bfa;
867	struct bfi_fcxp_send_req_s	*send_req;
868
869	fcxp->reqq_waiting = BFA_FALSE;
870	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
871	bfa_fcxp_queue(fcxp, send_req);
872}
873
874/**
875 * Queue fcxp send request to firmware.
876 */
/*
 * Build the firmware send request from the fcxp's req/rsp info — rport
 * handle and max frame size, byte-swapped lengths, and inline SGEs for
 * both directions — then produce it on the FCXP request queue.
 */
877static void
878bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
879{
880	struct bfa_s			*bfa = fcxp->fcxp_mod->bfa;
881	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
882	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
883	struct bfa_rport_s		*rport = reqi->bfa_rport;
884
885	bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
886		    bfa_lpuid(bfa));
887
888	send_req->fcxp_tag = bfa_os_htons(fcxp->fcxp_tag);
889	if (rport) {
890		send_req->rport_fw_hndl = rport->fw_handle;
891		send_req->max_frmsz = bfa_os_htons(rport->rport_info.max_frmsz);
		/* fall back to the FC maximum PDU size if unset */
892		if (send_req->max_frmsz == 0)
893			send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ);
894	} else {
895		send_req->rport_fw_hndl = 0;
896		send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ);
897	}
898
899	send_req->vf_id = bfa_os_htons(reqi->vf_id);
900	send_req->lp_tag = reqi->lp_tag;
901	send_req->class = reqi->class;
902	send_req->rsp_timeout = rspi->rsp_timeout;
903	send_req->cts = reqi->cts;
904	send_req->fchs = reqi->fchs;
905
906	send_req->req_len = bfa_os_htonl(reqi->req_tot_len);
907	send_req->rsp_maxlen = bfa_os_htonl(rspi->rsp_maxlen);
908
909	/*
910	 * setup req sgles
911	 */
912	if (fcxp->use_ireqbuf == 1) {
913		hal_fcxp_set_local_sges(send_req->req_sge, reqi->req_tot_len,
914					BFA_FCXP_REQ_PLD_PA(fcxp));
915	} else {
916		if (fcxp->nreq_sgles > 0) {
917			bfa_assert(fcxp->nreq_sgles == 1);
918			hal_fcxp_set_local_sges(send_req->req_sge,
919						reqi->req_tot_len,
920						fcxp->req_sga_cbfn(fcxp->caller,
921								   0));
922		} else {
923			bfa_assert(reqi->req_tot_len == 0);
			/* NOTE(review): writes rsp_sge in the request branch —
			 * looks like it should be req_sge; harmless today since
			 * rsp_sge is re-set below, but confirm. */
924			hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
925		}
926	}
927
928	/*
929	 * setup rsp sgles
930	 */
931	if (fcxp->use_irspbuf == 1) {
932		bfa_assert(rspi->rsp_maxlen <= BFA_FCXP_MAX_LBUF_SZ);
933
934		hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen,
935					BFA_FCXP_RSP_PLD_PA(fcxp));
936
937	} else {
938		if (fcxp->nrsp_sgles > 0) {
939			bfa_assert(fcxp->nrsp_sgles == 1);
940			hal_fcxp_set_local_sges(send_req->rsp_sge,
941						rspi->rsp_maxlen,
942						fcxp->rsp_sga_cbfn(fcxp->caller,
943								   0));
944		} else {
945			bfa_assert(rspi->rsp_maxlen == 0);
946			hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
947		}
948	}
949
950	hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
951
952	bfa_reqq_produce(bfa, BFA_REQQ_FCXP);
953
954	bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
955	bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
956}
957
958/**
959 * hal_fcxp_api BFA FCXP API
960 */
961
962/**
963 * Allocate an FCXP instance to send a response or to send a request
964 * that has a response. Request/response buffers are allocated by caller.
965 *
966 * @param[in] bfa BFA bfa instance
967 * @param[in] nreq_sgles Number of SG elements required for request
968 * buffer. 0, if fcxp internal buffers are used.
969 * Use bfa_fcxp_get_reqbuf() to get the
970 * internal req buffer.
971 * @param[in] req_sgles SG elements describing request buffer. Will be
972 * copied in by BFA and hence can be freed on
973 * return from this function.
974 * @param[in] get_req_sga function ptr to be called to get a request SG
975 * Address (given the sge index).
976 * @param[in] get_req_sglen function ptr to be called to get a request SG
977 * len (given the sge index).
978 * @param[in] get_rsp_sga function ptr to be called to get a response SG
979 * Address (given the sge index).
980 * @param[in] get_rsp_sglen function ptr to be called to get a response SG
981 * len (given the sge index).
982 *
983 * @return FCXP instance. NULL on failure.
984 */
985struct bfa_fcxp_s *
986bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
987 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
988 bfa_fcxp_get_sglen_t req_sglen_cbfn,
989 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
990 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
991{
992 struct bfa_fcxp_s *fcxp = NULL;
993
994 bfa_assert(bfa != NULL);
995
996 fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
997 if (fcxp == NULL)
998 return NULL;
999
1000 bfa_trc(bfa, fcxp->fcxp_tag);
1001
1002 bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
1003 req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);
1004
1005 return fcxp;
1006}
1007
1008/**
1009 * Get the internal request buffer pointer
1010 *
1011 * @param[in] fcxp BFA fcxp pointer
1012 *
1013 * @return pointer to the internal request buffer
1014 */
1015void *
1016bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
1017{
1018 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1019 void *reqbuf;
1020
1021 bfa_assert(fcxp->use_ireqbuf == 1);
1022 reqbuf = ((u8 *)mod->req_pld_list_kva) +
1023 fcxp->fcxp_tag * mod->req_pld_sz;
1024 return reqbuf;
1025}
1026
1027u32
1028bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
1029{
1030 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1031
1032 return mod->req_pld_sz;
1033}
1034
1035/**
1036 * Get the internal response buffer pointer
1037 *
1038 * @param[in] fcxp BFA fcxp pointer
1039 *
1040 * @return pointer to the internal request buffer
1041 */
1042void *
1043bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
1044{
1045 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1046 void *rspbuf;
1047
1048 bfa_assert(fcxp->use_irspbuf == 1);
1049
1050 rspbuf = ((u8 *)mod->rsp_pld_list_kva) +
1051 fcxp->fcxp_tag * mod->rsp_pld_sz;
1052 return rspbuf;
1053}
1054
1055/**
1056 * Free the BFA FCXP
1057 *
1058 * @param[in] fcxp BFA fcxp pointer
1059 *
1060 * @return void
1061 */
1062void
1063bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
1064{
1065 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1066
1067 bfa_assert(fcxp != NULL);
1068 bfa_trc(mod->bfa, fcxp->fcxp_tag);
1069 bfa_fcxp_put(fcxp);
1070}
1071
1072/**
1073 * Send a FCXP request
1074 *
1075 * @param[in] fcxp BFA fcxp pointer
1076 * @param[in] rport BFA rport pointer. Could be left NULL for WKA rports
1077 * @param[in] vf_id virtual Fabric ID
1078 * @param[in] lp_tag lport tag
1079 * @param[in] cts use Continous sequence
1080 * @param[in] cos fc Class of Service
1081 * @param[in] reqlen request length, does not include FCHS length
1082 * @param[in] fchs fc Header Pointer. The header content will be copied
1083 * in by BFA.
1084 *
1085 * @param[in] cbfn call back function to be called on receiving
1086 * the response
1087 * @param[in] cbarg arg for cbfn
1088 * @param[in] rsp_timeout
1089 * response timeout
1090 *
1091 * @return bfa_status_t
1092 */
1093void
1094bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
1095 u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
1096 u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
1097 void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
1098{
1099 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
1100 struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
1101 struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
1102 struct bfi_fcxp_send_req_s *send_req;
1103
1104 bfa_trc(bfa, fcxp->fcxp_tag);
1105
1106 /**
1107 * setup request/response info
1108 */
1109 reqi->bfa_rport = rport;
1110 reqi->vf_id = vf_id;
1111 reqi->lp_tag = lp_tag;
1112 reqi->class = cos;
1113 rspi->rsp_timeout = rsp_timeout;
1114 reqi->cts = cts;
1115 reqi->fchs = *fchs;
1116 reqi->req_tot_len = reqlen;
1117 rspi->rsp_maxlen = rsp_maxlen;
1118 fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
1119 fcxp->send_cbarg = cbarg;
1120
1121 /**
1122 * If no room in CPE queue, wait for space in request queue
1123 */
1124 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
1125 if (!send_req) {
1126 bfa_trc(bfa, fcxp->fcxp_tag);
1127 fcxp->reqq_waiting = BFA_TRUE;
1128 bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
1129 return;
1130 }
1131
1132 bfa_fcxp_queue(fcxp, send_req);
1133}
1134
/**
 * Abort a BFA FCXP.
 *
 * @param[in]	fcxp	BFA fcxp pointer
 *
 * @return	BFA_STATUS_OK (never actually reached; see below)
 */
bfa_status_t
bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
{
	bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
	/* Abort is not implemented; trap any caller that reaches here. */
	bfa_assert(0);
	return BFA_STATUS_OK;
}
1149
1150void
1151bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
1152 bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
1153 void *caller, int nreq_sgles,
1154 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
1155 bfa_fcxp_get_sglen_t req_sglen_cbfn,
1156 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
1157 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
1158{
1159 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1160
1161 bfa_assert(list_empty(&mod->fcxp_free_q));
1162
1163 wqe->alloc_cbfn = alloc_cbfn;
1164 wqe->alloc_cbarg = alloc_cbarg;
1165 wqe->caller = caller;
1166 wqe->bfa = bfa;
1167 wqe->nreq_sgles = nreq_sgles;
1168 wqe->nrsp_sgles = nrsp_sgles;
1169 wqe->req_sga_cbfn = req_sga_cbfn;
1170 wqe->req_sglen_cbfn = req_sglen_cbfn;
1171 wqe->rsp_sga_cbfn = rsp_sga_cbfn;
1172 wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;
1173
1174 list_add_tail(&wqe->qe, &mod->wait_q);
1175}
1176
/*
 * Cancel a pending FCXP allocation wait entry. The entry must still be
 * on the module wait queue.
 */
void
bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	bfa_assert(bfa_q_is_on_q(&mod->wait_q, wqe));
	list_del(&wqe->qe);
}
1185
1186void
1187bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
1188{
1189 /**
1190 * If waiting for room in request queue, cancel reqq wait
1191 * and free fcxp.
1192 */
1193 if (fcxp->reqq_waiting) {
1194 fcxp->reqq_waiting = BFA_FALSE;
1195 bfa_reqq_wcancel(&fcxp->reqq_wqe);
1196 bfa_fcxp_free(fcxp);
1197 return;
1198 }
1199
1200 fcxp->send_cbfn = bfa_fcxp_null_comp;
1201}
1202
1203
1204
1205/**
1206 * hal_fcxp_public BFA FCXP public functions
1207 */
1208
1209void
1210bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
1211{
1212 switch (msg->mhdr.msg_id) {
1213 case BFI_FCXP_I2H_SEND_RSP:
1214 hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
1215 break;
1216
1217 default:
1218 bfa_trc(bfa, msg->mhdr.msg_id);
1219 bfa_assert(0);
1220 }
1221}
1222
1223u32
1224bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
1225{
1226 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1227
1228 return mod->rsp_pld_sz;
1229}
1230
1231
1232/**
1233 * BFA LPS state machine functions
1234 */
1235
/**
 * Init state -- no login in progress.
 */
static void
bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGIN:
		/* Send the login now if the request queue has room,
		 * otherwise park in loginwait until space frees up. */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_login);
			bfa_lps_send_login(lps);
		}

		if (lps->fdisc)
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				     BFA_PL_EID_LOGIN, 0, "FDISC Request");
		else
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				     BFA_PL_EID_LOGIN, 0, "FLOGI Request");
		break;

	case BFA_LPS_SM_LOGOUT:
		/* Nothing is logged in; complete the logout immediately. */
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_DELETE:
		bfa_lps_free(lps);
		break;

	case BFA_LPS_SM_RX_CVL:
	case BFA_LPS_SM_OFFLINE:
		/* Nothing to do while not logged in. */
		break;

	case BFA_LPS_SM_FWRSP:
		/*
		 * Could happen when fabric detects loopback and discards
		 * the lps request. Fw will eventually sent out the timeout
		 * Just ignore
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1287
/**
 * login is in progress -- awaiting response from firmware
 */
static void
bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
		/* Success moves to online; any failure returns to init.
		 * Either way the caller's completion is delivered. */
		if (lps->status == BFA_STATUS_OK) {
			bfa_sm_set_state(lps, bfa_lps_sm_online);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					     BFA_PL_EID_LOGIN, 0, "FDISC Accept");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					     BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_init);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					     BFA_PL_EID_LOGIN, 0,
					     "FDISC Fail (RJT or timeout)");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					     BFA_PL_EID_LOGIN, 0,
					     "FLOGI Fail (RJT or timeout)");
		}
		bfa_lps_login_comp(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
		/* Link dropped while waiting for the firmware response. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1329
1330/**
1331 * login pending - awaiting space in request queue
1332 */
1333static void
1334bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1335{
1336 bfa_trc(lps->bfa, lps->lp_tag);
1337 bfa_trc(lps->bfa, event);
1338
1339 switch (event) {
1340 case BFA_LPS_SM_RESUME:
1341 bfa_sm_set_state(lps, bfa_lps_sm_login);
1342 break;
1343
1344 case BFA_LPS_SM_OFFLINE:
1345 bfa_sm_set_state(lps, bfa_lps_sm_init);
1346 bfa_reqq_wcancel(&lps->wqe);
1347 break;
1348
1349 case BFA_LPS_SM_RX_CVL:
1350 /*
1351 * Login was not even sent out; so when getting out
1352 * of this state, it will appear like a login retry
1353 * after Clear virtual link
1354 */
1355 break;
1356
1357 default:
1358 bfa_sm_fault(lps->bfa, event);
1359 }
1360}
1361
/**
 * login complete -- lport is logged in to the fabric
 */
static void
bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGOUT:
		/* Send the logout now if the request queue has room,
		 * otherwise park in logowait until space frees up. */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_logowait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_logout);
			bfa_lps_send_logout(lps);
		}
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			     BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		bfa_sm_set_state(lps, bfa_lps_sm_init);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			     BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		/* Session is gone; drop straight back to init. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1402
1403/**
1404 * logout in progress - awaiting firmware response
1405 */
1406static void
1407bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
1408{
1409 bfa_trc(lps->bfa, lps->lp_tag);
1410 bfa_trc(lps->bfa, event);
1411
1412 switch (event) {
1413 case BFA_LPS_SM_FWRSP:
1414 bfa_sm_set_state(lps, bfa_lps_sm_init);
1415 bfa_lps_logout_comp(lps);
1416 break;
1417
1418 case BFA_LPS_SM_OFFLINE:
1419 bfa_sm_set_state(lps, bfa_lps_sm_init);
1420 break;
1421
1422 default:
1423 bfa_sm_fault(lps->bfa, event);
1424 }
1425}
1426
1427/**
1428 * logout pending -- awaiting space in request queue
1429 */
1430static void
1431bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1432{
1433 bfa_trc(lps->bfa, lps->lp_tag);
1434 bfa_trc(lps->bfa, event);
1435
1436 switch (event) {
1437 case BFA_LPS_SM_RESUME:
1438 bfa_sm_set_state(lps, bfa_lps_sm_logout);
1439 bfa_lps_send_logout(lps);
1440 break;
1441
1442 case BFA_LPS_SM_OFFLINE:
1443 bfa_sm_set_state(lps, bfa_lps_sm_init);
1444 bfa_reqq_wcancel(&lps->wqe);
1445 break;
1446
1447 default:
1448 bfa_sm_fault(lps->bfa, event);
1449 }
1450}
1451
1452
1453
1454/**
1455 * lps_pvt BFA LPS private functions
1456 */
1457
1458/**
1459 * return memory requirement
1460 */
1461static void
1462bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
1463 u32 *dm_len)
1464{
1465 if (cfg->drvcfg.min_cfg)
1466 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS;
1467 else
1468 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS;
1469}
1470
1471/**
1472 * bfa module attach at initialization time
1473 */
1474static void
1475bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1476 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
1477{
1478 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1479 struct bfa_lps_s *lps;
1480 int i;
1481
1482 bfa_os_memset(mod, 0, sizeof(struct bfa_lps_mod_s));
1483 mod->num_lps = BFA_LPS_MAX_LPORTS;
1484 if (cfg->drvcfg.min_cfg)
1485 mod->num_lps = BFA_LPS_MIN_LPORTS;
1486 else
1487 mod->num_lps = BFA_LPS_MAX_LPORTS;
1488 mod->lps_arr = lps = (struct bfa_lps_s *) bfa_meminfo_kva(meminfo);
1489
1490 bfa_meminfo_kva(meminfo) += mod->num_lps * sizeof(struct bfa_lps_s);
1491
1492 INIT_LIST_HEAD(&mod->lps_free_q);
1493 INIT_LIST_HEAD(&mod->lps_active_q);
1494
1495 for (i = 0; i < mod->num_lps; i++, lps++) {
1496 lps->bfa = bfa;
1497 lps->lp_tag = (u8) i;
1498 lps->reqq = BFA_REQQ_LPS;
1499 bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
1500 list_add_tail(&lps->qe, &mod->lps_free_q);
1501 }
1502}
1503
/* Module detach hook -- nothing to undo for LPS. */
static void
bfa_lps_detach(struct bfa_s *bfa)
{
}

/* Module start hook -- no per-start work for LPS. */
static void
bfa_lps_start(struct bfa_s *bfa)
{
}

/* Module stop hook -- no per-stop work for LPS. */
static void
bfa_lps_stop(struct bfa_s *bfa)
{
}
1518
1519/**
1520 * IOC in disabled state -- consider all lps offline
1521 */
1522static void
1523bfa_lps_iocdisable(struct bfa_s *bfa)
1524{
1525 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1526 struct bfa_lps_s *lps;
1527 struct list_head *qe, *qen;
1528
1529 list_for_each_safe(qe, qen, &mod->lps_active_q) {
1530 lps = (struct bfa_lps_s *) qe;
1531 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1532 }
1533}
1534
/**
 * Firmware login response: copy the negotiated parameters onto the lps
 * and drive the state machine.
 */
static void
bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;

	bfa_assert(rsp->lp_tag < mod->num_lps);
	lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);

	lps->status = rsp->status;
	switch (rsp->status) {
	case BFA_STATUS_OK:
		/* Successful login: record the fabric-assigned identity
		 * and peer parameters. */
		lps->fport	= rsp->f_port;
		lps->npiv_en	= rsp->npiv_en;
		lps->lp_pid	= rsp->lp_pid;
		lps->pr_bbcred	= bfa_os_ntohs(rsp->bb_credit);
		lps->pr_pwwn	= rsp->port_name;
		lps->pr_nwwn	= rsp->node_name;
		lps->auth_req	= rsp->auth_req;
		lps->lp_mac	= rsp->lp_mac;
		lps->brcd_switch = rsp->brcd_switch;
		lps->fcf_mac	= rsp->fcf_mac;

		break;

	case BFA_STATUS_FABRIC_RJT:
		/* Fabric rejected the login: keep reason/explanation
		 * for the caller to query. */
		lps->lsrjt_rsn = rsp->lsrjt_rsn;
		lps->lsrjt_expl = rsp->lsrjt_expl;

		break;

	case BFA_STATUS_EPROTOCOL:
		lps->ext_status = rsp->ext_status;

		break;

	default:
		/* Nothing to do with other status */
		break;
	}

	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}
1581
1582/**
1583 * Firmware logout response
1584 */
1585static void
1586bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
1587{
1588 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1589 struct bfa_lps_s *lps;
1590
1591 bfa_assert(rsp->lp_tag < mod->num_lps);
1592 lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
1593
1594 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1595}
1596
1597/**
1598 * Firmware received a Clear virtual link request (for FCoE)
1599 */
1600static void
1601bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
1602{
1603 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1604 struct bfa_lps_s *lps;
1605
1606 lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag);
1607
1608 bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
1609}
1610
1611/**
1612 * Space is available in request queue, resume queueing request to firmware.
1613 */
1614static void
1615bfa_lps_reqq_resume(void *lps_arg)
1616{
1617 struct bfa_lps_s *lps = lps_arg;
1618
1619 bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
1620}
1621
1622/**
1623 * lps is freed -- triggered by vport delete
1624 */
1625static void
1626bfa_lps_free(struct bfa_lps_s *lps)
1627{
1628 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa);
1629
1630 lps->lp_pid = 0;
1631 list_del(&lps->qe);
1632 list_add_tail(&lps->qe, &mod->lps_free_q);
1633}
1634
1635/**
1636 * send login request to firmware
1637 */
1638static void
1639bfa_lps_send_login(struct bfa_lps_s *lps)
1640{
1641 struct bfi_lps_login_req_s *m;
1642
1643 m = bfa_reqq_next(lps->bfa, lps->reqq);
1644 bfa_assert(m);
1645
1646 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
1647 bfa_lpuid(lps->bfa));
1648
1649 m->lp_tag = lps->lp_tag;
1650 m->alpa = lps->alpa;
1651 m->pdu_size = bfa_os_htons(lps->pdusz);
1652 m->pwwn = lps->pwwn;
1653 m->nwwn = lps->nwwn;
1654 m->fdisc = lps->fdisc;
1655 m->auth_en = lps->auth_en;
1656
1657 bfa_reqq_produce(lps->bfa, lps->reqq);
1658}
1659
1660/**
1661 * send logout request to firmware
1662 */
1663static void
1664bfa_lps_send_logout(struct bfa_lps_s *lps)
1665{
1666 struct bfi_lps_logout_req_s *m;
1667
1668 m = bfa_reqq_next(lps->bfa, lps->reqq);
1669 bfa_assert(m);
1670
1671 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
1672 bfa_lpuid(lps->bfa));
1673
1674 m->lp_tag = lps->lp_tag;
1675 m->port_name = lps->pwwn;
1676 bfa_reqq_produce(lps->bfa, lps->reqq);
1677}
1678
1679/**
1680 * Indirect login completion handler for non-fcs
1681 */
1682static void
1683bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
1684{
1685 struct bfa_lps_s *lps = arg;
1686
1687 if (!complete)
1688 return;
1689
1690 if (lps->fdisc)
1691 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1692 else
1693 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1694}
1695
1696/**
1697 * Login completion handler -- direct call for fcs, queue for others
1698 */
1699static void
1700bfa_lps_login_comp(struct bfa_lps_s *lps)
1701{
1702 if (!lps->bfa->fcs) {
1703 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
1704 lps);
1705 return;
1706 }
1707
1708 if (lps->fdisc)
1709 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1710 else
1711 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1712}
1713
1714/**
1715 * Indirect logout completion handler for non-fcs
1716 */
1717static void
1718bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
1719{
1720 struct bfa_lps_s *lps = arg;
1721
1722 if (!complete)
1723 return;
1724
1725 if (lps->fdisc)
1726 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1727}
1728
1729/**
1730 * Logout completion handler -- direct call for fcs, queue for others
1731 */
1732static void
1733bfa_lps_logout_comp(struct bfa_lps_s *lps)
1734{
1735 if (!lps->bfa->fcs) {
1736 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
1737 lps);
1738 return;
1739 }
1740 if (lps->fdisc)
1741 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1742}
1743
1744/**
1745 * Clear virtual link completion handler for non-fcs
1746 */
1747static void
1748bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
1749{
1750 struct bfa_lps_s *lps = arg;
1751
1752 if (!complete)
1753 return;
1754
1755 /* Clear virtual link to base port will result in link down */
1756 if (lps->fdisc)
1757 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1758}
1759
1760/**
1761 * Received Clear virtual link event --direct call for fcs,
1762 * queue for others
1763 */
1764static void
1765bfa_lps_cvl_event(struct bfa_lps_s *lps)
1766{
1767 if (!lps->bfa->fcs) {
1768 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
1769 lps);
1770 return;
1771 }
1772
1773 /* Clear virtual link to base port will result in link down */
1774 if (lps->fdisc)
1775 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1776}
1777
1778
1779
1780/**
1781 * lps_public BFA LPS public functions
1782 */
1783
1784u32
1785bfa_lps_get_max_vport(struct bfa_s *bfa)
1786{
1787 if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
1788 return BFA_LPS_MAX_VPORTS_SUPP_CT;
1789 else
1790 return BFA_LPS_MAX_VPORTS_SUPP_CB;
1791}
1792
1793/**
1794 * Allocate a lport srvice tag.
1795 */
1796struct bfa_lps_s *
1797bfa_lps_alloc(struct bfa_s *bfa)
1798{
1799 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1800 struct bfa_lps_s *lps = NULL;
1801
1802 bfa_q_deq(&mod->lps_free_q, &lps);
1803
1804 if (lps == NULL)
1805 return NULL;
1806
1807 list_add_tail(&lps->qe, &mod->lps_active_q);
1808
1809 bfa_sm_set_state(lps, bfa_lps_sm_init);
1810 return lps;
1811}
1812
/**
 * Free lport service tag. This can be called anytime after an alloc.
 * No need to wait for any pending login/logout completions.
 */
void
bfa_lps_delete(struct bfa_lps_s *lps)
{
	bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
}
1822
1823/**
1824 * Initiate a lport login.
1825 */
1826void
1827bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
1828 wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
1829{
1830 lps->uarg = uarg;
1831 lps->alpa = alpa;
1832 lps->pdusz = pdusz;
1833 lps->pwwn = pwwn;
1834 lps->nwwn = nwwn;
1835 lps->fdisc = BFA_FALSE;
1836 lps->auth_en = auth_en;
1837 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1838}
1839
1840/**
1841 * Initiate a lport fdisc login.
1842 */
1843void
1844bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
1845 wwn_t nwwn)
1846{
1847 lps->uarg = uarg;
1848 lps->alpa = 0;
1849 lps->pdusz = pdusz;
1850 lps->pwwn = pwwn;
1851 lps->nwwn = nwwn;
1852 lps->fdisc = BFA_TRUE;
1853 lps->auth_en = BFA_FALSE;
1854 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1855}
1856
/**
 * Initiate a lport logout (flogi).
 */
void
bfa_lps_flogo(struct bfa_lps_s *lps)
{
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
}

/**
 * Initiate a lport FDISC logout.
 */
void
bfa_lps_fdisclogo(struct bfa_lps_s *lps)
{
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
}

/**
 * Discard a pending login request -- should be called only for
 * link down handling.
 */
void
bfa_lps_discard(struct bfa_lps_s *lps)
{
	bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
}
1884
/**
 * Return lport services tag.
 */
u8
bfa_lps_get_tag(struct bfa_lps_s *lps)
{
	return lps->lp_tag;
}
1893
1894/**
1895 * Return lport services tag given the pid
1896 */
1897u8
1898bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
1899{
1900 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1901 struct bfa_lps_s *lps;
1902 int i;
1903
1904 for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
1905 if (lps->lp_pid == pid)
1906 return lps->lp_tag;
1907 }
1908
1909 /* Return base port tag anyway */
1910 return 0;
1911}
1912
/**
 * Return TRUE if the fabric login indicated support for NPIV.
 */
bfa_boolean_t
bfa_lps_is_npiv_en(struct bfa_lps_s *lps)
{
	return lps->npiv_en;
}

/**
 * Return TRUE if attached to F-Port, else return FALSE
 */
bfa_boolean_t
bfa_lps_is_fport(struct bfa_lps_s *lps)
{
	return lps->fport;
}

/**
 * Return TRUE if attached to a Brocade Fabric
 */
bfa_boolean_t
bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps)
{
	return lps->brcd_switch;
}
/**
 * Return TRUE if authentication is required.
 */
bfa_boolean_t
bfa_lps_is_authreq(struct bfa_lps_s *lps)
{
	return lps->auth_req;
}

/* Return the extended protocol status from the last login attempt. */
bfa_eproto_status_t
bfa_lps_get_extstatus(struct bfa_lps_s *lps)
{
	return lps->ext_status;
}

/**
 * Return the port id assigned to the lport.
 */
u32
bfa_lps_get_pid(struct bfa_lps_s *lps)
{
	return lps->lp_pid;
}

/**
 * Return the port id assigned to the base lport (tag 0).
 */
u32
bfa_lps_get_base_pid(struct bfa_s *bfa)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);

	return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
}

/**
 * Return bb_credit assigned in FLOGI response
 */
u16
bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps)
{
	return lps->pr_bbcred;
}

/**
 * Return peer port name
 */
wwn_t
bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps)
{
	return lps->pr_pwwn;
}

/**
 * Return peer node name
 */
wwn_t
bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps)
{
	return lps->pr_nwwn;
}

/**
 * Return the LS_RJT reason code if the login request was rejected.
 */
u8
bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps)
{
	return lps->lsrjt_rsn;
}

/**
 * Return the LS_RJT explanation code if the login request was rejected.
 */
u8
bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps)
{
	return lps->lsrjt_expl;
}

/**
 * Return fpma/spma MAC for lport
 */
mac_t
bfa_lps_get_lp_mac(struct bfa_lps_s *lps)
{
	return lps->lp_mac;
}
2027
2028/**
2029 * LPS firmware message class handler.
2030 */
2031void
2032bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2033{
2034 union bfi_lps_i2h_msg_u msg;
2035
2036 bfa_trc(bfa, m->mhdr.msg_id);
2037 msg.msg = m;
2038
2039 switch (m->mhdr.msg_id) {
2040 case BFI_LPS_H2I_LOGIN_RSP:
2041 bfa_lps_login_rsp(bfa, msg.login_rsp);
2042 break;
2043
2044 case BFI_LPS_H2I_LOGOUT_RSP:
2045 bfa_lps_logout_rsp(bfa, msg.logout_rsp);
2046 break;
2047
2048 case BFI_LPS_H2I_CVL_EVENT:
2049 bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
2050 break;
2051
2052 default:
2053 bfa_trc(bfa, m->mhdr.msg_id);
2054 bfa_assert(0);
2055 }
2056}
2057
/**
 * FC PORT state machine functions
 */

/*
 * Uninitialized state -- waiting for the module start.
 */
static void
bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
		     enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/**
		 * Start event after IOC is configured and BFA is started.
		 */
		if (bfa_fcport_send_enable(fcport)) {
			bfa_trc(fcport->bfa, BFA_TRUE);
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		} else {
			/* No request-queue space; wait in qwait until the
			 * enable can be sent. */
			bfa_trc(fcport->bfa, BFA_FALSE);
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);
		}
		break;

	case BFA_FCPORT_SM_ENABLE:
		/**
		 * Port is persistently configured to be in enabled state. Do
		 * not change state. Port enabling is done when START event is
		 * received.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/**
		 * If a port is persistently configured to be disabled, the
		 * first event will a port disable request.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2106
/*
 * Enable requested but the enable message is still waiting for
 * request-queue space.
 */
static void
bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
			     enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Queue space available: send the deferred enable. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		bfa_fcport_send_enable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/**
		 * Already enable is in progress.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/**
		 * Just send disable request to firmware when room becomes
		 * available in request queue.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/**
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2163
/*
 * Enable request has been sent to firmware; awaiting the response or a
 * link event.
 */
static void
bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
		       enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
	case BFA_FCPORT_SM_LINKDOWN:
		/* Enable acknowledged (or link reported down):
		 * wait in linkdown for the link to come up. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		break;

	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);

		bfa_assert(fcport->event_cbfn);
		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/**
		 * Already being enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2218
/*
 * Port is enabled but the link is down; awaiting link-up.
 */
static void
bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
		       enum bfa_fcport_sm_event event)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
		bfa_assert(fcport->event_cbfn);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
		/* Non-FC (FCoE) mode: log the FIP FCF discovery outcome
		 * carried in the link-up event. */
		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {

			bfa_trc(fcport->bfa,
				pevent->link_state.vc_fcf.fcf.fipenabled);
			bfa_trc(fcport->bfa,
				pevent->link_state.vc_fcf.fcf.fipfailed);

			if (pevent->link_state.vc_fcf.fcf.fipfailed)
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					     BFA_PL_EID_FIP_FCF_DISC, 0,
					     "FIP FCF Discovery Failed");
			else
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					     BFA_PL_EID_FIP_FCF_DISC, 0,
					     "FIP FCF Discovered");
		}

		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, log_level,
			"Base port online: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		/**
		 * Possible to get link down event.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/**
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2297
/**
 * Port is enabled and link is up: handle disable/stop/linkdown/hwfail
 * transitions and log loss of fabric connectivity.
 */
static void
bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
	enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_ENABLE:
		/**
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
				bfa_fcport_sm_disabling_qwait);

		/* the link goes down as a consequence of the disable */
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, log_level,
			"Base port offline: WWN = %s\n", pwwn_buf);
		BFA_LOG(KERN_INFO, bfad, log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
		wwn2str(pwwn_buf, fcport->pwwn);
		/* unsolicited link down on an enabled port is an error */
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_fcport_reset_linkinfo(fcport);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2379
/**
 * A disable request could not be queued (request queue full): wait for
 * queue space before sending the disable to firmware.
 */
static void
bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* queue space is now available; issue the deferred disable */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* enable while disable is pending: a toggle is now required */
		bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/**
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/**
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2424
/**
 * A disable is queued-waiting and an enable arrived: on queue resume,
 * send the disable followed immediately by the enable (port toggle).
 */
static void
bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* disable first, then re-enable; qwait again if queue fills */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
				bfa_fcport_sm_enabling_qwait);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* already toggling towards enabled */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* cancel the pending enable half of the toggle */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/**
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2471
/**
 * Disable request has been sent to firmware: wait for the response
 * while accepting a re-enable or stop.
 */
static void
bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/**
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* re-enable while disable response is outstanding */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
				bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/**
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2525
/**
 * Port is disabled: only an enable (or stop/hwfail) changes state.
 */
static void
bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/**
		 * Ignore start event for a port that is disabled.
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* qwait variant is used when the request queue is full */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
				bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/**
		 * Already disabled.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2573
2574static void
2575bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
2576 enum bfa_fcport_sm_event event)
2577{
2578 bfa_trc(fcport->bfa, event);
2579
2580 switch (event) {
2581 case BFA_FCPORT_SM_START:
2582 if (bfa_fcport_send_enable(fcport))
2583 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2584 else
2585 bfa_sm_set_state(fcport,
2586 bfa_fcport_sm_enabling_qwait);
2587 break;
2588
2589 default:
2590 /**
2591 * Ignore all other events.
2592 */
2593 ;
2594 }
2595}
2596
2597/**
2598 * Port is enabled. IOC is down/failed.
2599 */
2600static void
2601bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
2602 enum bfa_fcport_sm_event event)
2603{
2604 bfa_trc(fcport->bfa, event);
2605
2606 switch (event) {
2607 case BFA_FCPORT_SM_START:
2608 if (bfa_fcport_send_enable(fcport))
2609 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2610 else
2611 bfa_sm_set_state(fcport,
2612 bfa_fcport_sm_enabling_qwait);
2613 break;
2614
2615 default:
2616 /**
2617 * Ignore all events.
2618 */
2619 ;
2620 }
2621}
2622
2623/**
2624 * Port is disabled. IOC is down/failed.
2625 */
2626static void
2627bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
2628 enum bfa_fcport_sm_event event)
2629{
2630 bfa_trc(fcport->bfa, event);
2631
2632 switch (event) {
2633 case BFA_FCPORT_SM_START:
2634 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2635 break;
2636
2637 case BFA_FCPORT_SM_ENABLE:
2638 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2639 break;
2640
2641 default:
2642 /**
2643 * Ignore all events.
2644 */
2645 ;
2646 }
2647}
2648
2649/**
2650 * Link state is down
2651 */
2652static void
2653bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
2654 enum bfa_fcport_ln_sm_event event)
2655{
2656 bfa_trc(ln->fcport->bfa, event);
2657
2658 switch (event) {
2659 case BFA_FCPORT_LN_SM_LINKUP:
2660 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2661 bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2662 break;
2663
2664 default:
2665 bfa_sm_fault(ln->fcport->bfa, event);
2666 }
2667}
2668
2669/**
2670 * Link state is waiting for down notification
2671 */
2672static void
2673bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
2674 enum bfa_fcport_ln_sm_event event)
2675{
2676 bfa_trc(ln->fcport->bfa, event);
2677
2678 switch (event) {
2679 case BFA_FCPORT_LN_SM_LINKUP:
2680 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2681 break;
2682
2683 case BFA_FCPORT_LN_SM_NOTIFICATION:
2684 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2685 break;
2686
2687 default:
2688 bfa_sm_fault(ln->fcport->bfa, event);
2689 }
2690}
2691
2692/**
2693 * Link state is waiting for down notification and there is a pending up
2694 */
2695static void
2696bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
2697 enum bfa_fcport_ln_sm_event event)
2698{
2699 bfa_trc(ln->fcport->bfa, event);
2700
2701 switch (event) {
2702 case BFA_FCPORT_LN_SM_LINKDOWN:
2703 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2704 break;
2705
2706 case BFA_FCPORT_LN_SM_NOTIFICATION:
2707 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2708 bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2709 break;
2710
2711 default:
2712 bfa_sm_fault(ln->fcport->bfa, event);
2713 }
2714}
2715
2716/**
2717 * Link state is up
2718 */
2719static void
2720bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
2721 enum bfa_fcport_ln_sm_event event)
2722{
2723 bfa_trc(ln->fcport->bfa, event);
2724
2725 switch (event) {
2726 case BFA_FCPORT_LN_SM_LINKDOWN:
2727 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2728 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2729 break;
2730
2731 default:
2732 bfa_sm_fault(ln->fcport->bfa, event);
2733 }
2734}
2735
2736/**
2737 * Link state is waiting for up notification
2738 */
2739static void
2740bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
2741 enum bfa_fcport_ln_sm_event event)
2742{
2743 bfa_trc(ln->fcport->bfa, event);
2744
2745 switch (event) {
2746 case BFA_FCPORT_LN_SM_LINKDOWN:
2747 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2748 break;
2749
2750 case BFA_FCPORT_LN_SM_NOTIFICATION:
2751 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
2752 break;
2753
2754 default:
2755 bfa_sm_fault(ln->fcport->bfa, event);
2756 }
2757}
2758
2759/**
2760 * Link state is waiting for up notification and there is a pending down
2761 */
2762static void
2763bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
2764 enum bfa_fcport_ln_sm_event event)
2765{
2766 bfa_trc(ln->fcport->bfa, event);
2767
2768 switch (event) {
2769 case BFA_FCPORT_LN_SM_LINKUP:
2770 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
2771 break;
2772
2773 case BFA_FCPORT_LN_SM_NOTIFICATION:
2774 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2775 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2776 break;
2777
2778 default:
2779 bfa_sm_fault(ln->fcport->bfa, event);
2780 }
2781}
2782
2783/**
2784 * Link state is waiting for up notification and there are pending down and up
2785 */
2786static void
2787bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
2788 enum bfa_fcport_ln_sm_event event)
2789{
2790 bfa_trc(ln->fcport->bfa, event);
2791
2792 switch (event) {
2793 case BFA_FCPORT_LN_SM_LINKDOWN:
2794 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2795 break;
2796
2797 case BFA_FCPORT_LN_SM_NOTIFICATION:
2798 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2799 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2800 break;
2801
2802 default:
2803 bfa_sm_fault(ln->fcport->bfa, event);
2804 }
2805}
2806
2807
2808
2809/**
2810 * hal_port_private
2811 */
2812
2813static void
2814__bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
2815{
2816 struct bfa_fcport_ln_s *ln = cbarg;
2817
2818 if (complete)
2819 ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
2820 else
2821 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2822}
2823
2824/**
2825 * Send SCN notification to upper layers.
2826 * trunk - false if caller is fcport to ignore fcport event in trunked mode
2827 */
2828static void
2829bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
2830 bfa_boolean_t trunk)
2831{
2832 if (fcport->cfg.trunked && !trunk)
2833 return;
2834
2835 switch (event) {
2836 case BFA_PORT_LINKUP:
2837 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
2838 break;
2839 case BFA_PORT_LINKDOWN:
2840 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
2841 break;
2842 default:
2843 bfa_assert(0);
2844 }
2845}
2846
2847static void
2848bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
2849{
2850 struct bfa_fcport_s *fcport = ln->fcport;
2851
2852 if (fcport->bfa->fcs) {
2853 fcport->event_cbfn(fcport->event_cbarg, event);
2854 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2855 } else {
2856 ln->ln_event = event;
2857 bfa_cb_queue(fcport->bfa, &ln->ln_qe,
2858 __bfa_cb_fcport_event, ln);
2859 }
2860}
2861
/* DMA space for the fcport statistics buffer, cache-line aligned */
#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
			BFA_CACHELINE_SZ))

/**
 * Report the DMA memory this module needs (stats buffer only; no
 * non-DMA memory is requested).
 */
static void
bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
		u32 *dm_len)
{
	*dm_len += FCPORT_STATS_DMA_SZ;
}
2871
2872static void
2873bfa_fcport_qresume(void *cbarg)
2874{
2875 struct bfa_fcport_s *fcport = cbarg;
2876
2877 bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
2878}
2879
2880static void
2881bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
2882{
2883 u8 *dm_kva;
2884 u64 dm_pa;
2885
2886 dm_kva = bfa_meminfo_dma_virt(meminfo);
2887 dm_pa = bfa_meminfo_dma_phys(meminfo);
2888
2889 fcport->stats_kva = dm_kva;
2890 fcport->stats_pa = dm_pa;
2891 fcport->stats = (union bfa_fcport_stats_u *) dm_kva;
2892
2893 dm_kva += FCPORT_STATS_DMA_SZ;
2894 dm_pa += FCPORT_STATS_DMA_SZ;
2895
2896 bfa_meminfo_dma_virt(meminfo) = dm_kva;
2897 bfa_meminfo_dma_phys(meminfo) = dm_pa;
2898}
2899
/**
 * Memory initialization: zero the module state, claim DMA memory,
 * put both state machines in their initial states and install the
 * default port configuration.
 */
static void
bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
	struct bfa_fcport_ln_s *ln = &fcport->ln;
	struct bfa_timeval_s tv;

	bfa_os_memset(fcport, 0, sizeof(struct bfa_fcport_s));
	fcport->bfa = bfa;
	ln->fcport = fcport;

	bfa_fcport_mem_claim(fcport, meminfo);

	bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
	bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);

	/**
	 * initialize time stamp for stats reset
	 */
	bfa_os_gettimeofday(&tv);
	fcport->stats_reset_time = tv.tv_sec;

	/**
	 * initialize and set default configuration
	 */
	port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
	port_cfg->speed = BFA_PORT_SPEED_AUTO;
	port_cfg->trunked = BFA_FALSE;
	port_cfg->maxfrsize = 0;	/* 0: filled from IOC in bfa_fcport_init() */

	port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;

	bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
}
2939
/**
 * Module detach entry point — fcport has nothing to tear down.
 */
static void
bfa_fcport_detach(struct bfa_s *bfa)
{
}
2944
/**
 * Called when IOC is ready. Kicks the port state machine with a
 * start event.
 */
static void
bfa_fcport_start(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
}
2953
/**
 * Called before IOC is stopped. Stops the port state machine and
 * takes the trunk offline.
 */
static void
bfa_fcport_stop(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
	bfa_trunk_iocdisable(bfa);
}
2963
/**
 * Called when IOC failure is detected. Sends HWFAIL to the port state
 * machine and takes the trunk offline.
 */
static void
bfa_fcport_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
	bfa_trunk_iocdisable(bfa);
}
2975
/**
 * Copy link attributes (speed, topology, QoS, FCoE VLAN) out of the
 * firmware link-up event saved in fcport->event_arg.
 */
static void
bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;

	fcport->speed = pevent->link_state.speed;
	fcport->topology = pevent->link_state.topology;

	/* NOTE(review): myalpa is zeroed for loop topology here —
	 * presumably assigned elsewhere; confirm against firmware docs */
	if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)
		fcport->myalpa = 0;

	/* QoS Details */
	bfa_os_assign(fcport->qos_attr, pevent->link_state.qos_attr);
	bfa_os_assign(fcport->qos_vc_attr,
		pevent->link_state.vc_fcf.qos_vc_attr);

	/**
	 * update trunk state if applicable
	 */
	if (!fcport->cfg.trunked)
		trunk->attr.state = BFA_TRUNK_DISABLED;

	/* update FCoE specific */
	fcport->fcoe_vlan = bfa_os_ntohs(pevent->link_state.vc_fcf.fcf.vlan);

	bfa_trc(fcport->bfa, fcport->speed);
	bfa_trc(fcport->bfa, fcport->topology);
}
3005
/**
 * Invalidate cached link attributes when the link goes down.
 */
static void
bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
{
	fcport->speed = BFA_PORT_SPEED_UNKNOWN;
	fcport->topology = BFA_PORT_TOPOLOGY_NONE;
}
3012
/**
 * Send port enable message to firmware.
 *
 * Returns BFA_TRUE if the request was queued, BFA_FALSE if the request
 * queue was full (the caller is then parked on reqq_wait and retried
 * via bfa_fcport_qresume()).
 */
static bfa_boolean_t
bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_enable_req_s *m;

	/**
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
			&fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
		bfa_lpuid(fcport->bfa));
	m->nwwn = fcport->nwwn;
	m->pwwn = fcport->pwwn;
	m->port_cfg = fcport->cfg;
	m->msgtag = fcport->msgtag;
	/* maxfrsize goes on the wire in big-endian order */
	m->port_cfg.maxfrsize = bfa_os_htons(fcport->cfg.maxfrsize);
	bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
	return BFA_TRUE;
}
3054
/**
 * Send port disable message to firmware.
 *
 * Returns BFA_TRUE if the request was queued, BFA_FALSE if the request
 * queue was full (the caller is then parked on reqq_wait and retried
 * via bfa_fcport_qresume()).
 */
static bfa_boolean_t
bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_req_s *m;

	/**
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
			&fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
		bfa_lpuid(fcport->bfa));
	m->msgtag = fcport->msgtag;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);

	return BFA_TRUE;
}
3090
/**
 * Load the port and node WWNs from IOC hardware data.
 */
static void
bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
{
	fcport->pwwn = bfa_ioc_get_pwwn(&fcport->bfa->ioc);
	fcport->nwwn = bfa_ioc_get_nwwn(&fcport->bfa->ioc);

	bfa_trc(fcport->bfa, fcport->pwwn);
	bfa_trc(fcport->bfa, fcport->nwwn);
}
3100
/**
 * Send the configured transmit BB-credit to firmware. Best-effort:
 * silently dropped (trace only) if the request queue is full — there
 * is no qwait/retry for this message.
 */
static void
bfa_fcport_send_txcredit(void *port_cbarg)
{

	struct bfa_fcport_s *fcport = port_cbarg;
	struct bfi_fcport_set_svc_params_req_s *m;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
		return;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
		bfa_lpuid(fcport->bfa));
	m->tx_bbcredit = bfa_os_htons((u16)fcport->cfg.tx_bbcredit);

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
3126
3127static void
3128bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
3129 struct bfa_qos_stats_s *s)
3130{
3131 u32 *dip = (u32 *) d;
3132 u32 *sip = (u32 *) s;
3133 int i;
3134
3135 /* Now swap the 32 bit fields */
3136 for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
3137 dip[i] = bfa_os_ntohl(sip[i]);
3138}
3139
/**
 * Convert FCoE statistics from firmware to host order. Counters are
 * processed as 64-bit quantities (two u32 words at a time): each word
 * is byte-swapped, and on little-endian hosts the two halves are also
 * exchanged to restore the 64-bit value.
 */
static void
bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
			struct bfa_fcoe_stats_s *s)
{
	u32 *dip = (u32 *) d;
	u32 *sip = (u32 *) s;
	int i;

	for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
		i = i + 2) {
#ifdef __BIGENDIAN
	dip[i] = bfa_os_ntohl(sip[i]);
	dip[i + 1] = bfa_os_ntohl(sip[i + 1]);
#else
	/* swap the word halves as well as the bytes within each word */
	dip[i] = bfa_os_ntohl(sip[i + 1]);
	dip[i + 1] = bfa_os_ntohl(sip[i]);
#endif
	}
}
3159
/**
 * Deferred completion for a stats-get request: on success, byte-swap
 * the DMA'd stats into the caller's buffer and invoke the caller's
 * callback; on cancellation, just clear the busy state.
 */
static void
__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = cbarg;

	if (complete) {
		if (fcport->stats_status == BFA_STATUS_OK) {
			struct bfa_timeval_s tv;

			/* Swap FC QoS or FCoE stats */
			if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
				bfa_fcport_qos_stats_swap(
					&fcport->stats_ret->fcqos,
					&fcport->stats->fcqos);
			} else {
				bfa_fcport_fcoe_stats_swap(
					&fcport->stats_ret->fcoe,
					&fcport->stats->fcoe);

				/* seconds since the last stats reset */
				bfa_os_gettimeofday(&tv);
				fcport->stats_ret->fcoe.secs_reset =
					tv.tv_sec - fcport->stats_reset_time;
			}
		}
		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
	} else {
		fcport->stats_busy = BFA_FALSE;
		fcport->stats_status = BFA_STATUS_OK;
	}
}
3190
/**
 * Stats-get request timed out: cancel any pending queue wait and
 * complete the request with BFA_STATUS_ETIMER via the callback queue.
 */
static void
bfa_fcport_stats_get_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	if (fcport->stats_qfull) {
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	fcport->stats_status = BFA_STATUS_ETIMER;
	bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get,
		fcport);
}
3207
/**
 * Queue a stats-get request to firmware; if the request queue is full,
 * park on stats_reqq_wait and retry from this same function.
 */
static void
bfa_fcport_send_stats_get(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_get, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
		bfa_lpuid(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
3231
/**
 * Deferred completion for a stats-clear request: on success, restart
 * the stats-reset time stamp and invoke the caller's callback; on
 * cancellation, just clear the busy state.
 */
static void
__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = cbarg;

	if (complete) {
		struct bfa_timeval_s tv;

		/**
		 * re-initialize time stamp for stats reset
		 */
		bfa_os_gettimeofday(&tv);
		fcport->stats_reset_time = tv.tv_sec;

		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
	} else {
		fcport->stats_busy = BFA_FALSE;
		fcport->stats_status = BFA_STATUS_OK;
	}
}
3252
/**
 * Stats-clear request timed out: cancel any pending queue wait and
 * complete the request with BFA_STATUS_ETIMER via the callback queue.
 */
static void
bfa_fcport_stats_clr_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	if (fcport->stats_qfull) {
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	fcport->stats_status = BFA_STATUS_ETIMER;
	bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
			__bfa_cb_fcport_stats_clr, fcport);
}
3269
/**
 * Queue a stats-clear request to firmware; if the request queue is
 * full, park on stats_reqq_wait and retry from this same function.
 */
static void
bfa_fcport_send_stats_clear(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_clear, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
			bfa_lpuid(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
3293
/**
 * Handle trunk SCN event from firmware.
 *
 * Updates the cached trunk attributes and per-link attributes from the
 * SCN payload, logs which links are up, and notifies upper layers if
 * the trunk state changed.
 */
static void
bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
{
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	struct bfi_fcport_trunk_link_s *tlink;
	struct bfa_trunk_link_attr_s *lattr;
	enum bfa_trunk_state state_prev;
	int i;
	int link_bm = 0;	/* bit i set when trunk link i is up */

	bfa_trc(fcport->bfa, fcport->cfg.trunked);
	bfa_assert(scn->trunk_state == BFA_TRUNK_ONLINE ||
		scn->trunk_state == BFA_TRUNK_OFFLINE);

	bfa_trc(fcport->bfa, trunk->attr.state);
	bfa_trc(fcport->bfa, scn->trunk_state);
	bfa_trc(fcport->bfa, scn->trunk_speed);

	/**
	 * Save off new state for trunk attribute query
	 */
	state_prev = trunk->attr.state;
	if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
		trunk->attr.state = scn->trunk_state;
	trunk->attr.speed = scn->trunk_speed;
	for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
		lattr = &trunk->attr.link_attr[i];
		tlink = &scn->tlink[i];

		lattr->link_state = tlink->state;
		lattr->trunk_wwn = tlink->trunk_wwn;
		lattr->fctl = tlink->fctl;
		lattr->speed = tlink->speed;
		lattr->deskew = bfa_os_ntohl(tlink->deskew);

		if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
			fcport->speed = tlink->speed;
			fcport->topology = BFA_PORT_TOPOLOGY_P2P;
			link_bm |= 1 << i;
		}

		bfa_trc(fcport->bfa, lattr->link_state);
		bfa_trc(fcport->bfa, lattr->trunk_wwn);
		bfa_trc(fcport->bfa, lattr->fctl);
		bfa_trc(fcport->bfa, lattr->speed);
		bfa_trc(fcport->bfa, lattr->deskew);
	}

	/* log which of the two trunk links are up */
	switch (link_bm) {
	case 3:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
		break;
	case 2:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
		break;
	case 1:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
		break;
	default:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
	}

	/**
	 * Notify upper layers if trunk state changed.
	 */
	if ((state_prev != trunk->attr.state) ||
		(scn->trunk_state == BFA_TRUNK_OFFLINE)) {
		bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
			BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
	}
}
3372
/**
 * IOC went down: if trunking is configured, notify upper layers of
 * link down and reset all cached trunk/link attributes.
 */
static void
bfa_trunk_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	int i = 0;

	/**
	 * In trunked mode, notify upper layers that link is down
	 */
	if (fcport->cfg.trunked) {
		if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
			bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);

		fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
		fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
		for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
			fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
			fcport->trunk.attr.link_attr[i].fctl =
				BFA_TRUNK_LINK_FCTL_NORMAL;
			fcport->trunk.attr.link_attr[i].link_state =
				BFA_TRUNK_LINK_STATE_DN_LINKDN;
			fcport->trunk.attr.link_attr[i].speed =
				BFA_PORT_SPEED_UNKNOWN;
			fcport->trunk.attr.link_attr[i].deskew = 0;
		}
	}
}
3400
3401
3402
3403/**
3404 * hal_port_public
3405 */
3406
/**
 * Called to initialize port attributes
 */
void
bfa_fcport_init(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	/**
	 * Initialize port attributes from IOC hardware data.
	 */
	bfa_fcport_set_wwns(fcport);
	/* maxfrsize == 0 means "use the IOC's maximum frame size" */
	if (fcport->cfg.maxfrsize == 0)
		fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
	fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
	fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);

	/* all of these must be non-zero after IOC initialization */
	bfa_assert(fcport->cfg.maxfrsize);
	bfa_assert(fcport->cfg.rx_bbcredit);
	bfa_assert(fcport->speed_sup);
}
3428
/**
 * Firmware message handler. Dispatches firmware-to-host fcport
 * messages into state machine events and stats completions.
 */
void
bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	union bfi_fcport_i2h_msg_u i2hmsg;

	i2hmsg.msg = msg;
	/* save the raw message so SM handlers can read the payload */
	fcport->event_arg.i2hmsg = i2hmsg;

	bfa_trc(bfa, msg->mhdr.msg_id);
	bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));

	switch (msg->mhdr.msg_id) {
	case BFI_FCPORT_I2H_ENABLE_RSP:
		/* stale responses (old msgtag) are dropped */
		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		break;

	case BFI_FCPORT_I2H_DISABLE_RSP:
		/* NOTE(review): msgtag is read through the penable_rsp
		 * union member for the disable response too — presumably
		 * both responses share the msgtag layout; confirm against
		 * the bfi_fcport message definitions */
		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		break;

	case BFI_FCPORT_I2H_EVENT:
		if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
		else
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
		break;

	case BFI_FCPORT_I2H_TRUNK_SCN:
		bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
		break;

	case BFI_FCPORT_I2H_STATS_GET_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (fcport->stats_busy == BFA_FALSE ||
			fcport->stats_status == BFA_STATUS_ETIMER)
			break;

		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = i2hmsg.pstatsget_rsp->status;
		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
				__bfa_cb_fcport_stats_get, fcport);
		break;

	case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (fcport->stats_busy == BFA_FALSE ||
			fcport->stats_status == BFA_STATUS_ETIMER)
			break;

		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = BFA_STATUS_OK;
		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
				__bfa_cb_fcport_stats_clr, fcport);
		break;

	case BFI_FCPORT_I2H_ENABLE_AEN:
		bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
		break;

	case BFI_FCPORT_I2H_DISABLE_AEN:
		bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
		break;

	default:
		bfa_assert(0);
		break;
	}
}
3507
3508
3509
3510/**
3511 * hal_port_api
3512 */
3513
3514/**
3515 * Registered callback for port events.
3516 */
3517void
3518bfa_fcport_event_register(struct bfa_s *bfa,
3519 void (*cbfn) (void *cbarg,
3520 enum bfa_port_linkstate event),
3521 void *cbarg)
3522{
3523 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3524
3525 fcport->event_cbfn = cbfn;
3526 fcport->event_cbarg = cbarg;
3527}
3528
3529bfa_status_t
3530bfa_fcport_enable(struct bfa_s *bfa)
3531{
3532 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3533
3534 if (bfa_ioc_is_disabled(&bfa->ioc))
3535 return BFA_STATUS_IOC_DISABLED;
3536
3537 if (fcport->diag_busy)
3538 return BFA_STATUS_DIAG_BUSY;
3539
3540 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
3541 return BFA_STATUS_OK;
3542}
3543
3544bfa_status_t
3545bfa_fcport_disable(struct bfa_s *bfa)
3546{
3547
3548 if (bfa_ioc_is_disabled(&bfa->ioc))
3549 return BFA_STATUS_IOC_DISABLED;
3550
3551 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
3552 return BFA_STATUS_OK;
3553}
3554
3555/**
3556 * Configure port speed.
3557 */
3558bfa_status_t
3559bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3560{
3561 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3562
3563 bfa_trc(bfa, speed);
3564
3565 if (fcport->cfg.trunked == BFA_TRUE)
3566 return BFA_STATUS_TRUNK_ENABLED;
3567 if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
3568 bfa_trc(bfa, fcport->speed_sup);
3569 return BFA_STATUS_UNSUPP_SPEED;
3570 }
3571
3572 fcport->cfg.speed = speed;
3573
3574 return BFA_STATUS_OK;
3575}
3576
3577/**
3578 * Get current speed.
3579 */
3580enum bfa_port_speed
3581bfa_fcport_get_speed(struct bfa_s *bfa)
3582{
3583 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3584
3585 return fcport->speed;
3586}
3587
3588/**
3589 * Configure port topology.
3590 */
3591bfa_status_t
3592bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3593{
3594 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3595
3596 bfa_trc(bfa, topology);
3597 bfa_trc(bfa, fcport->cfg.topology);
3598
3599 switch (topology) {
3600 case BFA_PORT_TOPOLOGY_P2P:
3601 case BFA_PORT_TOPOLOGY_LOOP:
3602 case BFA_PORT_TOPOLOGY_AUTO:
3603 break;
3604
3605 default:
3606 return BFA_STATUS_EINVAL;
3607 }
3608
3609 fcport->cfg.topology = topology;
3610 return BFA_STATUS_OK;
3611}
3612
3613/**
3614 * Get current topology.
3615 */
3616enum bfa_port_topology
3617bfa_fcport_get_topology(struct bfa_s *bfa)
3618{
3619 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3620
3621 return fcport->topology;
3622}
3623
3624bfa_status_t
3625bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
3626{
3627 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3628
3629 bfa_trc(bfa, alpa);
3630 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3631 bfa_trc(bfa, fcport->cfg.hardalpa);
3632
3633 fcport->cfg.cfg_hardalpa = BFA_TRUE;
3634 fcport->cfg.hardalpa = alpa;
3635
3636 return BFA_STATUS_OK;
3637}
3638
3639bfa_status_t
3640bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
3641{
3642 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3643
3644 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3645 bfa_trc(bfa, fcport->cfg.hardalpa);
3646
3647 fcport->cfg.cfg_hardalpa = BFA_FALSE;
3648 return BFA_STATUS_OK;
3649}
3650
3651bfa_boolean_t
3652bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
3653{
3654 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3655
3656 *alpa = fcport->cfg.hardalpa;
3657 return fcport->cfg.cfg_hardalpa;
3658}
3659
3660u8
3661bfa_fcport_get_myalpa(struct bfa_s *bfa)
3662{
3663 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3664
3665 return fcport->myalpa;
3666}
3667
3668bfa_status_t
3669bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
3670{
3671 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3672
3673 bfa_trc(bfa, maxfrsize);
3674 bfa_trc(bfa, fcport->cfg.maxfrsize);
3675
3676 /* with in range */
3677 if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
3678 return BFA_STATUS_INVLD_DFSZ;
3679
3680 /* power of 2, if not the max frame size of 2112 */
3681 if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
3682 return BFA_STATUS_INVLD_DFSZ;
3683
3684 fcport->cfg.maxfrsize = maxfrsize;
3685 return BFA_STATUS_OK;
3686}
3687
3688u16
3689bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
3690{
3691 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3692
3693 return fcport->cfg.maxfrsize;
3694}
3695
3696u8
3697bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
3698{
3699 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3700
3701 return fcport->cfg.rx_bbcredit;
3702}
3703
/* Set the transmit BB-credit and push it to firmware. */
void
bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	/* NOTE(review): u16 -> u8 cast silently truncates values > 255 -
	 * confirm callers never pass a larger credit. */
	fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
	bfa_fcport_send_txcredit(fcport);
}
3712
3713/**
3714 * Get port attributes.
3715 */
3716
3717wwn_t
3718bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
3719{
3720 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3721 if (node)
3722 return fcport->nwwn;
3723 else
3724 return fcport->pwwn;
3725}
3726
/* Fill @attr with a zeroed-then-populated snapshot of port attributes. */
void
bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_os_memset(attr, 0, sizeof(struct bfa_port_attr_s));

	attr->nwwn = fcport->nwwn;
	attr->pwwn = fcport->pwwn;

	/* factory WWNs come from the IOC manufacturing data */
	attr->factorypwwn = bfa_ioc_get_mfg_pwwn(&bfa->ioc);
	attr->factorynwwn = bfa_ioc_get_mfg_nwwn(&bfa->ioc);

	/* copy the whole port config, then overlay live values below */
	bfa_os_memcpy(&attr->pport_cfg, &fcport->cfg,
		sizeof(struct bfa_port_cfg_s));
	/* speed attributes */
	attr->pport_cfg.speed = fcport->cfg.speed;
	attr->speed_supported = fcport->speed_sup;
	attr->speed = fcport->speed;
	attr->cos_supported = FC_CLASS_3;

	/* topology attributes */
	attr->pport_cfg.topology = fcport->cfg.topology;
	attr->topology = fcport->topology;
	attr->pport_cfg.trunked = fcport->cfg.trunked;

	/* beacon attributes */
	attr->beacon = fcport->beacon;
	attr->link_e2e_beacon = fcport->link_e2e_beacon;
	attr->plog_enabled = bfa_plog_get_setting(fcport->bfa->plog);
	attr->io_profile = bfa_fcpim_get_io_profile(fcport->bfa);

	attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
	attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
	/* IOC-level problems override whatever state the port SM reports */
	attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
	if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
		attr->port_state = BFA_PORT_ST_IOCDIS;
	else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
		attr->port_state = BFA_PORT_ST_FWMISMATCH;

	/* FCoE vlan */
	attr->fcoe_vlan = fcport->fcoe_vlan;
}
3770
3771#define BFA_FCPORT_STATS_TOV 1000
3772
/**
 * Fetch port statistics (FCQoS or FCoE).
 *
 * Asynchronous: @cbfn is invoked with @cbarg on completion or timeout.
 * Only one stats fetch/clear may be outstanding at a time.
 */
bfa_status_t
bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
		    bfa_cb_port_t cbfn, void *cbarg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	/* reject overlapping stats requests */
	if (fcport->stats_busy) {
		bfa_trc(bfa, fcport->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}

	fcport->stats_busy = BFA_TRUE;
	fcport->stats_ret = stats;
	fcport->stats_cbfn = cbfn;
	fcport->stats_cbarg = cbarg;

	bfa_fcport_send_stats_get(fcport);

	/* guard against firmware never responding; see stats ISR handling */
	bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
			fcport, BFA_FCPORT_STATS_TOV);
	return BFA_STATUS_OK;
}
3798
/**
 * Reset port statistics (FCQoS or FCoE).
 *
 * Asynchronous: @cbfn is invoked with @cbarg on completion or timeout.
 */
bfa_status_t
bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	/* reject overlapping stats requests */
	if (fcport->stats_busy) {
		bfa_trc(bfa, fcport->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}

	fcport->stats_busy = BFA_TRUE;
	fcport->stats_cbfn = cbfn;
	fcport->stats_cbarg = cbarg;

	bfa_fcport_send_stats_clear(fcport);

	/* guard against firmware never responding */
	bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
			fcport, BFA_FCPORT_STATS_TOV);
	return BFA_STATUS_OK;
}
3822
3823/**
3824 * Fetch FCQoS port statistics
3825 */
3826bfa_status_t
3827bfa_fcport_get_qos_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
3828 bfa_cb_port_t cbfn, void *cbarg)
3829{
3830 /* Meaningful only for FC mode */
3831 bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));
3832
3833 return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
3834}
3835
/**
 * Reset FCQoS port statistics
 */
bfa_status_t
bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
{
	/* Meaningful only for FC mode */
	bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));

	return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
}
3847
/**
 * Fetch FCoE port statistics
 */
bfa_status_t
bfa_fcport_get_fcoe_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
	bfa_cb_port_t cbfn, void *cbarg)
{
	/* Meaningful only for FCoE mode */
	bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc));

	return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
}
3860
3861/**
3862 * Reset FCoE port statistics
3863 */
3864bfa_status_t
3865bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
3866{
3867 /* Meaningful only for FCoE mode */
3868 bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc));
3869
3870 return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
3871}
3872
3873void
3874bfa_fcport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr)
3875{
3876 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3877
3878 qos_attr->state = fcport->qos_attr.state;
3879 qos_attr->total_bb_cr = bfa_os_ntohl(fcport->qos_attr.total_bb_cr);
3880}
3881
3882void
3883bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
3884 struct bfa_qos_vc_attr_s *qos_vc_attr)
3885{
3886 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3887 struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
3888 u32 i = 0;
3889
3890 qos_vc_attr->total_vc_count = bfa_os_ntohs(bfa_vc_attr->total_vc_count);
3891 qos_vc_attr->shared_credit = bfa_os_ntohs(bfa_vc_attr->shared_credit);
3892 qos_vc_attr->elp_opmode_flags =
3893 bfa_os_ntohl(bfa_vc_attr->elp_opmode_flags);
3894
3895 /* Individual VC info */
3896 while (i < qos_vc_attr->total_vc_count) {
3897 qos_vc_attr->vc_info[i].vc_credit =
3898 bfa_vc_attr->vc_info[i].vc_credit;
3899 qos_vc_attr->vc_info[i].borrow_credit =
3900 bfa_vc_attr->vc_info[i].borrow_credit;
3901 qos_vc_attr->vc_info[i].priority =
3902 bfa_vc_attr->vc_info[i].priority;
3903 ++i;
3904 }
3905}
3906
3907/**
3908 * Fetch port attributes.
3909 */
3910bfa_boolean_t
3911bfa_fcport_is_disabled(struct bfa_s *bfa)
3912{
3913 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3914
3915 return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
3916 BFA_PORT_ST_DISABLED;
3917
3918}
3919
3920bfa_boolean_t
3921bfa_fcport_is_ratelim(struct bfa_s *bfa)
3922{
3923 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3924
3925 return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
3926
3927}
3928
3929void
3930bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off)
3931{
3932 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3933 enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
3934
3935 bfa_trc(bfa, on_off);
3936 bfa_trc(bfa, fcport->cfg.qos_enabled);
3937
3938 bfa_trc(bfa, ioc_type);
3939
3940 if (ioc_type == BFA_IOC_TYPE_FC) {
3941 fcport->cfg.qos_enabled = on_off;
3942 /**
3943 * Notify fcpim of the change in QoS state
3944 */
3945 bfa_fcpim_update_ioredirect(bfa);
3946 }
3947}
3948
3949void
3950bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off)
3951{
3952 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3953
3954 bfa_trc(bfa, on_off);
3955 bfa_trc(bfa, fcport->cfg.ratelimit);
3956
3957 fcport->cfg.ratelimit = on_off;
3958 if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
3959 fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
3960}
3961
3962/**
3963 * Configure default minimum ratelim speed
3964 */
3965bfa_status_t
3966bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3967{
3968 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3969
3970 bfa_trc(bfa, speed);
3971
3972 /* Auto and speeds greater than the supported speed, are invalid */
3973 if ((speed == BFA_PORT_SPEED_AUTO) || (speed > fcport->speed_sup)) {
3974 bfa_trc(bfa, fcport->speed_sup);
3975 return BFA_STATUS_UNSUPP_SPEED;
3976 }
3977
3978 fcport->cfg.trl_def_speed = speed;
3979
3980 return BFA_STATUS_OK;
3981}
3982
3983/**
3984 * Get default minimum ratelim speed
3985 */
3986enum bfa_port_speed
3987bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
3988{
3989 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3990
3991 bfa_trc(bfa, fcport->cfg.trl_def_speed);
3992 return fcport->cfg.trl_def_speed;
3993
3994}
3995void
3996bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status)
3997{
3998 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3999
4000 bfa_trc(bfa, status);
4001 bfa_trc(bfa, fcport->diag_busy);
4002
4003 fcport->diag_busy = status;
4004}
4005
4006void
4007bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
4008 bfa_boolean_t link_e2e_beacon)
4009{
4010 struct bfa_s *bfa = dev;
4011 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4012
4013 bfa_trc(bfa, beacon);
4014 bfa_trc(bfa, link_e2e_beacon);
4015 bfa_trc(bfa, fcport->beacon);
4016 bfa_trc(bfa, fcport->link_e2e_beacon);
4017
4018 fcport->beacon = beacon;
4019 fcport->link_e2e_beacon = link_e2e_beacon;
4020}
4021
4022bfa_boolean_t
4023bfa_fcport_is_linkup(struct bfa_s *bfa)
4024{
4025 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4026
4027 return (!fcport->cfg.trunked &&
4028 bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
4029 (fcport->cfg.trunked &&
4030 fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
4031}
4032
4033bfa_boolean_t
4034bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
4035{
4036 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4037
4038 return fcport->cfg.qos_enabled;
4039}
4040
4041bfa_status_t
4042bfa_trunk_get_attr(struct bfa_s *bfa, struct bfa_trunk_attr_s *attr)
4043
4044{
4045 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4046 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
4047
4048 bfa_trc(bfa, fcport->cfg.trunked);
4049 bfa_trc(bfa, trunk->attr.state);
4050 *attr = trunk->attr;
4051 attr->port_id = bfa_lps_get_base_pid(bfa);
4052
4053 return BFA_STATUS_OK;
4054}
4055
4056void
4057bfa_trunk_enable_cfg(struct bfa_s *bfa)
4058{
4059 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4060 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
4061
4062 bfa_trc(bfa, 1);
4063 trunk->attr.state = BFA_TRUNK_OFFLINE;
4064 fcport->cfg.trunked = BFA_TRUE;
4065}
4066
/* Enable trunking: bounce the port so the config change takes effect. */
bfa_status_t
bfa_trunk_enable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;

	bfa_trc(bfa, 1);

	/* disable, flip the trunked config, then re-enable */
	trunk->attr.state = BFA_TRUNK_OFFLINE;
	bfa_fcport_disable(bfa);
	fcport->cfg.trunked = BFA_TRUE;
	bfa_fcport_enable(bfa);

	return BFA_STATUS_OK;
}
4082
/* Disable trunking: bounce the port so the config change takes effect. */
bfa_status_t
bfa_trunk_disable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;

	bfa_trc(bfa, 0);
	/* disable, flip the trunked config, then re-enable */
	trunk->attr.state = BFA_TRUNK_DISABLED;
	bfa_fcport_disable(bfa);
	fcport->cfg.trunked = BFA_FALSE;
	bfa_fcport_enable(bfa);
	return BFA_STATUS_OK;
}
4096
4097
4098/**
4099 * Rport State machine functions
4100 */
/**
 * Beginning state, only online event expected.
 */
static void
bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_CREATE:
		/* only a create moves the rport out of uninit */
		bfa_stats(rp, sm_un_cr);
		bfa_sm_set_state(rp, bfa_rport_sm_created);
		break;

	default:
		bfa_stats(rp, sm_un_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4121
/**
 * Rport is created; awaiting online, delete or h/w failure.
 */
static void
bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_cr_on);
		/* qfull variant waits for request-queue space */
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		/* nothing was sent to firmware yet; free directly */
		bfa_stats(rp, sm_cr_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_cr_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_cr_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4153
/**
 * Waiting for rport create response from firmware.
 */
static void
bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_fwc_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_online);
		bfa_rport_online_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* defer: must wait for the create response first */
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_OFFLINE:
		/* defer: must wait for the create response first */
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4190
/**
 * Request queue is full, awaiting queue resume to send create request.
 */
static void
bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* NOTE(review): no bfa_stats() here, unlike sibling
		 * cases - confirm intended */
		bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		bfa_rport_send_fwcreate(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* nothing reached firmware; cancel the wait and free */
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4231
/**
 * Online state - normal parking state.
 */
static void
bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	struct bfi_rport_qos_scn_s *qos_scn;

	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_on_off);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_on_del);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_on_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	case BFA_RPORT_SM_SET_SPEED:
		bfa_rport_send_fwspeed(rp);
		break;

	case BFA_RPORT_SM_QOS_SCN:
		qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
		/* NOTE(review): qos_attr is copied before the flow ids are
		 * byte-swapped in place below - presumably rp->qos_attr is
		 * kept in wire order; confirm. */
		rp->qos_attr = qos_scn->new_qos_attr;
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);

		qos_scn->old_qos_attr.qos_flow_id  =
			bfa_os_ntohl(qos_scn->old_qos_attr.qos_flow_id);
		qos_scn->new_qos_attr.qos_flow_id  =
			bfa_os_ntohl(qos_scn->new_qos_attr.qos_flow_id);

		/* notify the driver only about actual changes */
		if (qos_scn->old_qos_attr.qos_flow_id !=
			qos_scn->new_qos_attr.qos_flow_id)
			bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
						    qos_scn->old_qos_attr,
						    qos_scn->new_qos_attr);
		if (qos_scn->old_qos_attr.qos_priority !=
			qos_scn->new_qos_attr.qos_priority)
			bfa_cb_rport_qos_scn_prio(rp->rport_drv,
						  qos_scn->old_qos_attr,
						  qos_scn->new_qos_attr);
		break;

	default:
		bfa_stats(rp, sm_on_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4299
/**
 * Firmware rport is being deleted - awaiting f/w response.
 */
static void
bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_fwd_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* the same fw delete now serves the rport delete */
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4332
/**
 * Request queue full; awaiting queue resume to send the fw delete.
 */
static void
bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* cancel the queued wait before reporting offline */
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4362
/**
 * Offline state.
 */
static void
bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_DELETE:
		/* nothing outstanding in firmware; free directly */
		bfa_stats(rp, sm_off_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_off_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_off_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_off_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4397
/**
 * Rport is deleted, waiting for firmware response to delete.
 */
static void
bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* the fw response will never come; free directly */
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
4424
/**
 * Rport deleted while the fw delete is still waiting for queue space.
 */
static void
bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* NOTE(review): increments sm_del_fwrsp on QRESUME -
		 * presumably a stat-name reuse; confirm intended */
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* cancel the queued wait and free directly */
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
4449
/**
 * Waiting for rport create response from firmware. A delete is pending.
 */
static void
bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
				enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* create completed; now issue the deferred delete */
		bfa_stats(rp, sm_delp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_delp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_stats(rp, sm_delp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4480
/**
 * Waiting for rport create response from firmware. Rport offline is pending.
 */
static void
bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
				 enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* create completed; now issue the deferred fw delete */
		bfa_stats(rp, sm_offp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		/* delete supersedes the pending offline */
		bfa_stats(rp, sm_offp_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_offp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_offp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4515
/**
 * IOC h/w failed.
 */
static void
bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		/* no firmware interaction possible; just tell the driver */
		bfa_stats(rp, sm_iocd_off);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_iocd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		/* IOC came back; retry the firmware create */
		bfa_stats(rp, sm_iocd_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* already in the failed state; nothing more to do */
		break;

	default:
		bfa_stats(rp, sm_iocd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4553
4554
4555
4556/**
4557 * bfa_rport_private BFA rport private functions
4558 */
4559
4560static void
4561__bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
4562{
4563 struct bfa_rport_s *rp = cbarg;
4564
4565 if (complete)
4566 bfa_cb_rport_online(rp->rport_drv);
4567}
4568
4569static void
4570__bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
4571{
4572 struct bfa_rport_s *rp = cbarg;
4573
4574 if (complete)
4575 bfa_cb_rport_offline(rp->rport_drv);
4576}
4577
4578static void
4579bfa_rport_qresume(void *cbarg)
4580{
4581 struct bfa_rport_s *rp = cbarg;
4582
4583 bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
4584}
4585
4586static void
4587bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
4588 u32 *dm_len)
4589{
4590 if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
4591 cfg->fwcfg.num_rports = BFA_RPORT_MIN;
4592
4593 *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s);
4594}
4595
/* Carve the rport array out of meminfo and initialize every rport. */
static void
bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rp;
	u16 i;

	INIT_LIST_HEAD(&mod->rp_free_q);
	INIT_LIST_HEAD(&mod->rp_active_q);

	/* the rport array lives in the kva region of meminfo */
	rp = (struct bfa_rport_s *) bfa_meminfo_kva(meminfo);
	mod->rps_list = rp;
	mod->num_rports = cfg->fwcfg.num_rports;

	/* num_rports must be a non-zero power of two */
	bfa_assert(mod->num_rports &&
		   !(mod->num_rports & (mod->num_rports - 1)));

	for (i = 0; i < mod->num_rports; i++, rp++) {
		bfa_os_memset(rp, 0, sizeof(struct bfa_rport_s));
		rp->bfa = bfa;
		rp->rport_tag = i;
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);

		/**
		 * tag 0 is never handed out (kept off the free list)
		 */
		if (i)
			list_add_tail(&rp->qe, &mod->rp_free_q);

		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
	}

	/**
	 * consume memory
	 */
	bfa_meminfo_kva(meminfo) = (u8 *) rp;
}
4634
/* Module detach hook: intentionally empty, no per-module teardown. */
static void
bfa_rport_detach(struct bfa_s *bfa)
{
}
4639
/* Module start hook: intentionally empty. */
static void
bfa_rport_start(struct bfa_s *bfa)
{
}
4644
/* Module stop hook: intentionally empty. */
static void
bfa_rport_stop(struct bfa_s *bfa)
{
}
4649
4650static void
4651bfa_rport_iocdisable(struct bfa_s *bfa)
4652{
4653 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4654 struct bfa_rport_s *rport;
4655 struct list_head *qe, *qen;
4656
4657 list_for_each_safe(qe, qen, &mod->rp_active_q) {
4658 rport = (struct bfa_rport_s *) qe;
4659 bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
4660 }
4661}
4662
4663static struct bfa_rport_s *
4664bfa_rport_alloc(struct bfa_rport_mod_s *mod)
4665{
4666 struct bfa_rport_s *rport;
4667
4668 bfa_q_deq(&mod->rp_free_q, &rport);
4669 if (rport)
4670 list_add_tail(&rport->qe, &mod->rp_active_q);
4671
4672 return rport;
4673}
4674
4675static void
4676bfa_rport_free(struct bfa_rport_s *rport)
4677{
4678 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
4679
4680 bfa_assert(bfa_q_is_on_q(&mod->rp_active_q, rport));
4681 list_del(&rport->qe);
4682 list_add_tail(&rport->qe, &mod->rp_free_q);
4683}
4684
4685static bfa_boolean_t
4686bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
4687{
4688 struct bfi_rport_create_req_s *m;
4689
4690 /**
4691 * check for room in queue to send request now
4692 */
4693 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4694 if (!m) {
4695 bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
4696 return BFA_FALSE;
4697 }
4698
4699 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
4700 bfa_lpuid(rp->bfa));
4701 m->bfa_handle = rp->rport_tag;
4702 m->max_frmsz = bfa_os_htons(rp->rport_info.max_frmsz);
4703 m->pid = rp->rport_info.pid;
4704 m->lp_tag = rp->rport_info.lp_tag;
4705 m->local_pid = rp->rport_info.local_pid;
4706 m->fc_class = rp->rport_info.fc_class;
4707 m->vf_en = rp->rport_info.vf_en;
4708 m->vf_id = rp->rport_info.vf_id;
4709 m->cisc = rp->rport_info.cisc;
4710
4711 /**
4712 * queue I/O message to firmware
4713 */
4714 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
4715 return BFA_TRUE;
4716}
4717
4718static bfa_boolean_t
4719bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
4720{
4721 struct bfi_rport_delete_req_s *m;
4722
4723 /**
4724 * check for room in queue to send request now
4725 */
4726 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4727 if (!m) {
4728 bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
4729 return BFA_FALSE;
4730 }
4731
4732 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
4733 bfa_lpuid(rp->bfa));
4734 m->fw_handle = rp->fw_handle;
4735
4736 /**
4737 * queue I/O message to firmware
4738 */
4739 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
4740 return BFA_TRUE;
4741}
4742
4743static bfa_boolean_t
4744bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
4745{
4746 struct bfa_rport_speed_req_s *m;
4747
4748 /**
4749 * check for room in queue to send request now
4750 */
4751 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4752 if (!m) {
4753 bfa_trc(rp->bfa, rp->rport_info.speed);
4754 return BFA_FALSE;
4755 }
4756
4757 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
4758 bfa_lpuid(rp->bfa));
4759 m->fw_handle = rp->fw_handle;
4760 m->speed = (u8)rp->rport_info.speed;
4761
4762 /**
4763 * queue I/O message to firmware
4764 */
4765 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
4766 return BFA_TRUE;
4767}
4768
4769
4770
4771/**
4772 * bfa_rport_public
4773 */
4774
4775/**
4776 * Rport interrupt processing.
4777 */
4778void
4779bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
4780{
4781 union bfi_rport_i2h_msg_u msg;
4782 struct bfa_rport_s *rp;
4783
4784 bfa_trc(bfa, m->mhdr.msg_id);
4785
4786 msg.msg = m;
4787
4788 switch (m->mhdr.msg_id) {
4789 case BFI_RPORT_I2H_CREATE_RSP:
4790 rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
4791 rp->fw_handle = msg.create_rsp->fw_handle;
4792 rp->qos_attr = msg.create_rsp->qos_attr;
4793 bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
4794 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
4795 break;
4796
4797 case BFI_RPORT_I2H_DELETE_RSP:
4798 rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
4799 bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
4800 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
4801 break;
4802
4803 case BFI_RPORT_I2H_QOS_SCN:
4804 rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
4805 rp->event_arg.fw_msg = msg.qos_scn_evt;
4806 bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
4807 break;
4808
4809 default:
4810 bfa_trc(bfa, m->mhdr.msg_id);
4811 bfa_assert(0);
4812 }
4813}
4814
4815
4816
4817/**
4818 * bfa_rport_api
4819 */
4820
4821struct bfa_rport_s *
4822bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
4823{
4824 struct bfa_rport_s *rp;
4825
4826 rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
4827
4828 if (rp == NULL)
4829 return NULL;
4830
4831 rp->bfa = bfa;
4832 rp->rport_drv = rport_drv;
4833 bfa_rport_clear_stats(rp);
4834
4835 bfa_assert(bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
4836 bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
4837
4838 return rp;
4839}
4840
/*
 * Request deletion of an rport; actual teardown is driven by the
 * rport state machine.
 */
void
bfa_rport_delete(struct bfa_rport_s *rport)
{
	bfa_sm_send_event(rport, BFA_RPORT_SM_DELETE);
}
4846
4847void
4848bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
4849{
4850 bfa_assert(rport_info->max_frmsz != 0);
4851
4852 /**
4853 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
4854 * responses. Default to minimum size.
4855 */
4856 if (rport_info->max_frmsz == 0) {
4857 bfa_trc(rport->bfa, rport->rport_tag);
4858 rport_info->max_frmsz = FC_MIN_PDUSZ;
4859 }
4860
4861 bfa_os_assign(rport->rport_info, *rport_info);
4862 bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
4863}
4864
/*
 * Take an rport offline; teardown of the fw session is driven by the
 * rport state machine.
 */
void
bfa_rport_offline(struct bfa_rport_s *rport)
{
	bfa_sm_send_event(rport, BFA_RPORT_SM_OFFLINE);
}
4870
/*
 * Set the rport's operating speed.  A concrete speed is required:
 * zero and AUTO are rejected by assertion.
 */
void
bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
{
	bfa_assert(speed != 0);
	bfa_assert(speed != BFA_PORT_SPEED_AUTO);

	rport->rport_info.speed = speed;
	bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
}
4880
/*
 * Copy the rport's HAL statistics into the caller-supplied buffer.
 */
void
bfa_rport_get_stats(struct bfa_rport_s *rport,
			struct bfa_rport_hal_stats_s *stats)
{
	*stats = rport->stats;
}
4887
4888void
4889bfa_rport_get_qos_attr(struct bfa_rport_s *rport,
4890 struct bfa_rport_qos_attr_s *qos_attr)
4891{
4892 qos_attr->qos_priority = rport->qos_attr.qos_priority;
4893 qos_attr->qos_flow_id = bfa_os_ntohl(rport->qos_attr.qos_flow_id);
4894
4895}
4896
/*
 * Zero the rport's HAL statistics counters.
 */
void
bfa_rport_clear_stats(struct bfa_rport_s *rport)
{
	bfa_os_memset(&rport->stats, 0, sizeof(rport->stats));
}
4902
4903
4904/**
4905 * SGPG related functions
4906 */
4907
4908/**
4909 * Compute and return memory needed by FCP(im) module.
4910 */
4911static void
4912bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
4913 u32 *dm_len)
4914{
4915 if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
4916 cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
4917
4918 *km_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfa_sgpg_s);
4919 *dm_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfi_sgpg_s);
4920}
4921
4922
/*
 * Attach-time init of the SGPG module: align and carve the host and
 * firmware SG page arrays out of the pre-sized meminfo area (see
 * bfa_sgpg_meminfo()) and build the free page list.
 */
static void
bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *minfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	int i;
	struct bfa_sgpg_s *hsgpg;
	struct bfi_sgpg_s *sgpg;
	u64 align_len;

	/* view the same 64-bit PA either as an integer or as bfi_addr_u */
	union {
		u64 pa;
		union bfi_addr_u addr;
	} sgpg_pa, sgpg_pa_tmp;

	INIT_LIST_HEAD(&mod->sgpg_q);
	INIT_LIST_HEAD(&mod->sgpg_wait_q);

	bfa_trc(bfa, cfg->drvcfg.num_sgpgs);

	mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
	/*
	 * Round the DMA base up to a bfi_sgpg_s boundary; the kva and
	 * dma-virt arrays are advanced by the same padding so all three
	 * cursors stay index-aligned.
	 */
	mod->sgpg_arr_pa = bfa_meminfo_dma_phys(minfo);
	align_len = (BFA_SGPG_ROUNDUP(mod->sgpg_arr_pa) - mod->sgpg_arr_pa);
	mod->sgpg_arr_pa += align_len;
	mod->hsgpg_arr = (struct bfa_sgpg_s *) (bfa_meminfo_kva(minfo) +
						align_len);
	mod->sgpg_arr = (struct bfi_sgpg_s *) (bfa_meminfo_dma_virt(minfo) +
						align_len);

	hsgpg = mod->hsgpg_arr;
	sgpg = mod->sgpg_arr;
	sgpg_pa.pa = mod->sgpg_arr_pa;
	mod->free_sgpgs = mod->num_sgpgs;

	/* the aligned base must sit on a bfi_sgpg_s boundary */
	bfa_assert(!(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1)));

	for (i = 0; i < mod->num_sgpgs; i++) {
		bfa_os_memset(hsgpg, 0, sizeof(*hsgpg));
		bfa_os_memset(sgpg, 0, sizeof(*sgpg));

		hsgpg->sgpg = sgpg;
		/* PA converted via bfa_sgaddr_le -- presumably to the
		 * firmware's (little-endian) layout; confirm in bfi defs */
		sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
		hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
		list_add_tail(&hsgpg->qe, &mod->sgpg_q);

		hsgpg++;
		sgpg++;
		sgpg_pa.pa += sizeof(struct bfi_sgpg_s);
	}

	/* report consumed memory back to the meminfo allocator */
	bfa_meminfo_kva(minfo) = (u8 *) hsgpg;
	bfa_meminfo_dma_virt(minfo) = (u8 *) sgpg;
	bfa_meminfo_dma_phys(minfo) = sgpg_pa.pa;
}
4977
/*
 * Module detach hook -- intentionally empty; SGPG memory is owned by
 * the meminfo area claimed in bfa_sgpg_attach().
 */
static void
bfa_sgpg_detach(struct bfa_s *bfa)
{
}
4982
/*
 * Module start hook -- intentionally empty.
 */
static void
bfa_sgpg_start(struct bfa_s *bfa)
{
}
4987
/*
 * Module stop hook -- intentionally empty.
 */
static void
bfa_sgpg_stop(struct bfa_s *bfa)
{
}
4992
/*
 * IOC disable hook -- intentionally empty; SG pages hold no firmware
 * state that needs failing here.
 */
static void
bfa_sgpg_iocdisable(struct bfa_s *bfa)
{
}
4997
4998
4999
5000/**
5001 * hal_sgpg_public BFA SGPG public functions
5002 */
5003
5004bfa_status_t
5005bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
5006{
5007 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5008 struct bfa_sgpg_s *hsgpg;
5009 int i;
5010
5011 bfa_trc_fp(bfa, nsgpgs);
5012
5013 if (mod->free_sgpgs < nsgpgs)
5014 return BFA_STATUS_ENOMEM;
5015
5016 for (i = 0; i < nsgpgs; i++) {
5017 bfa_q_deq(&mod->sgpg_q, &hsgpg);
5018 bfa_assert(hsgpg);
5019 list_add_tail(&hsgpg->qe, sgpg_q);
5020 }
5021
5022 mod->free_sgpgs -= nsgpgs;
5023 return BFA_STATUS_OK;
5024}
5025
/*
 * Return nsgpg SG pages from sgpg_q to the free pool, then service
 * waiters in FIFO order.  A waiter may be granted a partial allocation
 * and stay queued; once its demand is fully met it is dequeued and its
 * callback invoked.
 */
void
bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_wqe_s *wqe;

	bfa_trc_fp(bfa, nsgpg);

	mod->free_sgpgs += nsgpg;
	bfa_assert(mod->free_sgpgs <= mod->num_sgpgs);

	list_splice_tail_init(sgpg_q, &mod->sgpg_q);

	if (list_empty(&mod->sgpg_wait_q))
		return;

	/**
	 * satisfy as many waiting requests as possible
	 */
	do {
		wqe = bfa_q_first(&mod->sgpg_wait_q);
		/* grant min(free, wanted); nsgpg is reused as grant size */
		if (mod->free_sgpgs < wqe->nsgpg)
			nsgpg = mod->free_sgpgs;
		else
			nsgpg = wqe->nsgpg;
		bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
		wqe->nsgpg -= nsgpg;
		if (wqe->nsgpg == 0) {
			/* fully satisfied: complete the waiter */
			list_del(&wqe->qe);
			wqe->cbfn(wqe->cbarg);
		}
	} while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
}
5059
/*
 * Queue wqe to wait for nsgpg SG pages.  Callers must only wait when
 * the pool cannot satisfy the request (asserted).  Any currently free
 * pages are granted immediately; the remainder is delivered later via
 * wqe->cbfn from bfa_sgpg_mfree().
 */
void
bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

	bfa_assert(nsgpg > 0);
	bfa_assert(nsgpg > mod->free_sgpgs);

	wqe->nsgpg_total = wqe->nsgpg = nsgpg;

	/**
	 * allocate any left to this one first
	 */
	if (mod->free_sgpgs) {
		/**
		 * no one else is waiting for SGPG
		 */
		bfa_assert(list_empty(&mod->sgpg_wait_q));
		list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
		wqe->nsgpg -= mod->free_sgpgs;
		mod->free_sgpgs = 0;
	}

	list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
}
5085
5086void
5087bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
5088{
5089 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5090
5091 bfa_assert(bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
5092 list_del(&wqe->qe);
5093
5094 if (wqe->nsgpg_total != wqe->nsgpg)
5095 bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
5096 wqe->nsgpg_total - wqe->nsgpg);
5097}
5098
5099void
5100bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
5101 void *cbarg)
5102{
5103 INIT_LIST_HEAD(&wqe->sgpg_q);
5104 wqe->cbfn = cbfn;
5105 wqe->cbarg = cbarg;
5106}
5107
5108/**
5109 * UF related functions
5110 */
5111/*
5112 *****************************************************************************
5113 * Internal functions
5114 *****************************************************************************
5115 */
5116static void
5117__bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
5118{
5119 struct bfa_uf_s *uf = cbarg;
5120 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
5121
5122 if (complete)
5123 ufm->ufrecv(ufm->cbarg, uf);
5124}
5125
/*
 * Claim the DMA-able posted-buffer array (one bfa_uf_buf_s per UF)
 * from the meminfo area, rounded up to the DMA alignment, and zero it.
 */
static void
claim_uf_pbs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	u32 uf_pb_tot_sz;

	ufm->uf_pbs_kva = (struct bfa_uf_buf_s *) bfa_meminfo_dma_virt(mi);
	ufm->uf_pbs_pa = bfa_meminfo_dma_phys(mi);
	uf_pb_tot_sz = BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * ufm->num_ufs),
							BFA_DMA_ALIGN_SZ);

	/* advance both DMA cursors past the claimed region */
	bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz;
	bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz;

	bfa_os_memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz);
}
5141
5142static void
5143claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
5144{
5145 struct bfi_uf_buf_post_s *uf_bp_msg;
5146 struct bfi_sge_s *sge;
5147 union bfi_addr_u sga_zero = { {0} };
5148 u16 i;
5149 u16 buf_len;
5150
5151 ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_meminfo_kva(mi);
5152 uf_bp_msg = ufm->uf_buf_posts;
5153
5154 for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
5155 i++, uf_bp_msg++) {
5156 bfa_os_memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
5157
5158 uf_bp_msg->buf_tag = i;
5159 buf_len = sizeof(struct bfa_uf_buf_s);
5160 uf_bp_msg->buf_len = bfa_os_htons(buf_len);
5161 bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
5162 bfa_lpuid(ufm->bfa));
5163
5164 sge = uf_bp_msg->sge;
5165 sge[0].sg_len = buf_len;
5166 sge[0].flags = BFI_SGE_DATA_LAST;
5167 bfa_dma_addr_set(sge[0].sga, ufm_pbs_pa(ufm, i));
5168 bfa_sge_to_be(sge);
5169
5170 sge[1].sg_len = buf_len;
5171 sge[1].flags = BFI_SGE_PGDLEN;
5172 sge[1].sga = sga_zero;
5173 bfa_sge_to_be(&sge[1]);
5174 }
5175
5176 /**
5177 * advance pointer beyond consumed memory
5178 */
5179 bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg;
5180}
5181
5182static void
5183claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
5184{
5185 u16 i;
5186 struct bfa_uf_s *uf;
5187
5188 /*
5189 * Claim block of memory for UF list
5190 */
5191 ufm->uf_list = (struct bfa_uf_s *) bfa_meminfo_kva(mi);
5192
5193 /*
5194 * Initialize UFs and queue it in UF free queue
5195 */
5196 for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
5197 bfa_os_memset(uf, 0, sizeof(struct bfa_uf_s));
5198 uf->bfa = ufm->bfa;
5199 uf->uf_tag = i;
5200 uf->pb_len = sizeof(struct bfa_uf_buf_s);
5201 uf->buf_kva = (void *)&ufm->uf_pbs_kva[i];
5202 uf->buf_pa = ufm_pbs_pa(ufm, i);
5203 list_add_tail(&uf->qe, &ufm->uf_free_q);
5204 }
5205
5206 /**
5207 * advance memory pointer
5208 */
5209 bfa_meminfo_kva(mi) = (u8 *) uf;
5210}
5211
/*
 * Claim all UF module memory from meminfo: posted buffers (DMA)
 * first, then UF descriptors and pre-built post messages (KVA).
 */
static void
uf_mem_claim(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	claim_uf_pbs(ufm, mi);
	claim_ufs(ufm, mi);
	claim_uf_post_msgs(ufm, mi);
}
5219
/*
 * Compute memory needed by the UF module: DMA space for the posted
 * buffers, KVA for the descriptors and pre-built post messages.
 */
static void
bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len)
{
	u32 num_ufs = cfg->fwcfg.num_uf_bufs;

	/*
	 * dma-able memory for UF posted bufs
	 */
	*dm_len += BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * num_ufs),
							BFA_DMA_ALIGN_SZ);

	/*
	 * kernel Virtual memory for UFs and UF buf post msg copies
	 */
	*ndm_len += sizeof(struct bfa_uf_s) * num_ufs;
	*ndm_len += sizeof(struct bfi_uf_buf_post_s) * num_ufs;
}
5237
/*
 * Attach-time init of the UF module: reset module state, record the
 * configured UF count and claim all UF memory from meminfo.
 * (bfad/pcidev are unused here; the hook signature is fixed.)
 */
static void
bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

	bfa_os_memset(ufm, 0, sizeof(struct bfa_uf_mod_s));
	ufm->bfa = bfa;
	ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
	INIT_LIST_HEAD(&ufm->uf_free_q);
	INIT_LIST_HEAD(&ufm->uf_posted_q);

	uf_mem_claim(ufm, meminfo);
}
5252
/*
 * Module detach hook -- intentionally empty; UF memory is owned by
 * the meminfo area claimed in bfa_uf_attach().
 */
static void
bfa_uf_detach(struct bfa_s *bfa)
{
}
5257
/*
 * Dequeue one UF from the free queue; returns NULL when empty.
 */
static struct bfa_uf_s *
bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
{
	struct bfa_uf_s *uf;

	bfa_q_deq(&uf_mod->uf_free_q, &uf);
	return uf;
}
5266
/*
 * Return a UF to the free queue.
 */
static void
bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
{
	list_add_tail(&uf->qe, &uf_mod->uf_free_q);
}
5272
5273static bfa_status_t
5274bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
5275{
5276 struct bfi_uf_buf_post_s *uf_post_msg;
5277
5278 uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
5279 if (!uf_post_msg)
5280 return BFA_STATUS_FAILED;
5281
5282 bfa_os_memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
5283 sizeof(struct bfi_uf_buf_post_s));
5284 bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP);
5285
5286 bfa_trc(ufm->bfa, uf->uf_tag);
5287
5288 list_add_tail(&uf->qe, &ufm->uf_posted_q);
5289 return BFA_STATUS_OK;
5290}
5291
5292static void
5293bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
5294{
5295 struct bfa_uf_s *uf;
5296
5297 while ((uf = bfa_uf_get(uf_mod)) != NULL) {
5298 if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
5299 break;
5300 }
5301}
5302
5303static void
5304uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
5305{
5306 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5307 u16 uf_tag = m->buf_tag;
5308 struct bfa_uf_buf_s *uf_buf = &ufm->uf_pbs_kva[uf_tag];
5309 struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
5310 u8 *buf = &uf_buf->d[0];
5311 struct fchs_s *fchs;
5312
5313 m->frm_len = bfa_os_ntohs(m->frm_len);
5314 m->xfr_len = bfa_os_ntohs(m->xfr_len);
5315
5316 fchs = (struct fchs_s *)uf_buf;
5317
5318 list_del(&uf->qe); /* dequeue from posted queue */
5319
5320 uf->data_ptr = buf;
5321 uf->data_len = m->xfr_len;
5322
5323 bfa_assert(uf->data_len >= sizeof(struct fchs_s));
5324
5325 if (uf->data_len == sizeof(struct fchs_s)) {
5326 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
5327 uf->data_len, (struct fchs_s *)buf);
5328 } else {
5329 u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
5330 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
5331 BFA_PL_EID_RX, uf->data_len,
5332 (struct fchs_s *)buf, pld_w0);
5333 }
5334
5335 if (bfa->fcs)
5336 __bfa_cb_uf_recv(uf, BFA_TRUE);
5337 else
5338 bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
5339}
5340
/*
 * Module stop hook -- intentionally empty.
 */
static void
bfa_uf_stop(struct bfa_s *bfa)
{
}
5345
5346static void
5347bfa_uf_iocdisable(struct bfa_s *bfa)
5348{
5349 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5350 struct bfa_uf_s *uf;
5351 struct list_head *qe, *qen;
5352
5353 list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
5354 uf = (struct bfa_uf_s *) qe;
5355 list_del(&uf->qe);
5356 bfa_uf_put(ufm, uf);
5357 }
5358}
5359
/*
 * Module start hook: post all free UF receive buffers to firmware.
 */
static void
bfa_uf_start(struct bfa_s *bfa)
{
	bfa_uf_post_all(BFA_UF_MOD(bfa));
}
5365
5366
5367
5368/**
5369 * hal_uf_api
5370 */
5371
5372/**
5373 * Register handler for all unsolicted recieve frames.
5374 *
5375 * @param[in] bfa BFA instance
5376 * @param[in] ufrecv receive handler function
5377 * @param[in] cbarg receive handler arg
5378 */
5379void
5380bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
5381{
5382 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5383
5384 ufm->ufrecv = ufrecv;
5385 ufm->cbarg = cbarg;
5386}
5387
5388/**
5389 * Free an unsolicited frame back to BFA.
5390 *
5391 * @param[in] uf unsolicited frame to be freed
5392 *
5393 * @return None
5394 */
5395void
5396bfa_uf_free(struct bfa_uf_s *uf)
5397{
5398 bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
5399 bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
5400}
5401
5402
5403
5404/**
5405 * uf_pub BFA uf module public functions
5406 */
/*
 * UF interrupt processing: dispatch firmware-to-host UF messages.
 * Only frame-received is expected; anything else asserts.
 */
void
bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	bfa_trc(bfa, msg->mhdr.msg_id);

	switch (msg->mhdr.msg_id) {
	case BFI_UF_I2H_FRM_RCVD:
		uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
		break;

	default:
		/* unknown message id from firmware: trace and assert */
		bfa_trc(bfa, msg->mhdr.msg_id);
		bfa_assert(0);
	}
}
5422
5423
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
new file mode 100644
index 000000000000..9921dad0d039
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -0,0 +1,657 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_SVC_H__
19#define __BFA_SVC_H__
20
21#include "bfa_cs.h"
22#include "bfi_ms.h"
23
24
25/**
26 * Scatter-gather DMA related defines
27 */
28#define BFA_SGPG_MIN (16)
29
30/**
31 * Alignment macro for SG page allocation
32 */
33#define BFA_SGPG_ROUNDUP(_l) (((_l) + (sizeof(struct bfi_sgpg_s) - 1)) \
34 & ~(sizeof(struct bfi_sgpg_s) - 1))
35
36struct bfa_sgpg_wqe_s {
37 struct list_head qe; /* queue sg page element */
38 int nsgpg; /* pages to be allocated */
39 int nsgpg_total; /* total pages required */
40 void (*cbfn) (void *cbarg); /* callback function */
41 void *cbarg; /* callback arg */
42 struct list_head sgpg_q; /* queue of alloced sgpgs */
43};
44
45struct bfa_sgpg_s {
46 struct list_head qe; /* queue sg page element */
47 struct bfi_sgpg_s *sgpg; /* va of SG page */
48 union bfi_addr_u sgpg_pa; /* pa of SG page */
49};
50
51/**
52 * Given number of SG elements, BFA_SGPG_NPAGE() returns the number of
53 * SG pages required.
54 */
55#define BFA_SGPG_NPAGE(_nsges) (((_nsges) / BFI_SGPG_DATA_SGES) + 1)
56
57struct bfa_sgpg_mod_s {
58 struct bfa_s *bfa;
59 int num_sgpgs; /* number of SG pages */
60 int free_sgpgs; /* number of free SG pages */
61 struct bfa_sgpg_s *hsgpg_arr; /* BFA SG page array */
62 struct bfi_sgpg_s *sgpg_arr; /* actual SG page array */
63 u64 sgpg_arr_pa; /* SG page array DMA addr */
64 struct list_head sgpg_q; /* queue of free SG pages */
65 struct list_head sgpg_wait_q; /* wait queue for SG pages */
66};
67#define BFA_SGPG_MOD(__bfa) (&(__bfa)->modules.sgpg_mod)
68
69bfa_status_t bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q,
70 int nsgpgs);
71void bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs);
72void bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe,
73 void (*cbfn) (void *cbarg), void *cbarg);
74void bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpgs);
75void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe);
76
77
78/**
79 * FCXP related defines
80 */
81#define BFA_FCXP_MIN (1)
82#define BFA_FCXP_MAX_IBUF_SZ (2 * 1024 + 256)
83#define BFA_FCXP_MAX_LBUF_SZ (4 * 1024 + 256)
84
85struct bfa_fcxp_mod_s {
86 struct bfa_s *bfa; /* backpointer to BFA */
87 struct bfa_fcxp_s *fcxp_list; /* array of FCXPs */
88 u16 num_fcxps; /* max num FCXP requests */
89 struct list_head fcxp_free_q; /* free FCXPs */
90 struct list_head fcxp_active_q; /* active FCXPs */
91 void *req_pld_list_kva; /* list of FCXP req pld */
92 u64 req_pld_list_pa; /* list of FCXP req pld */
93 void *rsp_pld_list_kva; /* list of FCXP resp pld */
94 u64 rsp_pld_list_pa; /* list of FCXP resp pld */
95 struct list_head wait_q; /* wait queue for free fcxp */
96 u32 req_pld_sz;
97 u32 rsp_pld_sz;
98};
99
100#define BFA_FCXP_MOD(__bfa) (&(__bfa)->modules.fcxp_mod)
101#define BFA_FCXP_FROM_TAG(__mod, __tag) (&(__mod)->fcxp_list[__tag])
102
103typedef void (*fcxp_send_cb_t) (struct bfa_s *ioc, struct bfa_fcxp_s *fcxp,
104 void *cb_arg, bfa_status_t req_status,
105 u32 rsp_len, u32 resid_len,
106 struct fchs_s *rsp_fchs);
107
108typedef u64 (*bfa_fcxp_get_sgaddr_t) (void *bfad_fcxp, int sgeid);
109typedef u32 (*bfa_fcxp_get_sglen_t) (void *bfad_fcxp, int sgeid);
110typedef void (*bfa_cb_fcxp_send_t) (void *bfad_fcxp, struct bfa_fcxp_s *fcxp,
111 void *cbarg, enum bfa_status req_status,
112 u32 rsp_len, u32 resid_len,
113 struct fchs_s *rsp_fchs);
114typedef void (*bfa_fcxp_alloc_cbfn_t) (void *cbarg, struct bfa_fcxp_s *fcxp);
115
116
117
118/**
119 * Information needed for a FCXP request
120 */
struct bfa_fcxp_req_info_s {
	struct bfa_rport_s *bfa_rport;
					/** Pointer to the bfa rport that was
					 * returned from bfa_rport_create().
					 * This could be left NULL for WKA or
					 * for FCXP interactions before the
					 * rport nexus is established
					 */
	struct fchs_s	fchs;	/*  request FC header structure */
	u8		cts;	/*  continuous sequence */
	u8		class;	/*  FC class for the request/response */
	u16	max_frmsz;	/*  max send frame size */
	u16	vf_id;		/*  vsan tag if applicable */
	u8	lp_tag;		/*  lport tag */
	u32	req_tot_len;	/*  request payload total length */
};
137
138struct bfa_fcxp_rsp_info_s {
139 struct fchs_s rsp_fchs;
140 /** !< Response frame's FC header will
141 * be sent back in this field */
142 u8 rsp_timeout;
143 /** !< timeout in seconds, 0-no response
144 */
145 u8 rsvd2[3];
146 u32 rsp_maxlen; /* max response length expected */
147};
148
149struct bfa_fcxp_s {
150 struct list_head qe; /* fcxp queue element */
151 bfa_sm_t sm; /* state machine */
152 void *caller; /* driver or fcs */
153 struct bfa_fcxp_mod_s *fcxp_mod;
154 /* back pointer to fcxp mod */
155 u16 fcxp_tag; /* internal tag */
156 struct bfa_fcxp_req_info_s req_info;
157 /* request info */
158 struct bfa_fcxp_rsp_info_s rsp_info;
159 /* response info */
160 u8 use_ireqbuf; /* use internal req buf */
161 u8 use_irspbuf; /* use internal rsp buf */
162 u32 nreq_sgles; /* num request SGLEs */
163 u32 nrsp_sgles; /* num response SGLEs */
164 struct list_head req_sgpg_q; /* SG pages for request buf */
165 struct list_head req_sgpg_wqe; /* wait queue for req SG page */
166 struct list_head rsp_sgpg_q; /* SG pages for response buf */
167 struct list_head rsp_sgpg_wqe; /* wait queue for rsp SG page */
168
169 bfa_fcxp_get_sgaddr_t req_sga_cbfn;
170 /* SG elem addr user function */
171 bfa_fcxp_get_sglen_t req_sglen_cbfn;
172 /* SG elem len user function */
173 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn;
174 /* SG elem addr user function */
175 bfa_fcxp_get_sglen_t rsp_sglen_cbfn;
176 /* SG elem len user function */
177 bfa_cb_fcxp_send_t send_cbfn; /* send completion callback */
178 void *send_cbarg; /* callback arg */
179 struct bfa_sge_s req_sge[BFA_FCXP_MAX_SGES];
180 /* req SG elems */
181 struct bfa_sge_s rsp_sge[BFA_FCXP_MAX_SGES];
182 /* rsp SG elems */
183 u8 rsp_status; /* comp: rsp status */
184 u32 rsp_len; /* comp: actual response len */
185 u32 residue_len; /* comp: residual rsp length */
186 struct fchs_s rsp_fchs; /* comp: response fchs */
187 struct bfa_cb_qe_s hcb_qe; /* comp: callback qelem */
188 struct bfa_reqq_wait_s reqq_wqe;
189 bfa_boolean_t reqq_waiting;
190};
191
192struct bfa_fcxp_wqe_s {
193 struct list_head qe;
194 bfa_fcxp_alloc_cbfn_t alloc_cbfn;
195 void *alloc_cbarg;
196 void *caller;
197 struct bfa_s *bfa;
198 int nreq_sgles;
199 int nrsp_sgles;
200 bfa_fcxp_get_sgaddr_t req_sga_cbfn;
201 bfa_fcxp_get_sglen_t req_sglen_cbfn;
202 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn;
203 bfa_fcxp_get_sglen_t rsp_sglen_cbfn;
204};
205
206#define BFA_FCXP_REQ_PLD(_fcxp) (bfa_fcxp_get_reqbuf(_fcxp))
207#define BFA_FCXP_RSP_FCHS(_fcxp) (&((_fcxp)->rsp_info.fchs))
208#define BFA_FCXP_RSP_PLD(_fcxp) (bfa_fcxp_get_rspbuf(_fcxp))
209
210#define BFA_FCXP_REQ_PLD_PA(_fcxp) \
211 ((_fcxp)->fcxp_mod->req_pld_list_pa + \
212 ((_fcxp)->fcxp_mod->req_pld_sz * (_fcxp)->fcxp_tag))
213
214#define BFA_FCXP_RSP_PLD_PA(_fcxp) \
215 ((_fcxp)->fcxp_mod->rsp_pld_list_pa + \
216 ((_fcxp)->fcxp_mod->rsp_pld_sz * (_fcxp)->fcxp_tag))
217
218void bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
219
220
221/**
222 * RPORT related defines
223 */
224#define BFA_RPORT_MIN 4
225
/* Per-BFA rport module state: the rport array plus free/active lists */
struct bfa_rport_mod_s {
	struct bfa_rport_s *rps_list;	/*  list of rports */
	struct list_head	rp_free_q;	/*  free bfa_rports */
	struct list_head	rp_active_q;	/*  active bfa_rports */
	u16	num_rports;	/*  number of rports */
};
232
233#define BFA_RPORT_MOD(__bfa) (&(__bfa)->modules.rport_mod)
234
235/**
236 * Convert rport tag to RPORT
237 */
238#define BFA_RPORT_FROM_TAG(__bfa, _tag) \
239 (BFA_RPORT_MOD(__bfa)->rps_list + \
240 ((_tag) & (BFA_RPORT_MOD(__bfa)->num_rports - 1)))
241
242/*
243 * protected functions
244 */
245void bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
246
247/**
248 * BFA rport information.
249 */
250struct bfa_rport_info_s {
251 u16 max_frmsz; /* max rcv pdu size */
252 u32 pid:24, /* remote port ID */
253 lp_tag:8; /* tag */
254 u32 local_pid:24, /* local port ID */
255 cisc:8; /* CIRO supported */
256 u8 fc_class; /* supported FC classes. enum fc_cos */
257 u8 vf_en; /* virtual fabric enable */
258 u16 vf_id; /* virtual fabric ID */
259 enum bfa_port_speed speed; /* Rport's current speed */
260};
261
262/**
263 * BFA rport data structure
264 */
/**
 * BFA rport data structure
 */
struct bfa_rport_s {
	struct list_head	qe;	/*  queue element */
	bfa_sm_t	sm;		/*  state machine */
	struct bfa_s	*bfa;		/*  backpointer to BFA */
	void		*rport_drv;	/*  fcs/driver rport object */
	u16	fw_handle;	/*  firmware rport handle */
	u16	rport_tag;	/*  BFA rport tag */
	struct bfa_rport_info_s rport_info; /*  rport info from fcs/driver */
	struct bfa_reqq_wait_s reqq_wait; /*  to wait for room in reqq */
	struct bfa_cb_qe_s	hcb_qe;	/*  BFA callback qelem */
	struct bfa_rport_hal_stats_s stats; /*  BFA rport statistics */
	struct bfa_rport_qos_attr_s qos_attr;
	/* NOTE(review): the union tag 'a' looks meaningless -- could be
	 * made anonymous if nothing references 'union a'; verify first. */
	union a {
		bfa_status_t	status;	/*  f/w status */
		void		*fw_msg; /*  QoS scn event */
	} event_arg;
};
282#define BFA_RPORT_FC_COS(_rport) ((_rport)->rport_info.fc_class)
283
284
285/**
286 * UF - unsolicited receive related defines
287 */
288
289#define BFA_UF_MIN (4)
290
291
292struct bfa_uf_s {
293 struct list_head qe; /* queue element */
294 struct bfa_s *bfa; /* bfa instance */
295 u16 uf_tag; /* identifying tag fw msgs */
296 u16 vf_id;
297 u16 src_rport_handle;
298 u16 rsvd;
299 u8 *data_ptr;
300 u16 data_len; /* actual receive length */
301 u16 pb_len; /* posted buffer length */
302 void *buf_kva; /* buffer virtual address */
303 u64 buf_pa; /* buffer physical address */
304 struct bfa_cb_qe_s hcb_qe; /* comp: BFA comp qelem */
305 struct bfa_sge_s sges[BFI_SGE_INLINE_MAX];
306};
307
308/**
309 * Callback prototype for unsolicited frame receive handler.
310 *
311 * @param[in] cbarg callback arg for receive handler
312 * @param[in] uf unsolicited frame descriptor
313 *
314 * @return None
315 */
316typedef void (*bfa_cb_uf_recv_t) (void *cbarg, struct bfa_uf_s *uf);
317
318struct bfa_uf_mod_s {
319 struct bfa_s *bfa; /* back pointer to BFA */
320 struct bfa_uf_s *uf_list; /* array of UFs */
321 u16 num_ufs; /* num unsolicited rx frames */
322 struct list_head uf_free_q; /* free UFs */
323 struct list_head uf_posted_q; /* UFs posted to IOC */
324 struct bfa_uf_buf_s *uf_pbs_kva; /* list UF bufs request pld */
325 u64 uf_pbs_pa; /* phy addr for UF bufs */
326 struct bfi_uf_buf_post_s *uf_buf_posts;
327 /* pre-built UF post msgs */
328 bfa_cb_uf_recv_t ufrecv; /* uf recv handler function */
329 void *cbarg; /* uf receive handler arg */
330};
331
332#define BFA_UF_MOD(__bfa) (&(__bfa)->modules.uf_mod)
333
334#define ufm_pbs_pa(_ufmod, _uftag) \
335 ((_ufmod)->uf_pbs_pa + sizeof(struct bfa_uf_buf_s) * (_uftag))
336
337void bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
338
339#define BFA_UF_BUFSZ (2 * 1024 + 256)
340
341/**
342 * @todo private
343 */
344struct bfa_uf_buf_s {
345 u8 d[BFA_UF_BUFSZ];
346};
347
348
349/**
350 * LPS - bfa lport login/logout service interface
351 */
352struct bfa_lps_s {
353 struct list_head qe; /* queue element */
354 struct bfa_s *bfa; /* parent bfa instance */
355 bfa_sm_t sm; /* finite state machine */
356 u8 lp_tag; /* lport tag */
357 u8 reqq; /* lport request queue */
358 u8 alpa; /* ALPA for loop topologies */
359 u32 lp_pid; /* lport port ID */
360 bfa_boolean_t fdisc; /* snd FDISC instead of FLOGI */
361 bfa_boolean_t auth_en; /* enable authentication */
362 bfa_boolean_t auth_req; /* authentication required */
363 bfa_boolean_t npiv_en; /* NPIV is allowed by peer */
364 bfa_boolean_t fport; /* attached peer is F_PORT */
365 bfa_boolean_t brcd_switch; /* attached peer is brcd sw */
366 bfa_status_t status; /* login status */
367 u16 pdusz; /* max receive PDU size */
368 u16 pr_bbcred; /* BB_CREDIT from peer */
369 u8 lsrjt_rsn; /* LSRJT reason */
370 u8 lsrjt_expl; /* LSRJT explanation */
371 wwn_t pwwn; /* port wwn of lport */
372 wwn_t nwwn; /* node wwn of lport */
373 wwn_t pr_pwwn; /* port wwn of lport peer */
374 wwn_t pr_nwwn; /* node wwn of lport peer */
375 mac_t lp_mac; /* fpma/spma MAC for lport */
376 mac_t fcf_mac; /* FCF MAC of lport */
377 struct bfa_reqq_wait_s wqe; /* request wait queue element */
378 void *uarg; /* user callback arg */
379 struct bfa_cb_qe_s hcb_qe; /* comp: callback qelem */
380 struct bfi_lps_login_rsp_s *loginrsp;
381 bfa_eproto_status_t ext_status;
382};
383
384struct bfa_lps_mod_s {
385 struct list_head lps_free_q;
386 struct list_head lps_active_q;
387 struct bfa_lps_s *lps_arr;
388 int num_lps;
389};
390
391#define BFA_LPS_MOD(__bfa) (&(__bfa)->modules.lps_mod)
392#define BFA_LPS_FROM_TAG(__mod, __tag) (&(__mod)->lps_arr[__tag])
393
394/*
395 * external functions
396 */
397void bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
398
399
400/**
401 * FCPORT related defines
402 */
403
404#define BFA_FCPORT(_bfa) (&((_bfa)->modules.port))
405typedef void (*bfa_cb_port_t) (void *cbarg, enum bfa_status status);
406
407/**
408 * Link notification data structure
409 */
410struct bfa_fcport_ln_s {
411 struct bfa_fcport_s *fcport;
412 bfa_sm_t sm;
413 struct bfa_cb_qe_s ln_qe; /* BFA callback queue elem for ln */
414 enum bfa_port_linkstate ln_event; /* ln event for callback */
415};
416
417struct bfa_fcport_trunk_s {
418 struct bfa_trunk_attr_s attr;
419};
420
421/**
422 * BFA FC port data structure
423 */
424struct bfa_fcport_s {
425 struct bfa_s *bfa; /* parent BFA instance */
426 bfa_sm_t sm; /* port state machine */
427 wwn_t nwwn; /* node wwn of physical port */
428 wwn_t pwwn; /* port wwn of physical oprt */
429 enum bfa_port_speed speed_sup;
430 /* supported speeds */
431 enum bfa_port_speed speed; /* current speed */
432 enum bfa_port_topology topology; /* current topology */
433 u8 myalpa; /* my ALPA in LOOP topology */
434 u8 rsvd[3];
435 struct bfa_port_cfg_s cfg; /* current port configuration */
436 struct bfa_qos_attr_s qos_attr; /* QoS Attributes */
437 struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */
438 struct bfa_reqq_wait_s reqq_wait;
439 /* to wait for room in reqq */
440 struct bfa_reqq_wait_s svcreq_wait;
441 /* to wait for room in reqq */
442 struct bfa_reqq_wait_s stats_reqq_wait;
443 /* to wait for room in reqq (stats) */
444 void *event_cbarg;
445 void (*event_cbfn) (void *cbarg,
446 enum bfa_port_linkstate event);
447 union {
448 union bfi_fcport_i2h_msg_u i2hmsg;
449 } event_arg;
450 void *bfad; /* BFA driver handle */
451 struct bfa_fcport_ln_s ln; /* Link Notification */
452 struct bfa_cb_qe_s hcb_qe; /* BFA callback queue elem */
453 struct bfa_timer_s timer; /* timer */
454 u32 msgtag; /* fimrware msg tag for reply */
455 u8 *stats_kva;
456 u64 stats_pa;
457 union bfa_fcport_stats_u *stats;
458 union bfa_fcport_stats_u *stats_ret; /* driver stats location */
459 bfa_status_t stats_status; /* stats/statsclr status */
460 bfa_boolean_t stats_busy; /* outstanding stats/statsclr */
461 bfa_boolean_t stats_qfull;
462 u32 stats_reset_time; /* stats reset time stamp */
463 bfa_cb_port_t stats_cbfn; /* driver callback function */
464 void *stats_cbarg; /* *!< user callback arg */
465 bfa_boolean_t diag_busy; /* diag busy status */
466 bfa_boolean_t beacon; /* port beacon status */
467 bfa_boolean_t link_e2e_beacon; /* link beacon status */
468 struct bfa_fcport_trunk_s trunk;
469 u16 fcoe_vlan;
470};
471
472#define BFA_FCPORT_MOD(__bfa) (&(__bfa)->modules.fcport)
473
474/*
475 * protected functions
476 */
477void bfa_fcport_init(struct bfa_s *bfa);
478void bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
479
480/*
481 * bfa fcport API functions
482 */
483bfa_status_t bfa_fcport_enable(struct bfa_s *bfa);
484bfa_status_t bfa_fcport_disable(struct bfa_s *bfa);
485bfa_status_t bfa_fcport_cfg_speed(struct bfa_s *bfa,
486 enum bfa_port_speed speed);
487enum bfa_port_speed bfa_fcport_get_speed(struct bfa_s *bfa);
488bfa_status_t bfa_fcport_cfg_topology(struct bfa_s *bfa,
489 enum bfa_port_topology topo);
490enum bfa_port_topology bfa_fcport_get_topology(struct bfa_s *bfa);
491bfa_status_t bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa);
492bfa_boolean_t bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa);
493u8 bfa_fcport_get_myalpa(struct bfa_s *bfa);
494bfa_status_t bfa_fcport_clr_hardalpa(struct bfa_s *bfa);
495bfa_status_t bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxsize);
496u16 bfa_fcport_get_maxfrsize(struct bfa_s *bfa);
497u8 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa);
498void bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr);
499wwn_t bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node);
500void bfa_fcport_event_register(struct bfa_s *bfa,
501 void (*event_cbfn) (void *cbarg,
502 enum bfa_port_linkstate event), void *event_cbarg);
503bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa);
504void bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off);
505void bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off);
506bfa_status_t bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa,
507 enum bfa_port_speed speed);
508enum bfa_port_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa);
509
510void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit);
511void bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status);
512void bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
513 bfa_boolean_t link_e2e_beacon);
514void bfa_fcport_qos_get_attr(struct bfa_s *bfa,
515 struct bfa_qos_attr_s *qos_attr);
516void bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
517 struct bfa_qos_vc_attr_s *qos_vc_attr);
518bfa_status_t bfa_fcport_get_qos_stats(struct bfa_s *bfa,
519 union bfa_fcport_stats_u *stats,
520 bfa_cb_port_t cbfn, void *cbarg);
521bfa_status_t bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn,
522 void *cbarg);
523bfa_status_t bfa_fcport_get_fcoe_stats(struct bfa_s *bfa,
524 union bfa_fcport_stats_u *stats,
525 bfa_cb_port_t cbfn, void *cbarg);
526bfa_status_t bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn,
527 void *cbarg);
528bfa_boolean_t bfa_fcport_is_ratelim(struct bfa_s *bfa);
529bfa_boolean_t bfa_fcport_is_linkup(struct bfa_s *bfa);
530bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa,
531 union bfa_fcport_stats_u *stats,
532 bfa_cb_port_t cbfn, void *cbarg);
533bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn,
534 void *cbarg);
535bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa);
536
537/*
538 * bfa rport API functions
539 */
540struct bfa_rport_s *bfa_rport_create(struct bfa_s *bfa, void *rport_drv);
541void bfa_rport_delete(struct bfa_rport_s *rport);
542void bfa_rport_online(struct bfa_rport_s *rport,
543 struct bfa_rport_info_s *rport_info);
544void bfa_rport_offline(struct bfa_rport_s *rport);
545void bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed);
546void bfa_rport_get_stats(struct bfa_rport_s *rport,
547 struct bfa_rport_hal_stats_s *stats);
548void bfa_rport_clear_stats(struct bfa_rport_s *rport);
549void bfa_cb_rport_online(void *rport);
550void bfa_cb_rport_offline(void *rport);
551void bfa_cb_rport_qos_scn_flowid(void *rport,
552 struct bfa_rport_qos_attr_s old_qos_attr,
553 struct bfa_rport_qos_attr_s new_qos_attr);
554void bfa_cb_rport_qos_scn_prio(void *rport,
555 struct bfa_rport_qos_attr_s old_qos_attr,
556 struct bfa_rport_qos_attr_s new_qos_attr);
557void bfa_rport_get_qos_attr(struct bfa_rport_s *rport,
558 struct bfa_rport_qos_attr_s *qos_attr);
559
560/*
561 * bfa fcxp API functions
562 */
563struct bfa_fcxp_s *bfa_fcxp_alloc(void *bfad_fcxp, struct bfa_s *bfa,
564 int nreq_sgles, int nrsp_sgles,
565 bfa_fcxp_get_sgaddr_t get_req_sga,
566 bfa_fcxp_get_sglen_t get_req_sglen,
567 bfa_fcxp_get_sgaddr_t get_rsp_sga,
568 bfa_fcxp_get_sglen_t get_rsp_sglen);
569void bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
570 bfa_fcxp_alloc_cbfn_t alloc_cbfn,
571 void *cbarg, void *bfad_fcxp,
572 int nreq_sgles, int nrsp_sgles,
573 bfa_fcxp_get_sgaddr_t get_req_sga,
574 bfa_fcxp_get_sglen_t get_req_sglen,
575 bfa_fcxp_get_sgaddr_t get_rsp_sga,
576 bfa_fcxp_get_sglen_t get_rsp_sglen);
577void bfa_fcxp_walloc_cancel(struct bfa_s *bfa,
578 struct bfa_fcxp_wqe_s *wqe);
579void bfa_fcxp_discard(struct bfa_fcxp_s *fcxp);
580
581void *bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp);
582void *bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp);
583
584void bfa_fcxp_free(struct bfa_fcxp_s *fcxp);
585
586void bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
587 u16 vf_id, u8 lp_tag,
588 bfa_boolean_t cts, enum fc_cos cos,
589 u32 reqlen, struct fchs_s *fchs,
590 bfa_cb_fcxp_send_t cbfn,
591 void *cbarg,
592 u32 rsp_maxlen, u8 rsp_timeout);
593bfa_status_t bfa_fcxp_abort(struct bfa_fcxp_s *fcxp);
594u32 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp);
595u32 bfa_fcxp_get_maxrsp(struct bfa_s *bfa);
596
597static inline void *
598bfa_uf_get_frmbuf(struct bfa_uf_s *uf)
599{
600 return uf->data_ptr;
601}
602
603static inline u16
604bfa_uf_get_frmlen(struct bfa_uf_s *uf)
605{
606 return uf->data_len;
607}
608
609/*
610 * bfa uf API functions
611 */
612void bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv,
613 void *cbarg);
614void bfa_uf_free(struct bfa_uf_s *uf);
615
616/**
617 * bfa lport service api
618 */
619
620u32 bfa_lps_get_max_vport(struct bfa_s *bfa);
621struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa);
622void bfa_lps_delete(struct bfa_lps_s *lps);
623void bfa_lps_discard(struct bfa_lps_s *lps);
624void bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa,
625 u16 pdusz, wwn_t pwwn, wwn_t nwwn,
626 bfa_boolean_t auth_en);
627void bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz,
628 wwn_t pwwn, wwn_t nwwn);
629void bfa_lps_flogo(struct bfa_lps_s *lps);
630void bfa_lps_fdisclogo(struct bfa_lps_s *lps);
631u8 bfa_lps_get_tag(struct bfa_lps_s *lps);
632bfa_boolean_t bfa_lps_is_npiv_en(struct bfa_lps_s *lps);
633bfa_boolean_t bfa_lps_is_fport(struct bfa_lps_s *lps);
634bfa_boolean_t bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps);
635bfa_boolean_t bfa_lps_is_authreq(struct bfa_lps_s *lps);
636bfa_eproto_status_t bfa_lps_get_extstatus(struct bfa_lps_s *lps);
637u32 bfa_lps_get_pid(struct bfa_lps_s *lps);
638u32 bfa_lps_get_base_pid(struct bfa_s *bfa);
639u8 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid);
640u16 bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps);
641wwn_t bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps);
642wwn_t bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps);
643u8 bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps);
644u8 bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps);
645mac_t bfa_lps_get_lp_mac(struct bfa_lps_s *lps);
646void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status);
647void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status);
648void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg);
649void bfa_cb_lps_cvl_event(void *bfad, void *uarg);
650
651void bfa_trunk_enable_cfg(struct bfa_s *bfa);
652bfa_status_t bfa_trunk_enable(struct bfa_s *bfa);
653bfa_status_t bfa_trunk_disable(struct bfa_s *bfa);
654bfa_status_t bfa_trunk_get_attr(struct bfa_s *bfa,
655 struct bfa_trunk_attr_s *attr);
656
657#endif /* __BFA_SVC_H__ */
diff --git a/drivers/scsi/bfa/bfa_timer.c b/drivers/scsi/bfa/bfa_timer.c
deleted file mode 100644
index cb76481f5cb1..000000000000
--- a/drivers/scsi/bfa/bfa_timer.c
+++ /dev/null
@@ -1,90 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa_timer.h>
19#include <cs/bfa_debug.h>
20
21void
22bfa_timer_init(struct bfa_timer_mod_s *mod)
23{
24 INIT_LIST_HEAD(&mod->timer_q);
25}
26
27void
28bfa_timer_beat(struct bfa_timer_mod_s *mod)
29{
30 struct list_head *qh = &mod->timer_q;
31 struct list_head *qe, *qe_next;
32 struct bfa_timer_s *elem;
33 struct list_head timedout_q;
34
35 INIT_LIST_HEAD(&timedout_q);
36
37 qe = bfa_q_next(qh);
38
39 while (qe != qh) {
40 qe_next = bfa_q_next(qe);
41
42 elem = (struct bfa_timer_s *) qe;
43 if (elem->timeout <= BFA_TIMER_FREQ) {
44 elem->timeout = 0;
45 list_del(&elem->qe);
46 list_add_tail(&elem->qe, &timedout_q);
47 } else {
48 elem->timeout -= BFA_TIMER_FREQ;
49 }
50
51 qe = qe_next; /* go to next elem */
52 }
53
54 /*
55 * Pop all the timeout entries
56 */
57 while (!list_empty(&timedout_q)) {
58 bfa_q_deq(&timedout_q, &elem);
59 elem->timercb(elem->arg);
60 }
61}
62
63/**
64 * Should be called with lock protection
65 */
66void
67bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
68 void (*timercb) (void *), void *arg, unsigned int timeout)
69{
70
71 bfa_assert(timercb != NULL);
72 bfa_assert(!bfa_q_is_on_q(&mod->timer_q, timer));
73
74 timer->timeout = timeout;
75 timer->timercb = timercb;
76 timer->arg = arg;
77
78 list_add_tail(&timer->qe, &mod->timer_q);
79}
80
81/**
82 * Should be called with lock protection
83 */
84void
85bfa_timer_stop(struct bfa_timer_s *timer)
86{
87 bfa_assert(!list_empty(&timer->qe));
88
89 list_del(&timer->qe);
90}
diff --git a/drivers/scsi/bfa/bfa_trcmod_priv.h b/drivers/scsi/bfa/bfa_trcmod_priv.h
deleted file mode 100644
index a7a82610db85..000000000000
--- a/drivers/scsi/bfa/bfa_trcmod_priv.h
+++ /dev/null
@@ -1,64 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * hal_trcmod.h BFA trace modules
20 */
21
22#ifndef __BFA_TRCMOD_PRIV_H__
23#define __BFA_TRCMOD_PRIV_H__
24
25#include <cs/bfa_trc.h>
26
27/*
28 * !!! Only append to the enums defined here to avoid any versioning
29 * !!! needed between trace utility and driver version
30 */
31enum {
32 BFA_TRC_HAL_INTR = 1,
33 BFA_TRC_HAL_FCXP = 2,
34 BFA_TRC_HAL_UF = 3,
35 BFA_TRC_HAL_RPORT = 4,
36 BFA_TRC_HAL_FCPIM = 5,
37 BFA_TRC_HAL_IOIM = 6,
38 BFA_TRC_HAL_TSKIM = 7,
39 BFA_TRC_HAL_ITNIM = 8,
40 BFA_TRC_HAL_FCPORT = 9,
41 BFA_TRC_HAL_SGPG = 10,
42 BFA_TRC_HAL_FLASH = 11,
43 BFA_TRC_HAL_DEBUG = 12,
44 BFA_TRC_HAL_WWN = 13,
45 BFA_TRC_HAL_FLASH_RAW = 14,
46 BFA_TRC_HAL_SBOOT = 15,
47 BFA_TRC_HAL_SBOOT_IO = 16,
48 BFA_TRC_HAL_SBOOT_INTR = 17,
49 BFA_TRC_HAL_SBTEST = 18,
50 BFA_TRC_HAL_IPFC = 19,
51 BFA_TRC_HAL_IOCFC = 20,
52 BFA_TRC_HAL_FCPTM = 21,
53 BFA_TRC_HAL_IOTM = 22,
54 BFA_TRC_HAL_TSKTM = 23,
55 BFA_TRC_HAL_TIN = 24,
56 BFA_TRC_HAL_LPS = 25,
57 BFA_TRC_HAL_FCDIAG = 26,
58 BFA_TRC_HAL_PBIND = 27,
59 BFA_TRC_HAL_IOCFC_CT = 28,
60 BFA_TRC_HAL_IOCFC_CB = 29,
61 BFA_TRC_HAL_IOCFC_Q = 30,
62};
63
64#endif /* __BFA_TRCMOD_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_tskim.c b/drivers/scsi/bfa/bfa_tskim.c
deleted file mode 100644
index ad9aaaedd3f1..000000000000
--- a/drivers/scsi/bfa/bfa_tskim.c
+++ /dev/null
@@ -1,690 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <bfa_cb_ioim_macros.h>
20
21BFA_TRC_FILE(HAL, TSKIM);
22
23/**
24 * task management completion handling
25 */
26#define bfa_tskim_qcomp(__tskim, __cbfn) do { \
27 bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, \
28 __cbfn, (__tskim)); \
29 bfa_tskim_notify_comp(__tskim); \
30} while (0)
31
32#define bfa_tskim_notify_comp(__tskim) do { \
33 if ((__tskim)->notify) \
34 bfa_itnim_tskdone((__tskim)->itnim); \
35} while (0)
36
37/*
38 * forward declarations
39 */
40static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
41static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
42static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
43 lun_t lun);
44static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
45static void bfa_tskim_cleanp_comp(void *tskim_cbarg);
46static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
47static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
48static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
49static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);
50
51/**
52 * bfa_tskim_sm
53 */
54
55enum bfa_tskim_event {
56 BFA_TSKIM_SM_START = 1, /* TM command start */
57 BFA_TSKIM_SM_DONE = 2, /* TM completion */
58 BFA_TSKIM_SM_QRESUME = 3, /* resume after qfull */
59 BFA_TSKIM_SM_HWFAIL = 5, /* IOC h/w failure event */
60 BFA_TSKIM_SM_HCB = 6, /* BFA callback completion */
61 BFA_TSKIM_SM_IOS_DONE = 7, /* IO and sub TM completions */
62 BFA_TSKIM_SM_CLEANUP = 8, /* TM cleanup on ITN offline */
63 BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */
64};
65
66static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
67 enum bfa_tskim_event event);
68static void bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
69 enum bfa_tskim_event event);
70static void bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
71 enum bfa_tskim_event event);
72static void bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
73 enum bfa_tskim_event event);
74static void bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
75 enum bfa_tskim_event event);
76static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
77 enum bfa_tskim_event event);
78static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
79 enum bfa_tskim_event event);
80
81/**
82 * Task management command beginning state.
83 */
84static void
85bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
86{
87 bfa_trc(tskim->bfa, event);
88
89 switch (event) {
90 case BFA_TSKIM_SM_START:
91 bfa_sm_set_state(tskim, bfa_tskim_sm_active);
92 bfa_tskim_gather_ios(tskim);
93
94 /**
95 * If device is offline, do not send TM on wire. Just cleanup
96 * any pending IO requests and complete TM request.
97 */
98 if (!bfa_itnim_is_online(tskim->itnim)) {
99 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
100 tskim->tsk_status = BFI_TSKIM_STS_OK;
101 bfa_tskim_cleanup_ios(tskim);
102 return;
103 }
104
105 if (!bfa_tskim_send(tskim)) {
106 bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
107 bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
108 &tskim->reqq_wait);
109 }
110 break;
111
112 default:
113 bfa_sm_fault(tskim->bfa, event);
114 }
115}
116
117/**
118 * brief
119 * TM command is active, awaiting completion from firmware to
120 * cleanup IO requests in TM scope.
121 */
122static void
123bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
124{
125 bfa_trc(tskim->bfa, event);
126
127 switch (event) {
128 case BFA_TSKIM_SM_DONE:
129 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
130 bfa_tskim_cleanup_ios(tskim);
131 break;
132
133 case BFA_TSKIM_SM_CLEANUP:
134 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
135 if (!bfa_tskim_send_abort(tskim)) {
136 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
137 bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
138 &tskim->reqq_wait);
139 }
140 break;
141
142 case BFA_TSKIM_SM_HWFAIL:
143 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
144 bfa_tskim_iocdisable_ios(tskim);
145 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
146 break;
147
148 default:
149 bfa_sm_fault(tskim->bfa, event);
150 }
151}
152
153/**
154 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
155 * completion event from firmware.
156 */
157static void
158bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
159{
160 bfa_trc(tskim->bfa, event);
161
162 switch (event) {
163 case BFA_TSKIM_SM_DONE:
164 /**
165 * Ignore and wait for ABORT completion from firmware.
166 */
167 break;
168
169 case BFA_TSKIM_SM_CLEANUP_DONE:
170 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
171 bfa_tskim_cleanup_ios(tskim);
172 break;
173
174 case BFA_TSKIM_SM_HWFAIL:
175 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
176 bfa_tskim_iocdisable_ios(tskim);
177 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
178 break;
179
180 default:
181 bfa_sm_fault(tskim->bfa, event);
182 }
183}
184
185static void
186bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
187{
188 bfa_trc(tskim->bfa, event);
189
190 switch (event) {
191 case BFA_TSKIM_SM_IOS_DONE:
192 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
193 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
194 break;
195
196 case BFA_TSKIM_SM_CLEANUP:
197 /**
198 * Ignore, TM command completed on wire.
199 * Notify TM conmpletion on IO cleanup completion.
200 */
201 break;
202
203 case BFA_TSKIM_SM_HWFAIL:
204 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
205 bfa_tskim_iocdisable_ios(tskim);
206 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
207 break;
208
209 default:
210 bfa_sm_fault(tskim->bfa, event);
211 }
212}
213
214/**
215 * Task management command is waiting for room in request CQ
216 */
217static void
218bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
219{
220 bfa_trc(tskim->bfa, event);
221
222 switch (event) {
223 case BFA_TSKIM_SM_QRESUME:
224 bfa_sm_set_state(tskim, bfa_tskim_sm_active);
225 bfa_tskim_send(tskim);
226 break;
227
228 case BFA_TSKIM_SM_CLEANUP:
229 /**
230 * No need to send TM on wire since ITN is offline.
231 */
232 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
233 bfa_reqq_wcancel(&tskim->reqq_wait);
234 bfa_tskim_cleanup_ios(tskim);
235 break;
236
237 case BFA_TSKIM_SM_HWFAIL:
238 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
239 bfa_reqq_wcancel(&tskim->reqq_wait);
240 bfa_tskim_iocdisable_ios(tskim);
241 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
242 break;
243
244 default:
245 bfa_sm_fault(tskim->bfa, event);
246 }
247}
248
249/**
250 * Task management command is active, awaiting for room in request CQ
251 * to send clean up request.
252 */
253static void
254bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
255 enum bfa_tskim_event event)
256{
257 bfa_trc(tskim->bfa, event);
258
259 switch (event) {
260 case BFA_TSKIM_SM_DONE:
261 bfa_reqq_wcancel(&tskim->reqq_wait);
262 /**
263 *
264 * Fall through !!!
265 */
266
267 case BFA_TSKIM_SM_QRESUME:
268 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
269 bfa_tskim_send_abort(tskim);
270 break;
271
272 case BFA_TSKIM_SM_HWFAIL:
273 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
274 bfa_reqq_wcancel(&tskim->reqq_wait);
275 bfa_tskim_iocdisable_ios(tskim);
276 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
277 break;
278
279 default:
280 bfa_sm_fault(tskim->bfa, event);
281 }
282}
283
284/**
285 * BFA callback is pending
286 */
287static void
288bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
289{
290 bfa_trc(tskim->bfa, event);
291
292 switch (event) {
293 case BFA_TSKIM_SM_HCB:
294 bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
295 bfa_tskim_free(tskim);
296 break;
297
298 case BFA_TSKIM_SM_CLEANUP:
299 bfa_tskim_notify_comp(tskim);
300 break;
301
302 case BFA_TSKIM_SM_HWFAIL:
303 break;
304
305 default:
306 bfa_sm_fault(tskim->bfa, event);
307 }
308}
309
310
311
312/**
313 * bfa_tskim_private
314 */
315
316static void
317__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
318{
319 struct bfa_tskim_s *tskim = cbarg;
320
321 if (!complete) {
322 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
323 return;
324 }
325
326 bfa_stats(tskim->itnim, tm_success);
327 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
328}
329
330static void
331__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
332{
333 struct bfa_tskim_s *tskim = cbarg;
334
335 if (!complete) {
336 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
337 return;
338 }
339
340 bfa_stats(tskim->itnim, tm_failures);
341 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
342 BFI_TSKIM_STS_FAILED);
343}
344
345static bfa_boolean_t
346bfa_tskim_match_scope(struct bfa_tskim_s *tskim, lun_t lun)
347{
348 switch (tskim->tm_cmnd) {
349 case FCP_TM_TARGET_RESET:
350 return BFA_TRUE;
351
352 case FCP_TM_ABORT_TASK_SET:
353 case FCP_TM_CLEAR_TASK_SET:
354 case FCP_TM_LUN_RESET:
355 case FCP_TM_CLEAR_ACA:
356 return (tskim->lun == lun);
357
358 default:
359 bfa_assert(0);
360 }
361
362 return BFA_FALSE;
363}
364
365/**
366 * Gather affected IO requests and task management commands.
367 */
368static void
369bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
370{
371 struct bfa_itnim_s *itnim = tskim->itnim;
372 struct bfa_ioim_s *ioim;
373 struct list_head *qe, *qen;
374
375 INIT_LIST_HEAD(&tskim->io_q);
376
377 /**
378 * Gather any active IO requests first.
379 */
380 list_for_each_safe(qe, qen, &itnim->io_q) {
381 ioim = (struct bfa_ioim_s *) qe;
382 if (bfa_tskim_match_scope
383 (tskim, bfa_cb_ioim_get_lun(ioim->dio))) {
384 list_del(&ioim->qe);
385 list_add_tail(&ioim->qe, &tskim->io_q);
386 }
387 }
388
389 /**
390 * Failback any pending IO requests immediately.
391 */
392 list_for_each_safe(qe, qen, &itnim->pending_q) {
393 ioim = (struct bfa_ioim_s *) qe;
394 if (bfa_tskim_match_scope
395 (tskim, bfa_cb_ioim_get_lun(ioim->dio))) {
396 list_del(&ioim->qe);
397 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
398 bfa_ioim_tov(ioim);
399 }
400 }
401}
402
403/**
404 * IO cleanup completion
405 */
406static void
407bfa_tskim_cleanp_comp(void *tskim_cbarg)
408{
409 struct bfa_tskim_s *tskim = tskim_cbarg;
410
411 bfa_stats(tskim->itnim, tm_io_comps);
412 bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
413}
414
415/**
416 * Gather affected IO requests and task management commands.
417 */
418static void
419bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
420{
421 struct bfa_ioim_s *ioim;
422 struct list_head *qe, *qen;
423
424 bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);
425
426 list_for_each_safe(qe, qen, &tskim->io_q) {
427 ioim = (struct bfa_ioim_s *) qe;
428 bfa_wc_up(&tskim->wc);
429 bfa_ioim_cleanup_tm(ioim, tskim);
430 }
431
432 bfa_wc_wait(&tskim->wc);
433}
434
435/**
436 * Send task management request to firmware.
437 */
438static bfa_boolean_t
439bfa_tskim_send(struct bfa_tskim_s *tskim)
440{
441 struct bfa_itnim_s *itnim = tskim->itnim;
442 struct bfi_tskim_req_s *m;
443
444 /**
445 * check for room in queue to send request now
446 */
447 m = bfa_reqq_next(tskim->bfa, itnim->reqq);
448 if (!m)
449 return BFA_FALSE;
450
451 /**
452 * build i/o request message next
453 */
454 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
455 bfa_lpuid(tskim->bfa));
456
457 m->tsk_tag = bfa_os_htons(tskim->tsk_tag);
458 m->itn_fhdl = tskim->itnim->rport->fw_handle;
459 m->t_secs = tskim->tsecs;
460 m->lun = tskim->lun;
461 m->tm_flags = tskim->tm_cmnd;
462
463 /**
464 * queue I/O message to firmware
465 */
466 bfa_reqq_produce(tskim->bfa, itnim->reqq);
467 return BFA_TRUE;
468}
469
470/**
471 * Send abort request to cleanup an active TM to firmware.
472 */
473static bfa_boolean_t
474bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
475{
476 struct bfa_itnim_s *itnim = tskim->itnim;
477 struct bfi_tskim_abortreq_s *m;
478
479 /**
480 * check for room in queue to send request now
481 */
482 m = bfa_reqq_next(tskim->bfa, itnim->reqq);
483 if (!m)
484 return BFA_FALSE;
485
486 /**
487 * build i/o request message next
488 */
489 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
490 bfa_lpuid(tskim->bfa));
491
492 m->tsk_tag = bfa_os_htons(tskim->tsk_tag);
493
494 /**
495 * queue I/O message to firmware
496 */
497 bfa_reqq_produce(tskim->bfa, itnim->reqq);
498 return BFA_TRUE;
499}
500
501/**
502 * Call to resume task management cmnd waiting for room in request queue.
503 */
504static void
505bfa_tskim_qresume(void *cbarg)
506{
507 struct bfa_tskim_s *tskim = cbarg;
508
509 bfa_fcpim_stats(tskim->fcpim, qresumes);
510 bfa_stats(tskim->itnim, tm_qresumes);
511 bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
512}
513
514/**
515 * Cleanup IOs associated with a task mangement command on IOC failures.
516 */
517static void
518bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
519{
520 struct bfa_ioim_s *ioim;
521 struct list_head *qe, *qen;
522
523 list_for_each_safe(qe, qen, &tskim->io_q) {
524 ioim = (struct bfa_ioim_s *) qe;
525 bfa_ioim_iocdisable(ioim);
526 }
527}
528
529
530
531/**
532 * bfa_tskim_friend
533 */
534
535/**
536 * Notification on completions from related ioim.
537 */
538void
539bfa_tskim_iodone(struct bfa_tskim_s *tskim)
540{
541 bfa_wc_down(&tskim->wc);
542}
543
544/**
545 * Handle IOC h/w failure notification from itnim.
546 */
547void
548bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
549{
550 tskim->notify = BFA_FALSE;
551 bfa_stats(tskim->itnim, tm_iocdowns);
552 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
553}
554
555/**
556 * Cleanup TM command and associated IOs as part of ITNIM offline.
557 */
558void
559bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
560{
561 tskim->notify = BFA_TRUE;
562 bfa_stats(tskim->itnim, tm_cleanups);
563 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
564}
565
566/**
567 * Memory allocation and initialization.
568 */
569void
570bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
571{
572 struct bfa_tskim_s *tskim;
573 u16 i;
574
575 INIT_LIST_HEAD(&fcpim->tskim_free_q);
576
577 tskim = (struct bfa_tskim_s *) bfa_meminfo_kva(minfo);
578 fcpim->tskim_arr = tskim;
579
580 for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
581 /*
582 * initialize TSKIM
583 */
584 bfa_os_memset(tskim, 0, sizeof(struct bfa_tskim_s));
585 tskim->tsk_tag = i;
586 tskim->bfa = fcpim->bfa;
587 tskim->fcpim = fcpim;
588 tskim->notify = BFA_FALSE;
589 bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
590 tskim);
591 bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
592
593 list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
594 }
595
596 bfa_meminfo_kva(minfo) = (u8 *) tskim;
597}
598
599void
600bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim)
601{
602 /**
603 * @todo
604 */
605}
606
607void
608bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
609{
610 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
611 struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
612 struct bfa_tskim_s *tskim;
613 u16 tsk_tag = bfa_os_ntohs(rsp->tsk_tag);
614
615 tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
616 bfa_assert(tskim->tsk_tag == tsk_tag);
617
618 tskim->tsk_status = rsp->tsk_status;
619
620 /**
621 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
622 * requests. All other statuses are for normal completions.
623 */
624 if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
625 bfa_stats(tskim->itnim, tm_cleanup_comps);
626 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
627 } else {
628 bfa_stats(tskim->itnim, tm_fw_rsps);
629 bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
630 }
631}
632
633
634
635/**
636 * bfa_tskim_api
637 */
638
639
640struct bfa_tskim_s *
641bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
642{
643 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
644 struct bfa_tskim_s *tskim;
645
646 bfa_q_deq(&fcpim->tskim_free_q, &tskim);
647
648 if (!tskim)
649 bfa_fcpim_stats(fcpim, no_tskims);
650 else
651 tskim->dtsk = dtsk;
652
653 return tskim;
654}
655
656void
657bfa_tskim_free(struct bfa_tskim_s *tskim)
658{
659 bfa_assert(bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
660 list_del(&tskim->qe);
661 list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
662}
663
664/**
665 * Start a task management command.
666 *
667 * @param[in] tskim BFA task management command instance
668 * @param[in] itnim i-t nexus for the task management command
669 * @param[in] lun lun, if applicable
670 * @param[in] tm_cmnd Task management command code.
671 * @param[in] t_secs Timeout in seconds
672 *
673 * @return None.
674 */
675void
676bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim, lun_t lun,
677 enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
678{
679 tskim->itnim = itnim;
680 tskim->lun = lun;
681 tskim->tm_cmnd = tm_cmnd;
682 tskim->tsecs = tsecs;
683 tskim->notify = BFA_FALSE;
684 bfa_stats(itnim, tm_cmnds);
685
686 list_add_tail(&tskim->qe, &itnim->tsk_q);
687 bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
688}
689
690
diff --git a/drivers/scsi/bfa/bfa_uf.c b/drivers/scsi/bfa/bfa_uf.c
deleted file mode 100644
index b9a9a686ef6a..000000000000
--- a/drivers/scsi/bfa/bfa_uf.c
+++ /dev/null
@@ -1,343 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_uf.c BFA unsolicited frame receive implementation
20 */
21
22#include <bfa.h>
23#include <bfa_svc.h>
24#include <bfi/bfi_uf.h>
25#include <cs/bfa_debug.h>
26
27BFA_TRC_FILE(HAL, UF);
28BFA_MODULE(uf);
29
30/*
31 *****************************************************************************
32 * Internal functions
33 *****************************************************************************
34 */
/*
 * Completion callback for a received unsolicited frame: hands the UF to
 * the handler registered via bfa_uf_recv_register().  A canceled
 * callback (complete == false) is silently dropped.
 */
35static void
36__bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
37{
38	struct bfa_uf_s *uf = cbarg;
39	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
40
41	if (complete)
42		ufm->ufrecv(ufm->cbarg, uf);
43}
44
/*
 * Carve the DMA-able region for the UF payload buffers out of the
 * meminfo allocator, advance its virt/phys cursors past the claimed
 * space, and zero the buffers.
 */
45static void
46claim_uf_pbs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
47{
48	u32 uf_pb_tot_sz;
49
50	ufm->uf_pbs_kva = (struct bfa_uf_buf_s *) bfa_meminfo_dma_virt(mi);
51	ufm->uf_pbs_pa = bfa_meminfo_dma_phys(mi);
52	uf_pb_tot_sz = BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * ufm->num_ufs),
53			BFA_DMA_ALIGN_SZ);
54
55	/* consume the claimed span from the shared meminfo cursors */
56	bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz;
57	bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz;
58
59	bfa_os_memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz);
60}
60
/*
 * Pre-build one BFI buf-post message per UF in kernel virtual memory so
 * bfa_uf_post() can later just memcpy it into the request queue.  Each
 * message carries two SGEs: the data SGE pointing at the UF's DMA
 * payload buffer, and a terminating PGDLEN SGE with a zero address.
 */
61static void
62claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
63{
64	struct bfi_uf_buf_post_s *uf_bp_msg;
65	struct bfi_sge_s *sge;
66	union bfi_addr_u sga_zero = { {0} };
67	u16 i;
68	u16 buf_len;
69
70	ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_meminfo_kva(mi);
71	uf_bp_msg = ufm->uf_buf_posts;
72
73	for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
74	     i++, uf_bp_msg++) {
75		bfa_os_memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
76
77		uf_bp_msg->buf_tag = i;
78		buf_len = sizeof(struct bfa_uf_buf_s);
79		uf_bp_msg->buf_len = bfa_os_htons(buf_len);
80		bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
81			    bfa_lpuid(ufm->bfa));
82
83		/* SGE 0: the UF payload buffer in DMA memory */
84		sge = uf_bp_msg->sge;
85		sge[0].sg_len = buf_len;
86		sge[0].flags = BFI_SGE_DATA_LAST;
87		bfa_dma_addr_set(sge[0].sga, ufm_pbs_pa(ufm, i));
88		bfa_sge_to_be(sge);
89
90		/* SGE 1: list terminator with zeroed address */
91		sge[1].sg_len = buf_len;
92		sge[1].flags = BFI_SGE_PGDLEN;
93		sge[1].sga = sga_zero;
94		bfa_sge_to_be(&sge[1]);
95	}
96
97	/**
98	 * advance pointer beyond consumed memory
99	 */
100	bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg;
101}
100
/*
 * Claim kernel virtual memory for the UF descriptor array, initialize
 * each descriptor (tag, payload kva/pa from the already-claimed DMA
 * region) and place them all on the free queue.
 */
101static void
102claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
103{
104	u16 i;
105	struct bfa_uf_s *uf;
106
107	/*
108	 * Claim block of memory for UF list
109	 */
110	ufm->uf_list = (struct bfa_uf_s *) bfa_meminfo_kva(mi);
111
112	/*
113	 * Initialize UFs and queue it in UF free queue
114	 */
115	for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
116		bfa_os_memset(uf, 0, sizeof(struct bfa_uf_s));
117		uf->bfa = ufm->bfa;
118		uf->uf_tag = i;
119		uf->pb_len = sizeof(struct bfa_uf_buf_s);
120		uf->buf_kva = (void *)&ufm->uf_pbs_kva[i];
121		uf->buf_pa = ufm_pbs_pa(ufm, i);
122		list_add_tail(&uf->qe, &ufm->uf_free_q);
123	}
124
125	/**
126	 * advance memory pointer
127	 */
128	bfa_meminfo_kva(mi) = (u8 *) uf;
129}
130
/*
 * Claim all UF module memory from the meminfo allocator.  Order matters:
 * the DMA payload buffers must be claimed first so claim_ufs() and
 * claim_uf_post_msgs() can reference their addresses.
 */
131static void
132uf_mem_claim(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
133{
134	claim_uf_pbs(ufm, mi);
135	claim_ufs(ufm, mi);
136	claim_uf_post_msgs(ufm, mi);
137}
138
/*
 * Report the UF module's memory needs for num_uf_bufs buffers:
 * DMA-able length into *dm_len, kernel-virtual length into *ndm_len.
 * Both are accumulated, not overwritten.
 */
139static void
140bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len)
141{
142	u32 num_ufs = cfg->fwcfg.num_uf_bufs;
143
144	/*
145	 * dma-able memory for UF posted bufs
146	 */
147	*dm_len += BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * num_ufs),
148			BFA_DMA_ALIGN_SZ);
149
150	/*
151	 * kernel Virtual memory for UFs and UF buf post msg copies
152	 */
153	*ndm_len += sizeof(struct bfa_uf_s) * num_ufs;
154	*ndm_len += sizeof(struct bfi_uf_buf_post_s) * num_ufs;
155}
156
/*
 * BFA module attach entry: reset the per-BFA UF state, record the
 * configured buffer count, init both queues and claim the memory that
 * bfa_uf_meminfo() previously sized.  bfad/pcidev are unused here.
 */
157static void
158bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
159	      struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
160{
161	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
162
163	bfa_os_memset(ufm, 0, sizeof(struct bfa_uf_mod_s));
164	ufm->bfa = bfa;
165	ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
166	INIT_LIST_HEAD(&ufm->uf_free_q);
167	INIT_LIST_HEAD(&ufm->uf_posted_q);
168
169	uf_mem_claim(ufm, meminfo);
170}
171
/* Module detach hook: nothing to release (all memory is meminfo-owned). */
172static void
173bfa_uf_detach(struct bfa_s *bfa)
174{
175}
176
/* Dequeue one UF from the free queue; returns NULL when it is empty. */
177static struct bfa_uf_s *
178bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
179{
180	struct bfa_uf_s *uf;
181
182	bfa_q_deq(&uf_mod->uf_free_q, &uf);
183	return uf;
184}
185
/* Return a UF to the tail of the free queue. */
186static void
187bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
188{
189	list_add_tail(&uf->qe, &uf_mod->uf_free_q);
190}
191
/*
 * Post one UF buffer to the firmware by copying its pre-built buf-post
 * message (see claim_uf_post_msgs()) into the FCXP request queue, then
 * track the UF on the posted queue.  Returns BFA_STATUS_FAILED when no
 * request-queue element is available; the caller owns the UF then.
 */
192static bfa_status_t
193bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
194{
195	struct bfi_uf_buf_post_s *uf_post_msg;
196
197	uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
198	if (!uf_post_msg)
199		return BFA_STATUS_FAILED;
200
201	bfa_os_memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
202		      sizeof(struct bfi_uf_buf_post_s));
203	bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP);
204
205	bfa_trc(ufm->bfa, uf->uf_tag);
206
207	list_add_tail(&uf->qe, &ufm->uf_posted_q);
208	return BFA_STATUS_OK;
209}
210
211static void
212bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
213{
214 struct bfa_uf_s *uf;
215
216 while ((uf = bfa_uf_get(uf_mod)) != NULL) {
217 if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
218 break;
219 }
220}
221
/*
 * Handle a firmware frame-received event: look up the UF by tag, fix up
 * byte order of the lengths, remove the UF from the posted queue, log
 * the frame header (and first payload word when present), and deliver
 * it -- synchronously when FCS owns the path (bfa->fcs), otherwise via
 * the deferred callback queue.
 */
222static void
223uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
224{
225	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
226	u16 uf_tag = m->buf_tag;
227	struct bfa_uf_buf_s *uf_buf = &ufm->uf_pbs_kva[uf_tag];
228	struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
229	u8 *buf = &uf_buf->d[0];
230	struct fchs_s *fchs;
231
232	/* lengths arrive big-endian from the firmware */
233	m->frm_len = bfa_os_ntohs(m->frm_len);
234	m->xfr_len = bfa_os_ntohs(m->xfr_len);
235
236	fchs = (struct fchs_s *) uf_buf;	/* NOTE(review): fchs is never read afterwards */
237
238	list_del(&uf->qe);	/* dequeue from posted queue */
239
240	uf->data_ptr = buf;
241	uf->data_len = m->xfr_len;
242
243	bfa_assert(uf->data_len >= sizeof(struct fchs_s));
244
245	if (uf->data_len == sizeof(struct fchs_s)) {
246		/* header-only frame */
247		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
248			uf->data_len, (struct fchs_s *) buf);
249	} else {
250		/* log header plus the first 32-bit word of the payload */
251		u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
252		bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
253				BFA_PL_EID_RX, uf->data_len,
254				(struct fchs_s *) buf, pld_w0);
255	}
256
257	if (bfa->fcs)
258		__bfa_cb_uf_recv(uf, BFA_TRUE);
259	else
260		bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
261}
259
/* Module stop hook: no per-stop UF work needed. */
260static void
261bfa_uf_stop(struct bfa_s *bfa)
262{
263}
264
/*
 * IOC-disable hook: the firmware no longer owns the posted buffers, so
 * move every UF from the posted queue back to the free queue (safe
 * iteration since list_del happens inside the loop).
 */
265static void
266bfa_uf_iocdisable(struct bfa_s *bfa)
267{
268	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
269	struct bfa_uf_s *uf;
270	struct list_head *qe, *qen;
271
272	list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
273		uf = (struct bfa_uf_s *) qe;
274		list_del(&uf->qe);
275		bfa_uf_put(ufm, uf);
276	}
277}
278
/* Module start hook: hand all free UF buffers to the firmware. */
279static void
280bfa_uf_start(struct bfa_s *bfa)
281{
282	bfa_uf_post_all(BFA_UF_MOD(bfa));
283}
284
285
286
287/**
288 * bfa_uf_api
289 */
290
291/**
292 * Register handler for all unsolicted recieve frames.
293 *
294 * @param[in] bfa BFA instance
295 * @param[in] ufrecv receive handler function
296 * @param[in] cbarg receive handler arg
297 */
298void
299bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
300{
301 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
302
303 ufm->ufrecv = ufrecv;
304 ufm->cbarg = cbarg;
305}
306
307/**
308 * Free an unsolicited frame back to BFA.
309 *
310 * @param[in] uf unsolicited frame to be freed
311 *
312 * @return None
313 */
314void
315bfa_uf_free(struct bfa_uf_s *uf)
316{
317 bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
318 bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
319}
320
321
322
323/**
324 * uf_pub BFA uf module public functions
325 */
326
/*
 * UF message-class interrupt dispatcher: only BFI_UF_I2H_FRM_RCVD is
 * expected; any other message id is traced and asserted on.
 */
327void
328bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
329{
330	bfa_trc(bfa, msg->mhdr.msg_id);
331
332	switch (msg->mhdr.msg_id) {
333	case BFI_UF_I2H_FRM_RCVD:
334		uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
335		break;
336
337	default:
338		bfa_trc(bfa, msg->mhdr.msg_id);
339		bfa_assert(0);
340	}
341}
342
343
diff --git a/drivers/scsi/bfa/bfa_uf_priv.h b/drivers/scsi/bfa/bfa_uf_priv.h
deleted file mode 100644
index bcb490f834f3..000000000000
--- a/drivers/scsi/bfa/bfa_uf_priv.h
+++ /dev/null
@@ -1,47 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_UF_PRIV_H__
18#define __BFA_UF_PRIV_H__
19
20#include <cs/bfa_sm.h>
21#include <bfa_svc.h>
22#include <bfi/bfi_uf.h>
23
24#define BFA_UF_MIN (4)
25
/* Per-BFA unsolicited-frame (UF) module state; see bfa_uf.c */
26struct bfa_uf_mod_s {
27	struct bfa_s *bfa;		/* back pointer to BFA */
28	struct bfa_uf_s *uf_list;	/* array of UFs */
29	u16 num_ufs;			/* num unsolicited rx frames */
30	struct list_head uf_free_q;	/* free UFs */
31	struct list_head uf_posted_q;	/* UFs posted to IOC */
32	struct bfa_uf_buf_s *uf_pbs_kva;	/* list UF bufs request pld */
33	u64 uf_pbs_pa;			/* phy addr for UF bufs */
34	struct bfi_uf_buf_post_s *uf_buf_posts;
35					/* pre-built UF post msgs */
36	bfa_cb_uf_recv_t ufrecv;	/* uf recv handler function */
37	void *cbarg;			/* uf receive handler arg */
38};
39
40#define BFA_UF_MOD(__bfa) (&(__bfa)->modules.uf_mod)
41
42#define ufm_pbs_pa(_ufmod, _uftag) \
43 ((_ufmod)->uf_pbs_pa + sizeof(struct bfa_uf_buf_s) * (_uftag))
44
45void bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
46
47#endif /* __BFA_UF_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index ca04cc9d332f..4d8784e06e14 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -18,46 +18,62 @@
18/** 18/**
19 * bfad.c Linux driver PCI interface module. 19 * bfad.c Linux driver PCI interface module.
20 */ 20 */
21
22#include <linux/slab.h>
23#include <linux/module.h> 21#include <linux/module.h>
24#include <linux/kthread.h> 22#include <linux/kthread.h>
23#include <linux/errno.h>
24#include <linux/sched.h>
25#include <linux/init.h>
26#include <linux/fs.h>
27#include <linux/pci.h>
28#include <linux/firmware.h>
29#include <asm/uaccess.h>
30#include <asm/fcntl.h>
31
25#include "bfad_drv.h" 32#include "bfad_drv.h"
26#include "bfad_im.h" 33#include "bfad_im.h"
27#include "bfad_tm.h" 34#include "bfa_fcs.h"
28#include "bfad_ipfc.h" 35#include "bfa_os_inc.h"
29#include "bfad_trcmod.h" 36#include "bfa_defs.h"
30#include <fcb/bfa_fcb_vf.h> 37#include "bfa.h"
31#include <fcb/bfa_fcb_rport.h>
32#include <fcb/bfa_fcb_port.h>
33#include <fcb/bfa_fcb.h>
34 38
35BFA_TRC_FILE(LDRV, BFAD); 39BFA_TRC_FILE(LDRV, BFAD);
36DEFINE_MUTEX(bfad_mutex); 40DEFINE_MUTEX(bfad_mutex);
37LIST_HEAD(bfad_list); 41LIST_HEAD(bfad_list);
38static int bfad_inst; 42
39int bfad_supported_fc4s; 43static int bfad_inst;
40 44static int num_sgpgs_parm;
41static char *host_name; 45int supported_fc4s;
42static char *os_name; 46char *host_name, *os_name, *os_patch;
43static char *os_patch; 47int num_rports, num_ios, num_tms;
44static int num_rports; 48int num_fcxps, num_ufbufs;
45static int num_ios; 49int reqq_size, rspq_size, num_sgpgs;
46static int num_tms; 50int rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT;
47static int num_fcxps; 51int bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH;
48static int num_ufbufs; 52int bfa_io_max_sge = BFAD_IO_MAX_SGE;
49static int reqq_size; 53int log_level = 3; /* WARNING log level */
50static int rspq_size; 54int ioc_auto_recover = BFA_TRUE;
51static int num_sgpgs; 55int bfa_linkup_delay = -1;
52static int rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT; 56int fdmi_enable = BFA_TRUE;
53static int bfa_io_max_sge = BFAD_IO_MAX_SGE; 57int pcie_max_read_reqsz;
54static int log_level = BFA_LOG_WARNING;
55static int ioc_auto_recover = BFA_TRUE;
56static int ipfc_enable = BFA_FALSE;
57static int fdmi_enable = BFA_TRUE;
58int bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH;
59int bfa_linkup_delay = -1;
60int bfa_debugfs_enable = 1; 58int bfa_debugfs_enable = 1;
59int msix_disable_cb = 0, msix_disable_ct = 0;
60
61u32 bfi_image_ct_fc_size, bfi_image_ct_cna_size, bfi_image_cb_fc_size;
62u32 *bfi_image_ct_fc, *bfi_image_ct_cna, *bfi_image_cb_fc;
63
64const char *msix_name_ct[] = {
65 "cpe0", "cpe1", "cpe2", "cpe3",
66 "rme0", "rme1", "rme2", "rme3",
67 "ctrl" };
68
69const char *msix_name_cb[] = {
70 "cpe0", "cpe1", "cpe2", "cpe3",
71 "rme0", "rme1", "rme2", "rme3",
72 "eemc", "elpu0", "elpu1", "epss", "mlpu" };
73
74MODULE_FIRMWARE(BFAD_FW_FILE_CT_FC);
75MODULE_FIRMWARE(BFAD_FW_FILE_CT_CNA);
76MODULE_FIRMWARE(BFAD_FW_FILE_CB_FC);
61 77
62module_param(os_name, charp, S_IRUGO | S_IWUSR); 78module_param(os_name, charp, S_IRUGO | S_IWUSR);
63MODULE_PARM_DESC(os_name, "OS name of the hba host machine"); 79MODULE_PARM_DESC(os_name, "OS name of the hba host machine");
@@ -66,8 +82,8 @@ MODULE_PARM_DESC(os_patch, "OS patch level of the hba host machine");
66module_param(host_name, charp, S_IRUGO | S_IWUSR); 82module_param(host_name, charp, S_IRUGO | S_IWUSR);
67MODULE_PARM_DESC(host_name, "Hostname of the hba host machine"); 83MODULE_PARM_DESC(host_name, "Hostname of the hba host machine");
68module_param(num_rports, int, S_IRUGO | S_IWUSR); 84module_param(num_rports, int, S_IRUGO | S_IWUSR);
69MODULE_PARM_DESC(num_rports, "Max number of rports supported per port" 85MODULE_PARM_DESC(num_rports, "Max number of rports supported per port "
70 " (physical/logical), default=1024"); 86 "(physical/logical), default=1024");
71module_param(num_ios, int, S_IRUGO | S_IWUSR); 87module_param(num_ios, int, S_IRUGO | S_IWUSR);
72MODULE_PARM_DESC(num_ios, "Max number of ioim requests, default=2000"); 88MODULE_PARM_DESC(num_ios, "Max number of ioim requests, default=2000");
73module_param(num_tms, int, S_IRUGO | S_IWUSR); 89module_param(num_tms, int, S_IRUGO | S_IWUSR);
@@ -75,120 +91,277 @@ MODULE_PARM_DESC(num_tms, "Max number of task im requests, default=128");
75module_param(num_fcxps, int, S_IRUGO | S_IWUSR); 91module_param(num_fcxps, int, S_IRUGO | S_IWUSR);
76MODULE_PARM_DESC(num_fcxps, "Max number of fcxp requests, default=64"); 92MODULE_PARM_DESC(num_fcxps, "Max number of fcxp requests, default=64");
77module_param(num_ufbufs, int, S_IRUGO | S_IWUSR); 93module_param(num_ufbufs, int, S_IRUGO | S_IWUSR);
78MODULE_PARM_DESC(num_ufbufs, "Max number of unsolicited frame buffers," 94MODULE_PARM_DESC(num_ufbufs, "Max number of unsolicited frame "
79 " default=64"); 95 "buffers, default=64");
80module_param(reqq_size, int, S_IRUGO | S_IWUSR); 96module_param(reqq_size, int, S_IRUGO | S_IWUSR);
81MODULE_PARM_DESC(reqq_size, "Max number of request queue elements," 97MODULE_PARM_DESC(reqq_size, "Max number of request queue elements, "
82 " default=256"); 98 "default=256");
83module_param(rspq_size, int, S_IRUGO | S_IWUSR); 99module_param(rspq_size, int, S_IRUGO | S_IWUSR);
84MODULE_PARM_DESC(rspq_size, "Max number of response queue elements," 100MODULE_PARM_DESC(rspq_size, "Max number of response queue elements, "
85 " default=64"); 101 "default=64");
86module_param(num_sgpgs, int, S_IRUGO | S_IWUSR); 102module_param(num_sgpgs, int, S_IRUGO | S_IWUSR);
87MODULE_PARM_DESC(num_sgpgs, "Number of scatter/gather pages, default=2048"); 103MODULE_PARM_DESC(num_sgpgs, "Number of scatter/gather pages, default=2048");
88module_param(rport_del_timeout, int, S_IRUGO | S_IWUSR); 104module_param(rport_del_timeout, int, S_IRUGO | S_IWUSR);
89MODULE_PARM_DESC(rport_del_timeout, "Rport delete timeout, default=90 secs," 105MODULE_PARM_DESC(rport_del_timeout, "Rport delete timeout, default=90 secs, "
90 " Range[>0]"); 106 "Range[>0]");
91module_param(bfa_lun_queue_depth, int, S_IRUGO | S_IWUSR); 107module_param(bfa_lun_queue_depth, int, S_IRUGO | S_IWUSR);
92MODULE_PARM_DESC(bfa_lun_queue_depth, "Lun queue depth, default=32," 108MODULE_PARM_DESC(bfa_lun_queue_depth, "Lun queue depth, default=32, Range[>0]");
93 " Range[>0]");
94module_param(bfa_io_max_sge, int, S_IRUGO | S_IWUSR); 109module_param(bfa_io_max_sge, int, S_IRUGO | S_IWUSR);
95MODULE_PARM_DESC(bfa_io_max_sge, "Max io scatter/gather elements, default=255"); 110MODULE_PARM_DESC(bfa_io_max_sge, "Max io scatter/gather elements, default=255");
96module_param(log_level, int, S_IRUGO | S_IWUSR); 111module_param(log_level, int, S_IRUGO | S_IWUSR);
97MODULE_PARM_DESC(log_level, "Driver log level, default=3," 112MODULE_PARM_DESC(log_level, "Driver log level, default=3, "
98 " Range[Critical:1|Error:2|Warning:3|Info:4]"); 113 "Range[Critical:1|Error:2|Warning:3|Info:4]");
99module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR); 114module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR);
100MODULE_PARM_DESC(ioc_auto_recover, "IOC auto recovery, default=1," 115MODULE_PARM_DESC(ioc_auto_recover, "IOC auto recovery, default=1, "
101 " Range[off:0|on:1]"); 116 "Range[off:0|on:1]");
102module_param(ipfc_enable, int, S_IRUGO | S_IWUSR);
103MODULE_PARM_DESC(ipfc_enable, "Enable IPoFC, default=0, Range[off:0|on:1]");
104module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR); 117module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR);
105MODULE_PARM_DESC(bfa_linkup_delay, "Link up delay, default=30 secs for boot" 118MODULE_PARM_DESC(bfa_linkup_delay, "Link up delay, default=30 secs for "
106 " port. Otherwise Range[>0]"); 119 "boot port. Otherwise 10 secs in RHEL4 & 0 for "
120 "[RHEL5, SLES10, ESX40] Range[>0]");
121module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR);
122MODULE_PARM_DESC(msix_disable_cb, "Disable Message Signaled Interrupts "
123 "for Brocade-415/425/815/825 cards, default=0, "
124 " Range[false:0|true:1]");
125module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR);
126MODULE_PARM_DESC(msix_disable_ct, "Disable Message Signaled Interrupts "
127 "if possible for Brocade-1010/1020/804/1007/902/1741 "
128 "cards, default=0, Range[false:0|true:1]");
107module_param(fdmi_enable, int, S_IRUGO | S_IWUSR); 129module_param(fdmi_enable, int, S_IRUGO | S_IWUSR);
108MODULE_PARM_DESC(fdmi_enable, "Enables fdmi registration, default=1," 130MODULE_PARM_DESC(fdmi_enable, "Enables fdmi registration, default=1, "
109 " Range[false:0|true:1]"); 131 "Range[false:0|true:1]");
132module_param(pcie_max_read_reqsz, int, S_IRUGO | S_IWUSR);
133MODULE_PARM_DESC(pcie_max_read_reqsz, "PCIe max read request size, default=0 "
134 "(use system setting), Range[128|256|512|1024|2048|4096]");
110module_param(bfa_debugfs_enable, int, S_IRUGO | S_IWUSR); 135module_param(bfa_debugfs_enable, int, S_IRUGO | S_IWUSR);
111MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1," 136MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1,"
112 " Range[false:0|true:1]"); 137 " Range[false:0|true:1]");
113 138
114/* 139static void
115 * Stores the module parm num_sgpgs value; 140bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event);
116 * used to reset for bfad next instance. 141static void
142bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event);
143static void
144bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event);
145static void
146bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event);
147static void
148bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event);
149static void
150bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event);
151static void
152bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event);
153
154/**
155 * Beginning state for the driver instance, awaiting the pci_probe event
117 */ 156 */
118static int num_sgpgs_parm; 157static void
158bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event)
159{
160 bfa_trc(bfad, event);
161
162 switch (event) {
163 case BFAD_E_CREATE:
164 bfa_sm_set_state(bfad, bfad_sm_created);
165 bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad,
166 "%s", "bfad_worker");
167 if (IS_ERR(bfad->bfad_tsk)) {
168 printk(KERN_INFO "bfad[%d]: Kernel thread "
169 "creation failed!\n", bfad->inst_no);
170 bfa_sm_send_event(bfad, BFAD_E_KTHREAD_CREATE_FAILED);
171 }
172 bfa_sm_send_event(bfad, BFAD_E_INIT);
173 break;
174
175 case BFAD_E_STOP:
176 /* Ignore stop; already in uninit */
177 break;
178
179 default:
180 bfa_sm_fault(bfad, event);
181 }
182}
119 183
120static bfa_status_t 184/**
121bfad_fc4_probe(struct bfad_s *bfad) 185 * Driver Instance is created, awaiting event INIT to initialize the bfad
186 */
187static void
188bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event)
122{ 189{
123 int rc; 190 unsigned long flags;
124 191
125 rc = bfad_im_probe(bfad); 192 bfa_trc(bfad, event);
126 if (rc != BFA_STATUS_OK)
127 goto ext;
128 193
129 bfad_tm_probe(bfad); 194 switch (event) {
195 case BFAD_E_INIT:
196 bfa_sm_set_state(bfad, bfad_sm_initializing);
130 197
131 if (ipfc_enable) 198 init_completion(&bfad->comp);
132 bfad_ipfc_probe(bfad);
133 199
134 bfad->bfad_flags |= BFAD_FC4_PROBE_DONE; 200 /* Enable Interrupt and wait bfa_init completion */
135ext: 201 if (bfad_setup_intr(bfad)) {
136 return rc; 202 printk(KERN_WARNING "bfad%d: bfad_setup_intr failed\n",
203 bfad->inst_no);
204 bfa_sm_send_event(bfad, BFAD_E_INTR_INIT_FAILED);
205 break;
206 }
207
208 spin_lock_irqsave(&bfad->bfad_lock, flags);
209 bfa_init(&bfad->bfa);
210 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
211
212 /* Set up interrupt handler for each vectors */
213 if ((bfad->bfad_flags & BFAD_MSIX_ON) &&
214 bfad_install_msix_handler(bfad)) {
215 printk(KERN_WARNING "%s: install_msix failed, bfad%d\n",
216 __func__, bfad->inst_no);
217 }
218
219 bfad_init_timer(bfad);
220
221 wait_for_completion(&bfad->comp);
222
223 if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
224 bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
225 } else {
226 bfad->bfad_flags |= BFAD_HAL_INIT_FAIL;
227 bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
228 }
229
230 break;
231
232 case BFAD_E_KTHREAD_CREATE_FAILED:
233 bfa_sm_set_state(bfad, bfad_sm_uninit);
234 break;
235
236 default:
237 bfa_sm_fault(bfad, event);
238 }
137} 239}
138 240
139static void 241static void
140bfad_fc4_probe_undo(struct bfad_s *bfad) 242bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event)
141{ 243{
142 bfad_im_probe_undo(bfad); 244 int retval;
143 bfad_tm_probe_undo(bfad); 245 unsigned long flags;
144 if (ipfc_enable) 246
145 bfad_ipfc_probe_undo(bfad); 247 bfa_trc(bfad, event);
146 bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE; 248
249 switch (event) {
250 case BFAD_E_INIT_SUCCESS:
251 kthread_stop(bfad->bfad_tsk);
252 spin_lock_irqsave(&bfad->bfad_lock, flags);
253 bfad->bfad_tsk = NULL;
254 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
255
256 retval = bfad_start_ops(bfad);
257 if (retval != BFA_STATUS_OK)
258 break;
259 bfa_sm_set_state(bfad, bfad_sm_operational);
260 break;
261
262 case BFAD_E_INTR_INIT_FAILED:
263 bfa_sm_set_state(bfad, bfad_sm_uninit);
264 kthread_stop(bfad->bfad_tsk);
265 spin_lock_irqsave(&bfad->bfad_lock, flags);
266 bfad->bfad_tsk = NULL;
267 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
268 break;
269
270 case BFAD_E_INIT_FAILED:
271 bfa_sm_set_state(bfad, bfad_sm_failed);
272 break;
273 default:
274 bfa_sm_fault(bfad, event);
275 }
147} 276}
148 277
149static void 278static void
150bfad_fc4_probe_post(struct bfad_s *bfad) 279bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event)
151{ 280{
152 if (bfad->im) 281 int retval;
153 bfad_im_probe_post(bfad->im);
154 282
155 bfad_tm_probe_post(bfad); 283 bfa_trc(bfad, event);
156 if (ipfc_enable) 284
157 bfad_ipfc_probe_post(bfad); 285 switch (event) {
286 case BFAD_E_INIT_SUCCESS:
287 retval = bfad_start_ops(bfad);
288 if (retval != BFA_STATUS_OK)
289 break;
290 bfa_sm_set_state(bfad, bfad_sm_operational);
291 break;
292
293 case BFAD_E_STOP:
294 if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE)
295 bfad_uncfg_pport(bfad);
296 if (bfad->bfad_flags & BFAD_FC4_PROBE_DONE) {
297 bfad_im_probe_undo(bfad);
298 bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
299 }
300 bfad_stop(bfad);
301 break;
302
303 case BFAD_E_EXIT_COMP:
304 bfa_sm_set_state(bfad, bfad_sm_uninit);
305 bfad_remove_intr(bfad);
306 del_timer_sync(&bfad->hal_tmo);
307 break;
308
309 default:
310 bfa_sm_fault(bfad, event);
311 }
158} 312}
159 313
160static bfa_status_t 314static void
161bfad_fc4_port_new(struct bfad_s *bfad, struct bfad_port_s *port, int roles) 315bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event)
162{ 316{
163 int rc = BFA_STATUS_FAILED; 317 bfa_trc(bfad, event);
164 318
165 if (roles & BFA_PORT_ROLE_FCP_IM) 319 switch (event) {
166 rc = bfad_im_port_new(bfad, port); 320 case BFAD_E_STOP:
167 if (rc != BFA_STATUS_OK) 321 bfa_sm_set_state(bfad, bfad_sm_fcs_exit);
168 goto ext; 322 bfad_fcs_stop(bfad);
323 break;
169 324
170 if (roles & BFA_PORT_ROLE_FCP_TM) 325 default:
171 rc = bfad_tm_port_new(bfad, port); 326 bfa_sm_fault(bfad, event);
172 if (rc != BFA_STATUS_OK) 327 }
173 goto ext; 328}
174 329
175 if ((roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable) 330static void
176 rc = bfad_ipfc_port_new(bfad, port, port->pvb_type); 331bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event)
177ext: 332{
178 return rc; 333 bfa_trc(bfad, event);
334
335 switch (event) {
336 case BFAD_E_FCS_EXIT_COMP:
337 bfa_sm_set_state(bfad, bfad_sm_stopping);
338 bfad_stop(bfad);
339 break;
340
341 default:
342 bfa_sm_fault(bfad, event);
343 }
179} 344}
180 345
181static void 346static void
182bfad_fc4_port_delete(struct bfad_s *bfad, struct bfad_port_s *port, int roles) 347bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event)
183{ 348{
184 if (roles & BFA_PORT_ROLE_FCP_IM) 349 bfa_trc(bfad, event);
185 bfad_im_port_delete(bfad, port);
186 350
187 if (roles & BFA_PORT_ROLE_FCP_TM) 351 switch (event) {
188 bfad_tm_port_delete(bfad, port); 352 case BFAD_E_EXIT_COMP:
353 bfa_sm_set_state(bfad, bfad_sm_uninit);
354 bfad_remove_intr(bfad);
355 del_timer_sync(&bfad->hal_tmo);
356 bfad_im_probe_undo(bfad);
357 bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
358 bfad_uncfg_pport(bfad);
359 break;
189 360
190 if ((roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable) 361 default:
191 bfad_ipfc_port_delete(bfad, port); 362 bfa_sm_fault(bfad, event);
363 break;
364 }
192} 365}
193 366
194/** 367/**
@@ -209,12 +382,13 @@ bfad_hcb_comp(void *arg, bfa_status_t status)
209void 382void
210bfa_cb_init(void *drv, bfa_status_t init_status) 383bfa_cb_init(void *drv, bfa_status_t init_status)
211{ 384{
212 struct bfad_s *bfad = drv; 385 struct bfad_s *bfad = drv;
213 386
214 if (init_status == BFA_STATUS_OK) { 387 if (init_status == BFA_STATUS_OK) {
215 bfad->bfad_flags |= BFAD_HAL_INIT_DONE; 388 bfad->bfad_flags |= BFAD_HAL_INIT_DONE;
216 389
217 /* If BFAD_HAL_INIT_FAIL flag is set: 390 /*
391 * If BFAD_HAL_INIT_FAIL flag is set:
218 * Wake up the kernel thread to start 392 * Wake up the kernel thread to start
219 * the bfad operations after HAL init done 393 * the bfad operations after HAL init done
220 */ 394 */
@@ -227,26 +401,16 @@ bfa_cb_init(void *drv, bfa_status_t init_status)
227 complete(&bfad->comp); 401 complete(&bfad->comp);
228} 402}
229 403
230
231
232/** 404/**
233 * BFA_FCS callbacks 405 * BFA_FCS callbacks
234 */ 406 */
235static struct bfad_port_s *
236bfad_get_drv_port(struct bfad_s *bfad, struct bfad_vf_s *vf_drv,
237 struct bfad_vport_s *vp_drv)
238{
239 return (vp_drv) ? (&(vp_drv)->drv_port)
240 : ((vf_drv) ? (&(vf_drv)->base_port) : (&(bfad)->pport));
241}
242
243struct bfad_port_s * 407struct bfad_port_s *
244bfa_fcb_port_new(struct bfad_s *bfad, struct bfa_fcs_port_s *port, 408bfa_fcb_lport_new(struct bfad_s *bfad, struct bfa_fcs_lport_s *port,
245 enum bfa_port_role roles, struct bfad_vf_s *vf_drv, 409 enum bfa_lport_role roles, struct bfad_vf_s *vf_drv,
246 struct bfad_vport_s *vp_drv) 410 struct bfad_vport_s *vp_drv)
247{ 411{
248 bfa_status_t rc; 412 bfa_status_t rc;
249 struct bfad_port_s *port_drv; 413 struct bfad_port_s *port_drv;
250 414
251 if (!vp_drv && !vf_drv) { 415 if (!vp_drv && !vf_drv) {
252 port_drv = &bfad->pport; 416 port_drv = &bfad->pport;
@@ -264,71 +428,32 @@ bfa_fcb_port_new(struct bfad_s *bfad, struct bfa_fcs_port_s *port,
264 428
265 port_drv->fcs_port = port; 429 port_drv->fcs_port = port;
266 port_drv->roles = roles; 430 port_drv->roles = roles;
267 rc = bfad_fc4_port_new(bfad, port_drv, roles); 431
268 if (rc != BFA_STATUS_OK) { 432 if (roles & BFA_LPORT_ROLE_FCP_IM) {
269 bfad_fc4_port_delete(bfad, port_drv, roles); 433 rc = bfad_im_port_new(bfad, port_drv);
270 port_drv = NULL; 434 if (rc != BFA_STATUS_OK) {
435 bfad_im_port_delete(bfad, port_drv);
436 port_drv = NULL;
437 }
271 } 438 }
272 439
273 return port_drv; 440 return port_drv;
274} 441}
275 442
276void 443void
277bfa_fcb_port_delete(struct bfad_s *bfad, enum bfa_port_role roles, 444bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles,
278 struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv) 445 struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv)
279{ 446{
280 struct bfad_port_s *port_drv; 447 struct bfad_port_s *port_drv;
281 448
282 /* 449 /* this will be only called from rmmod context */
283 * this will be only called from rmmod context
284 */
285 if (vp_drv && !vp_drv->comp_del) { 450 if (vp_drv && !vp_drv->comp_del) {
286 port_drv = bfad_get_drv_port(bfad, vf_drv, vp_drv); 451 port_drv = (vp_drv) ? (&(vp_drv)->drv_port) :
452 ((vf_drv) ? (&(vf_drv)->base_port) :
453 (&(bfad)->pport));
287 bfa_trc(bfad, roles); 454 bfa_trc(bfad, roles);
288 bfad_fc4_port_delete(bfad, port_drv, roles); 455 if (roles & BFA_LPORT_ROLE_FCP_IM)
289 } 456 bfad_im_port_delete(bfad, port_drv);
290}
291
292void
293bfa_fcb_port_online(struct bfad_s *bfad, enum bfa_port_role roles,
294 struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv)
295{
296 struct bfad_port_s *port_drv = bfad_get_drv_port(bfad, vf_drv, vp_drv);
297
298 if (roles & BFA_PORT_ROLE_FCP_IM)
299 bfad_im_port_online(bfad, port_drv);
300
301 if (roles & BFA_PORT_ROLE_FCP_TM)
302 bfad_tm_port_online(bfad, port_drv);
303
304 if ((roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable)
305 bfad_ipfc_port_online(bfad, port_drv);
306
307 bfad->bfad_flags |= BFAD_PORT_ONLINE;
308}
309
310void
311bfa_fcb_port_offline(struct bfad_s *bfad, enum bfa_port_role roles,
312 struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv)
313{
314 struct bfad_port_s *port_drv = bfad_get_drv_port(bfad, vf_drv, vp_drv);
315
316 if (roles & BFA_PORT_ROLE_FCP_IM)
317 bfad_im_port_offline(bfad, port_drv);
318
319 if (roles & BFA_PORT_ROLE_FCP_TM)
320 bfad_tm_port_offline(bfad, port_drv);
321
322 if ((roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable)
323 bfad_ipfc_port_offline(bfad, port_drv);
324}
325
326void
327bfa_fcb_vport_delete(struct bfad_vport_s *vport_drv)
328{
329 if (vport_drv->comp_del) {
330 complete(vport_drv->comp_del);
331 return;
332 } 457 }
333} 458}
334 459
@@ -339,7 +464,7 @@ bfa_status_t
339bfa_fcb_rport_alloc(struct bfad_s *bfad, struct bfa_fcs_rport_s **rport, 464bfa_fcb_rport_alloc(struct bfad_s *bfad, struct bfa_fcs_rport_s **rport,
340 struct bfad_rport_s **rport_drv) 465 struct bfad_rport_s **rport_drv)
341{ 466{
342 bfa_status_t rc = BFA_STATUS_OK; 467 bfa_status_t rc = BFA_STATUS_OK;
343 468
344 *rport_drv = kzalloc(sizeof(struct bfad_rport_s), GFP_ATOMIC); 469 *rport_drv = kzalloc(sizeof(struct bfad_rport_s), GFP_ATOMIC);
345 if (*rport_drv == NULL) { 470 if (*rport_drv == NULL) {
@@ -354,35 +479,43 @@ ext:
354} 479}
355 480
356/** 481/**
357 * @brief
358 * FCS PBC VPORT Create 482 * FCS PBC VPORT Create
359 */ 483 */
360void 484void
361bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport) 485bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport)
362{ 486{
363 487
364 struct bfad_pcfg_s *pcfg; 488 struct bfa_lport_cfg_s port_cfg = {0};
489 struct bfad_vport_s *vport;
490 int rc;
365 491
366 pcfg = kzalloc(sizeof(struct bfad_pcfg_s), GFP_ATOMIC); 492 vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
367 if (!pcfg) { 493 if (!vport) {
368 bfa_trc(bfad, 0); 494 bfa_trc(bfad, 0);
369 return; 495 return;
370 } 496 }
371 497
372 pcfg->port_cfg.roles = BFA_PORT_ROLE_FCP_IM; 498 vport->drv_port.bfad = bfad;
373 pcfg->port_cfg.pwwn = pbc_vport.vp_pwwn; 499 port_cfg.roles = BFA_LPORT_ROLE_FCP_IM;
374 pcfg->port_cfg.nwwn = pbc_vport.vp_nwwn; 500 port_cfg.pwwn = pbc_vport.vp_pwwn;
375 pcfg->port_cfg.preboot_vp = BFA_TRUE; 501 port_cfg.nwwn = pbc_vport.vp_nwwn;
502 port_cfg.preboot_vp = BFA_TRUE;
503
504 rc = bfa_fcs_pbc_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, 0,
505 &port_cfg, vport);
376 506
377 list_add_tail(&pcfg->list_entry, &bfad->pbc_pcfg_list); 507 if (rc != BFA_STATUS_OK) {
508 bfa_trc(bfad, 0);
509 return;
510 }
378 511
379 return; 512 list_add_tail(&vport->list_entry, &bfad->pbc_vport_list);
380} 513}
381 514
382void 515void
383bfad_hal_mem_release(struct bfad_s *bfad) 516bfad_hal_mem_release(struct bfad_s *bfad)
384{ 517{
385 int i; 518 int i;
386 struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo; 519 struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
387 struct bfa_mem_elem_s *meminfo_elem; 520 struct bfa_mem_elem_s *meminfo_elem;
388 521
@@ -395,9 +528,9 @@ bfad_hal_mem_release(struct bfad_s *bfad)
395 break; 528 break;
396 case BFA_MEM_TYPE_DMA: 529 case BFA_MEM_TYPE_DMA:
397 dma_free_coherent(&bfad->pcidev->dev, 530 dma_free_coherent(&bfad->pcidev->dev,
398 meminfo_elem->mem_len, 531 meminfo_elem->mem_len,
399 meminfo_elem->kva, 532 meminfo_elem->kva,
400 (dma_addr_t) meminfo_elem->dma); 533 (dma_addr_t) meminfo_elem->dma);
401 break; 534 break;
402 default: 535 default:
403 bfa_assert(0); 536 bfa_assert(0);
@@ -434,27 +567,27 @@ bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg)
434 * otherwise, the default values will be shown as 0 in sysfs 567 * otherwise, the default values will be shown as 0 in sysfs
435 */ 568 */
436 num_rports = bfa_cfg->fwcfg.num_rports; 569 num_rports = bfa_cfg->fwcfg.num_rports;
437 num_ios = bfa_cfg->fwcfg.num_ioim_reqs; 570 num_ios = bfa_cfg->fwcfg.num_ioim_reqs;
438 num_tms = bfa_cfg->fwcfg.num_tskim_reqs; 571 num_tms = bfa_cfg->fwcfg.num_tskim_reqs;
439 num_fcxps = bfa_cfg->fwcfg.num_fcxp_reqs; 572 num_fcxps = bfa_cfg->fwcfg.num_fcxp_reqs;
440 num_ufbufs = bfa_cfg->fwcfg.num_uf_bufs; 573 num_ufbufs = bfa_cfg->fwcfg.num_uf_bufs;
441 reqq_size = bfa_cfg->drvcfg.num_reqq_elems; 574 reqq_size = bfa_cfg->drvcfg.num_reqq_elems;
442 rspq_size = bfa_cfg->drvcfg.num_rspq_elems; 575 rspq_size = bfa_cfg->drvcfg.num_rspq_elems;
443 num_sgpgs = bfa_cfg->drvcfg.num_sgpgs; 576 num_sgpgs = bfa_cfg->drvcfg.num_sgpgs;
444} 577}
445 578
446bfa_status_t 579bfa_status_t
447bfad_hal_mem_alloc(struct bfad_s *bfad) 580bfad_hal_mem_alloc(struct bfad_s *bfad)
448{ 581{
582 int i;
449 struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo; 583 struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
450 struct bfa_mem_elem_s *meminfo_elem; 584 struct bfa_mem_elem_s *meminfo_elem;
451 bfa_status_t rc = BFA_STATUS_OK; 585 dma_addr_t phys_addr;
452 dma_addr_t phys_addr; 586 void *kva;
453 int retry_count = 0; 587 bfa_status_t rc = BFA_STATUS_OK;
454 int reset_value = 1; 588 int retry_count = 0;
455 int min_num_sgpgs = 512; 589 int reset_value = 1;
456 void *kva; 590 int min_num_sgpgs = 512;
457 int i;
458 591
459 bfa_cfg_get_default(&bfad->ioc_cfg); 592 bfa_cfg_get_default(&bfad->ioc_cfg);
460 593
@@ -478,8 +611,7 @@ retry:
478 break; 611 break;
479 case BFA_MEM_TYPE_DMA: 612 case BFA_MEM_TYPE_DMA:
480 kva = dma_alloc_coherent(&bfad->pcidev->dev, 613 kva = dma_alloc_coherent(&bfad->pcidev->dev,
481 meminfo_elem->mem_len, 614 meminfo_elem->mem_len, &phys_addr, GFP_KERNEL);
482 &phys_addr, GFP_KERNEL);
483 if (kva == NULL) { 615 if (kva == NULL) {
484 bfad_hal_mem_release(bfad); 616 bfad_hal_mem_release(bfad);
485 /* 617 /*
@@ -487,14 +619,14 @@ retry:
487 * num_sgpages try with half the value. 619 * num_sgpages try with half the value.
488 */ 620 */
489 if (num_sgpgs > min_num_sgpgs) { 621 if (num_sgpgs > min_num_sgpgs) {
490 printk(KERN_INFO "bfad[%d]: memory" 622 printk(KERN_INFO
491 " allocation failed with" 623 "bfad[%d]: memory allocation failed"
492 " num_sgpgs: %d\n", 624 " with num_sgpgs: %d\n",
493 bfad->inst_no, num_sgpgs); 625 bfad->inst_no, num_sgpgs);
494 nextLowerInt(&num_sgpgs); 626 nextLowerInt(&num_sgpgs);
495 printk(KERN_INFO "bfad[%d]: trying to" 627 printk(KERN_INFO
496 " allocate memory with" 628 "bfad[%d]: trying to allocate memory"
497 " num_sgpgs: %d\n", 629 " with num_sgpgs: %d\n",
498 bfad->inst_no, num_sgpgs); 630 bfad->inst_no, num_sgpgs);
499 retry_count++; 631 retry_count++;
500 goto retry; 632 goto retry;
@@ -536,11 +668,11 @@ ext:
536 */ 668 */
537bfa_status_t 669bfa_status_t
538bfad_vport_create(struct bfad_s *bfad, u16 vf_id, 670bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
539 struct bfa_port_cfg_s *port_cfg, struct device *dev) 671 struct bfa_lport_cfg_s *port_cfg, struct device *dev)
540{ 672{
541 struct bfad_vport_s *vport; 673 struct bfad_vport_s *vport;
542 int rc = BFA_STATUS_OK; 674 int rc = BFA_STATUS_OK;
543 unsigned long flags; 675 unsigned long flags;
544 struct completion fcomp; 676 struct completion fcomp;
545 677
546 vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL); 678 vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
@@ -551,18 +683,14 @@ bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
551 683
552 vport->drv_port.bfad = bfad; 684 vport->drv_port.bfad = bfad;
553 spin_lock_irqsave(&bfad->bfad_lock, flags); 685 spin_lock_irqsave(&bfad->bfad_lock, flags);
554 if (port_cfg->preboot_vp == BFA_TRUE) 686 rc = bfa_fcs_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, vf_id,
555 rc = bfa_fcs_pbc_vport_create(&vport->fcs_vport, 687 port_cfg, vport);
556 &bfad->bfa_fcs, vf_id, port_cfg, vport);
557 else
558 rc = bfa_fcs_vport_create(&vport->fcs_vport,
559 &bfad->bfa_fcs, vf_id, port_cfg, vport);
560 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 688 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
561 689
562 if (rc != BFA_STATUS_OK) 690 if (rc != BFA_STATUS_OK)
563 goto ext_free_vport; 691 goto ext_free_vport;
564 692
565 if (port_cfg->roles & BFA_PORT_ROLE_FCP_IM) { 693 if (port_cfg->roles & BFA_LPORT_ROLE_FCP_IM) {
566 rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port, 694 rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port,
567 dev); 695 dev);
568 if (rc != BFA_STATUS_OK) 696 if (rc != BFA_STATUS_OK)
@@ -593,10 +721,10 @@ ext:
593 */ 721 */
594bfa_status_t 722bfa_status_t
595bfad_vf_create(struct bfad_s *bfad, u16 vf_id, 723bfad_vf_create(struct bfad_s *bfad, u16 vf_id,
596 struct bfa_port_cfg_s *port_cfg) 724 struct bfa_lport_cfg_s *port_cfg)
597{ 725{
598 struct bfad_vf_s *vf; 726 struct bfad_vf_s *vf;
599 int rc = BFA_STATUS_OK; 727 int rc = BFA_STATUS_OK;
600 728
601 vf = kzalloc(sizeof(struct bfad_vf_s), GFP_KERNEL); 729 vf = kzalloc(sizeof(struct bfad_vf_s), GFP_KERNEL);
602 if (!vf) { 730 if (!vf) {
@@ -615,9 +743,9 @@ ext:
615void 743void
616bfad_bfa_tmo(unsigned long data) 744bfad_bfa_tmo(unsigned long data)
617{ 745{
618 struct bfad_s *bfad = (struct bfad_s *)data; 746 struct bfad_s *bfad = (struct bfad_s *) data;
619 unsigned long flags; 747 unsigned long flags;
620 struct list_head doneq; 748 struct list_head doneq;
621 749
622 spin_lock_irqsave(&bfad->bfad_lock, flags); 750 spin_lock_irqsave(&bfad->bfad_lock, flags);
623 751
@@ -633,7 +761,8 @@ bfad_bfa_tmo(unsigned long data)
633 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 761 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
634 } 762 }
635 763
636 mod_timer(&bfad->hal_tmo, jiffies + msecs_to_jiffies(BFA_TIMER_FREQ)); 764 mod_timer(&bfad->hal_tmo,
765 jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
637} 766}
638 767
639void 768void
@@ -643,16 +772,17 @@ bfad_init_timer(struct bfad_s *bfad)
643 bfad->hal_tmo.function = bfad_bfa_tmo; 772 bfad->hal_tmo.function = bfad_bfa_tmo;
644 bfad->hal_tmo.data = (unsigned long)bfad; 773 bfad->hal_tmo.data = (unsigned long)bfad;
645 774
646 mod_timer(&bfad->hal_tmo, jiffies + msecs_to_jiffies(BFA_TIMER_FREQ)); 775 mod_timer(&bfad->hal_tmo,
776 jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
647} 777}
648 778
649int 779int
650bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad) 780bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
651{ 781{
652 int rc = -ENODEV; 782 int rc = -ENODEV;
653 783
654 if (pci_enable_device(pdev)) { 784 if (pci_enable_device(pdev)) {
655 BFA_PRINTF(BFA_ERR, "pci_enable_device fail %p\n", pdev); 785 printk(KERN_ERR "pci_enable_device fail %p\n", pdev);
656 goto out; 786 goto out;
657 } 787 }
658 788
@@ -664,14 +794,14 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
664 794
665 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) 795 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
666 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { 796 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
667 BFA_PRINTF(BFA_ERR, "pci_set_dma_mask fail %p\n", pdev); 797 printk(KERN_ERR "pci_set_dma_mask fail %p\n", pdev);
668 goto out_release_region; 798 goto out_release_region;
669 } 799 }
670 800
671 bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0)); 801 bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
672 802
673 if (bfad->pci_bar0_kva == NULL) { 803 if (bfad->pci_bar0_kva == NULL) {
674 BFA_PRINTF(BFA_ERR, "Fail to map bar0\n"); 804 printk(KERN_ERR "Fail to map bar0\n");
675 goto out_release_region; 805 goto out_release_region;
676 } 806 }
677 807
@@ -688,6 +818,54 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
688 bfad->pci_attr.pcifn = PCI_FUNC(pdev->devfn); 818 bfad->pci_attr.pcifn = PCI_FUNC(pdev->devfn);
689 819
690 bfad->pcidev = pdev; 820 bfad->pcidev = pdev;
821
822 /* Adjust PCIe Maximum Read Request Size */
823 if (pcie_max_read_reqsz > 0) {
824 int pcie_cap_reg;
825 u16 pcie_dev_ctl;
826 u16 mask = 0xffff;
827
828 switch (pcie_max_read_reqsz) {
829 case 128:
830 mask = 0x0;
831 break;
832 case 256:
833 mask = 0x1000;
834 break;
835 case 512:
836 mask = 0x2000;
837 break;
838 case 1024:
839 mask = 0x3000;
840 break;
841 case 2048:
842 mask = 0x4000;
843 break;
844 case 4096:
845 mask = 0x5000;
846 break;
847 default:
848 break;
849 }
850
851 pcie_cap_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
852 if (mask != 0xffff && pcie_cap_reg) {
853 pcie_cap_reg += 0x08;
854 pci_read_config_word(pdev, pcie_cap_reg, &pcie_dev_ctl);
855 if ((pcie_dev_ctl & 0x7000) != mask) {
856 printk(KERN_WARNING "BFA[%s]: "
857 "pcie_max_read_request_size is %d, "
858 "reset to %d\n", bfad->pci_name,
859 (1 << ((pcie_dev_ctl & 0x7000) >> 12)) << 7,
860 pcie_max_read_reqsz);
861
862 pcie_dev_ctl &= ~0x7000;
863 pci_write_config_word(pdev, pcie_cap_reg,
864 pcie_dev_ctl | mask);
865 }
866 }
867 }
868
691 return 0; 869 return 0;
692 870
693out_release_region: 871out_release_region:
@@ -710,25 +888,22 @@ bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
710void 888void
711bfad_fcs_port_cfg(struct bfad_s *bfad) 889bfad_fcs_port_cfg(struct bfad_s *bfad)
712{ 890{
713 struct bfa_port_cfg_s port_cfg; 891 struct bfa_lport_cfg_s port_cfg;
714 struct bfa_pport_attr_s attr; 892 struct bfa_port_attr_s attr;
715 char symname[BFA_SYMNAME_MAXLEN]; 893 char symname[BFA_SYMNAME_MAXLEN];
716 894
717 sprintf(symname, "%s-%d", BFAD_DRIVER_NAME, bfad->inst_no); 895 sprintf(symname, "%s-%d", BFAD_DRIVER_NAME, bfad->inst_no);
718 memcpy(port_cfg.sym_name.symname, symname, strlen(symname)); 896 memcpy(port_cfg.sym_name.symname, symname, strlen(symname));
719 bfa_fcport_get_attr(&bfad->bfa, &attr); 897 bfa_fcport_get_attr(&bfad->bfa, &attr);
720 port_cfg.nwwn = attr.nwwn; 898 port_cfg.nwwn = attr.nwwn;
721 port_cfg.pwwn = attr.pwwn; 899 port_cfg.pwwn = attr.pwwn;
722
723 bfa_fcs_cfg_base_port(&bfad->bfa_fcs, &port_cfg);
724} 900}
725 901
726bfa_status_t 902bfa_status_t
727bfad_drv_init(struct bfad_s *bfad) 903bfad_drv_init(struct bfad_s *bfad)
728{ 904{
729 bfa_status_t rc; 905 bfa_status_t rc;
730 unsigned long flags; 906 unsigned long flags;
731 struct bfa_fcs_driver_info_s driver_info;
732 907
733 bfad->cfg_data.rport_del_timeout = rport_del_timeout; 908 bfad->cfg_data.rport_del_timeout = rport_del_timeout;
734 bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth; 909 bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth;
@@ -740,15 +915,12 @@ bfad_drv_init(struct bfad_s *bfad)
740 printk(KERN_WARNING "bfad%d bfad_hal_mem_alloc failure\n", 915 printk(KERN_WARNING "bfad%d bfad_hal_mem_alloc failure\n",
741 bfad->inst_no); 916 bfad->inst_no);
742 printk(KERN_WARNING 917 printk(KERN_WARNING
743 "Not enough memory to attach all Brocade HBA ports," 918 "Not enough memory to attach all Brocade HBA ports, %s",
744 " System may need more memory.\n"); 919 "System may need more memory.\n");
745 goto out_hal_mem_alloc_failure; 920 goto out_hal_mem_alloc_failure;
746 } 921 }
747 922
748 bfa_init_log(&bfad->bfa, bfad->logmod);
749 bfa_init_trc(&bfad->bfa, bfad->trcmod); 923 bfa_init_trc(&bfad->bfa, bfad->trcmod);
750 bfa_init_aen(&bfad->bfa, bfad->aen);
751 memset(bfad->file_map, 0, sizeof(bfad->file_map));
752 bfa_init_plog(&bfad->bfa, &bfad->plog_buf); 924 bfa_init_plog(&bfad->bfa, &bfad->plog_buf);
753 bfa_plog_init(&bfad->plog_buf); 925 bfa_plog_init(&bfad->plog_buf);
754 bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START, 926 bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START,
@@ -757,77 +929,17 @@ bfad_drv_init(struct bfad_s *bfad)
757 bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg, &bfad->meminfo, 929 bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg, &bfad->meminfo,
758 &bfad->hal_pcidev); 930 &bfad->hal_pcidev);
759 931
760 init_completion(&bfad->comp); 932 /* FCS INIT */
761
762 /*
763 * Enable Interrupt and wait bfa_init completion
764 */
765 if (bfad_setup_intr(bfad)) {
766 printk(KERN_WARNING "bfad%d: bfad_setup_intr failed\n",
767 bfad->inst_no);
768 goto out_setup_intr_failure;
769 }
770
771 spin_lock_irqsave(&bfad->bfad_lock, flags); 933 spin_lock_irqsave(&bfad->bfad_lock, flags);
772 bfa_init(&bfad->bfa);
773 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
774
775 /*
776 * Set up interrupt handler for each vectors
777 */
778 if ((bfad->bfad_flags & BFAD_MSIX_ON)
779 && bfad_install_msix_handler(bfad)) {
780 printk(KERN_WARNING "%s: install_msix failed, bfad%d\n",
781 __func__, bfad->inst_no);
782 }
783
784 bfad_init_timer(bfad);
785
786 wait_for_completion(&bfad->comp);
787
788 memset(&driver_info, 0, sizeof(driver_info));
789 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
790 sizeof(driver_info.version) - 1);
791 __kernel_param_lock();
792 if (host_name)
793 strncpy(driver_info.host_machine_name, host_name,
794 sizeof(driver_info.host_machine_name) - 1);
795 if (os_name)
796 strncpy(driver_info.host_os_name, os_name,
797 sizeof(driver_info.host_os_name) - 1);
798 if (os_patch)
799 strncpy(driver_info.host_os_patch, os_patch,
800 sizeof(driver_info.host_os_patch) - 1);
801 __kernel_param_unlock();
802
803 strncpy(driver_info.os_device_name, bfad->pci_name,
804 sizeof(driver_info.os_device_name - 1));
805
806 /*
807 * FCS INIT
808 */
809 spin_lock_irqsave(&bfad->bfad_lock, flags);
810 bfa_fcs_log_init(&bfad->bfa_fcs, bfad->logmod);
811 bfa_fcs_trc_init(&bfad->bfa_fcs, bfad->trcmod); 934 bfa_fcs_trc_init(&bfad->bfa_fcs, bfad->trcmod);
812 bfa_fcs_aen_init(&bfad->bfa_fcs, bfad->aen);
813 bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE); 935 bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
814
815 /* Do FCS init only when HAL init is done */
816 if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
817 bfa_fcs_init(&bfad->bfa_fcs);
818 bfad->bfad_flags |= BFAD_FCS_INIT_DONE;
819 }
820
821 bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info);
822 bfa_fcs_set_fdmi_param(&bfad->bfa_fcs, fdmi_enable); 936 bfa_fcs_set_fdmi_param(&bfad->bfa_fcs, fdmi_enable);
823 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 937 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
824 938
825 bfad->bfad_flags |= BFAD_DRV_INIT_DONE; 939 bfad->bfad_flags |= BFAD_DRV_INIT_DONE;
940
826 return BFA_STATUS_OK; 941 return BFA_STATUS_OK;
827 942
828out_setup_intr_failure:
829 bfa_detach(&bfad->bfa);
830 bfad_hal_mem_release(bfad);
831out_hal_mem_alloc_failure: 943out_hal_mem_alloc_failure:
832 return BFA_STATUS_FAILED; 944 return BFA_STATUS_FAILED;
833} 945}
@@ -855,7 +967,7 @@ bfad_drv_uninit(struct bfad_s *bfad)
855void 967void
856bfad_drv_start(struct bfad_s *bfad) 968bfad_drv_start(struct bfad_s *bfad)
857{ 969{
858 unsigned long flags; 970 unsigned long flags;
859 971
860 spin_lock_irqsave(&bfad->bfad_lock, flags); 972 spin_lock_irqsave(&bfad->bfad_lock, flags);
861 bfa_start(&bfad->bfa); 973 bfa_start(&bfad->bfa);
@@ -863,13 +975,14 @@ bfad_drv_start(struct bfad_s *bfad)
863 bfad->bfad_flags |= BFAD_HAL_START_DONE; 975 bfad->bfad_flags |= BFAD_HAL_START_DONE;
864 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 976 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
865 977
866 bfad_fc4_probe_post(bfad); 978 if (bfad->im)
979 flush_workqueue(bfad->im->drv_workq);
867} 980}
868 981
869void 982void
870bfad_drv_stop(struct bfad_s *bfad) 983bfad_fcs_stop(struct bfad_s *bfad)
871{ 984{
872 unsigned long flags; 985 unsigned long flags;
873 986
874 spin_lock_irqsave(&bfad->bfad_lock, flags); 987 spin_lock_irqsave(&bfad->bfad_lock, flags);
875 init_completion(&bfad->comp); 988 init_completion(&bfad->comp);
@@ -878,24 +991,32 @@ bfad_drv_stop(struct bfad_s *bfad)
878 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 991 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
879 wait_for_completion(&bfad->comp); 992 wait_for_completion(&bfad->comp);
880 993
994 bfa_sm_send_event(bfad, BFAD_E_FCS_EXIT_COMP);
995}
996
997void
998bfad_stop(struct bfad_s *bfad)
999{
1000 unsigned long flags;
1001
881 spin_lock_irqsave(&bfad->bfad_lock, flags); 1002 spin_lock_irqsave(&bfad->bfad_lock, flags);
882 init_completion(&bfad->comp); 1003 init_completion(&bfad->comp);
883 bfa_stop(&bfad->bfa); 1004 bfa_stop(&bfad->bfa);
884 bfad->bfad_flags &= ~BFAD_HAL_START_DONE; 1005 bfad->bfad_flags &= ~BFAD_HAL_START_DONE;
885 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1006 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
886 wait_for_completion(&bfad->comp); 1007 wait_for_completion(&bfad->comp);
1008
1009 bfa_sm_send_event(bfad, BFAD_E_EXIT_COMP);
887} 1010}
888 1011
889bfa_status_t 1012bfa_status_t
890bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role) 1013bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role)
891{ 1014{
892 int rc = BFA_STATUS_OK; 1015 int rc = BFA_STATUS_OK;
893 1016
894 /* 1017 /* Allocate scsi_host for the physical port */
895 * Allocate scsi_host for the physical port 1018 if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) &&
896 */ 1019 (role & BFA_LPORT_ROLE_FCP_IM)) {
897 if ((bfad_supported_fc4s & BFA_PORT_ROLE_FCP_IM)
898 && (role & BFA_PORT_ROLE_FCP_IM)) {
899 if (bfad->pport.im_port == NULL) { 1020 if (bfad->pport.im_port == NULL) {
900 rc = BFA_STATUS_FAILED; 1021 rc = BFA_STATUS_FAILED;
901 goto out; 1022 goto out;
@@ -906,7 +1027,7 @@ bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role)
906 if (rc != BFA_STATUS_OK) 1027 if (rc != BFA_STATUS_OK)
907 goto out; 1028 goto out;
908 1029
909 bfad->pport.roles |= BFA_PORT_ROLE_FCP_IM; 1030 bfad->pport.roles |= BFA_LPORT_ROLE_FCP_IM;
910 } 1031 }
911 1032
912 /* Setup the debugfs node for this scsi_host */ 1033 /* Setup the debugfs node for this scsi_host */
@@ -922,74 +1043,102 @@ out:
922void 1043void
923bfad_uncfg_pport(struct bfad_s *bfad) 1044bfad_uncfg_pport(struct bfad_s *bfad)
924{ 1045{
925 /* Remove the debugfs node for this scsi_host */ 1046 /* Remove the debugfs node for this scsi_host */
926 kfree(bfad->regdata); 1047 kfree(bfad->regdata);
927 bfad_debugfs_exit(&bfad->pport); 1048 bfad_debugfs_exit(&bfad->pport);
928 1049
929 if ((bfad->pport.roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable) { 1050 if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) &&
930 bfad_ipfc_port_delete(bfad, &bfad->pport); 1051 (bfad->pport.roles & BFA_LPORT_ROLE_FCP_IM)) {
931 bfad->pport.roles &= ~BFA_PORT_ROLE_FCP_IPFC;
932 }
933
934 if ((bfad_supported_fc4s & BFA_PORT_ROLE_FCP_IM)
935 && (bfad->pport.roles & BFA_PORT_ROLE_FCP_IM)) {
936 bfad_im_scsi_host_free(bfad, bfad->pport.im_port); 1052 bfad_im_scsi_host_free(bfad, bfad->pport.im_port);
937 bfad_im_port_clean(bfad->pport.im_port); 1053 bfad_im_port_clean(bfad->pport.im_port);
938 kfree(bfad->pport.im_port); 1054 kfree(bfad->pport.im_port);
939 bfad->pport.roles &= ~BFA_PORT_ROLE_FCP_IM; 1055 bfad->pport.roles &= ~BFA_LPORT_ROLE_FCP_IM;
940 } 1056 }
941 1057
942 bfad->bfad_flags &= ~BFAD_CFG_PPORT_DONE; 1058 bfad->bfad_flags &= ~BFAD_CFG_PPORT_DONE;
943} 1059}
944 1060
945void
946bfad_drv_log_level_set(struct bfad_s *bfad)
947{
948 if (log_level > BFA_LOG_INVALID && log_level <= BFA_LOG_LEVEL_MAX)
949 bfa_log_set_level_all(&bfad->log_data, log_level);
950}
951
952bfa_status_t 1061bfa_status_t
953bfad_start_ops(struct bfad_s *bfad) 1062bfad_start_ops(struct bfad_s *bfad) {
954{ 1063
955 int retval; 1064 int retval;
956 struct bfad_pcfg_s *pcfg, *pcfg_new; 1065 unsigned long flags;
1066 struct bfad_vport_s *vport, *vport_new;
1067 struct bfa_fcs_driver_info_s driver_info;
1068
1069 /* Fill the driver_info info to fcs*/
1070 memset(&driver_info, 0, sizeof(driver_info));
1071 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
1072 sizeof(driver_info.version) - 1);
1073 if (host_name)
1074 strncpy(driver_info.host_machine_name, host_name,
1075 sizeof(driver_info.host_machine_name) - 1);
1076 if (os_name)
1077 strncpy(driver_info.host_os_name, os_name,
1078 sizeof(driver_info.host_os_name) - 1);
1079 if (os_patch)
1080 strncpy(driver_info.host_os_patch, os_patch,
1081 sizeof(driver_info.host_os_patch) - 1);
1082
1083 strncpy(driver_info.os_device_name, bfad->pci_name,
1084 sizeof(driver_info.os_device_name - 1));
1085
1086 /* FCS INIT */
1087 spin_lock_irqsave(&bfad->bfad_lock, flags);
1088 bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info);
1089 bfa_fcs_init(&bfad->bfa_fcs);
1090 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
957 1091
958 /* PPORT FCS config */ 1092 /* PPORT FCS config */
959 bfad_fcs_port_cfg(bfad); 1093 bfad_fcs_port_cfg(bfad);
960 1094
961 retval = bfad_cfg_pport(bfad, BFA_PORT_ROLE_FCP_IM); 1095 retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
962 if (retval != BFA_STATUS_OK)
963 goto out_cfg_pport_failure;
964
965 /* BFAD level FC4 (IM/TM/IPFC) specific resource allocation */
966 retval = bfad_fc4_probe(bfad);
967 if (retval != BFA_STATUS_OK) { 1096 if (retval != BFA_STATUS_OK) {
968 printk(KERN_WARNING "bfad_fc4_probe failed\n"); 1097 if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
969 goto out_fc4_probe_failure; 1098 bfa_sm_set_state(bfad, bfad_sm_failed);
1099 bfad_stop(bfad);
1100 return BFA_STATUS_FAILED;
970 } 1101 }
971 1102
1103 /* BFAD level FC4 IM specific resource allocation */
1104 retval = bfad_im_probe(bfad);
1105 if (retval != BFA_STATUS_OK) {
1106 printk(KERN_WARNING "bfad_im_probe failed\n");
1107 if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
1108 bfa_sm_set_state(bfad, bfad_sm_failed);
1109 bfad_im_probe_undo(bfad);
1110 bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
1111 bfad_uncfg_pport(bfad);
1112 bfad_stop(bfad);
1113 return BFA_STATUS_FAILED;
1114 } else
1115 bfad->bfad_flags |= BFAD_FC4_PROBE_DONE;
1116
972 bfad_drv_start(bfad); 1117 bfad_drv_start(bfad);
973 1118
974 /* pbc vport creation */ 1119 /* Complete pbc vport create */
975 list_for_each_entry_safe(pcfg, pcfg_new, &bfad->pbc_pcfg_list, 1120 list_for_each_entry_safe(vport, vport_new, &bfad->pbc_vport_list,
976 list_entry) { 1121 list_entry) {
977 struct fc_vport_identifiers vid; 1122 struct fc_vport_identifiers vid;
978 struct fc_vport *fc_vport; 1123 struct fc_vport *fc_vport;
1124 char pwwn_buf[BFA_STRING_32];
979 1125
980 memset(&vid, 0, sizeof(vid)); 1126 memset(&vid, 0, sizeof(vid));
981 vid.roles = FC_PORT_ROLE_FCP_INITIATOR; 1127 vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
982 vid.vport_type = FC_PORTTYPE_NPIV; 1128 vid.vport_type = FC_PORTTYPE_NPIV;
983 vid.disable = false; 1129 vid.disable = false;
984 vid.node_name = wwn_to_u64((u8 *)&pcfg->port_cfg.nwwn); 1130 vid.node_name = wwn_to_u64((u8 *)
985 vid.port_name = wwn_to_u64((u8 *)&pcfg->port_cfg.pwwn); 1131 (&((vport->fcs_vport).lport.port_cfg.nwwn)));
1132 vid.port_name = wwn_to_u64((u8 *)
1133 (&((vport->fcs_vport).lport.port_cfg.pwwn)));
986 fc_vport = fc_vport_create(bfad->pport.im_port->shost, 0, &vid); 1134 fc_vport = fc_vport_create(bfad->pport.im_port->shost, 0, &vid);
987 if (!fc_vport) 1135 if (!fc_vport) {
1136 wwn2str(pwwn_buf, vid.port_name);
988 printk(KERN_WARNING "bfad%d: failed to create pbc vport" 1137 printk(KERN_WARNING "bfad%d: failed to create pbc vport"
989 " %llx\n", bfad->inst_no, vid.port_name); 1138 " %s\n", bfad->inst_no, pwwn_buf);
990 list_del(&pcfg->list_entry); 1139 }
991 kfree(pcfg); 1140 list_del(&vport->list_entry);
992 1141 kfree(vport);
993 } 1142 }
994 1143
995 /* 1144 /*
@@ -998,24 +1147,15 @@ bfad_start_ops(struct bfad_s *bfad)
998 * passed in module param value as the bfa_linkup_delay. 1147 * passed in module param value as the bfa_linkup_delay.
999 */ 1148 */
1000 if (bfa_linkup_delay < 0) { 1149 if (bfa_linkup_delay < 0) {
1001
1002 bfa_linkup_delay = bfad_os_get_linkup_delay(bfad); 1150 bfa_linkup_delay = bfad_os_get_linkup_delay(bfad);
1003 bfad_os_rport_online_wait(bfad); 1151 bfad_os_rport_online_wait(bfad);
1004 bfa_linkup_delay = -1; 1152 bfa_linkup_delay = -1;
1005 1153 } else
1006 } else {
1007 bfad_os_rport_online_wait(bfad); 1154 bfad_os_rport_online_wait(bfad);
1008 }
1009 1155
1010 bfa_log(bfad->logmod, BFA_LOG_LINUX_DEVICE_CLAIMED, bfad->pci_name); 1156 BFA_LOG(KERN_INFO, bfad, log_level, "bfa device claimed\n");
1011 1157
1012 return BFA_STATUS_OK; 1158 return BFA_STATUS_OK;
1013
1014out_fc4_probe_failure:
1015 bfad_fc4_probe_undo(bfad);
1016 bfad_uncfg_pport(bfad);
1017out_cfg_pport_failure:
1018 return BFA_STATUS_FAILED;
1019} 1159}
1020 1160
1021int 1161int
@@ -1028,18 +1168,8 @@ bfad_worker(void *ptr)
1028 1168
1029 while (!kthread_should_stop()) { 1169 while (!kthread_should_stop()) {
1030 1170
1031 /* Check if the FCS init is done from bfad_drv_init; 1171 /* Send event BFAD_E_INIT_SUCCESS */
1032 * if not done do FCS init and set the flag. 1172 bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
1033 */
1034 if (!(bfad->bfad_flags & BFAD_FCS_INIT_DONE)) {
1035 spin_lock_irqsave(&bfad->bfad_lock, flags);
1036 bfa_fcs_init(&bfad->bfa_fcs);
1037 bfad->bfad_flags |= BFAD_FCS_INIT_DONE;
1038 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1039 }
1040
1041 /* Start the bfad operations after HAL init done */
1042 bfad_start_ops(bfad);
1043 1173
1044 spin_lock_irqsave(&bfad->bfad_lock, flags); 1174 spin_lock_irqsave(&bfad->bfad_lock, flags);
1045 bfad->bfad_tsk = NULL; 1175 bfad->bfad_tsk = NULL;
@@ -1051,9 +1181,198 @@ bfad_worker(void *ptr)
1051 return 0; 1181 return 0;
1052} 1182}
1053 1183
1054 /* 1184/**
1055 * PCI_entry PCI driver entries * { 1185 * BFA driver interrupt functions
1056 */ 1186 */
1187irqreturn_t
1188bfad_intx(int irq, void *dev_id)
1189{
1190 struct bfad_s *bfad = dev_id;
1191 struct list_head doneq;
1192 unsigned long flags;
1193 bfa_boolean_t rc;
1194
1195 spin_lock_irqsave(&bfad->bfad_lock, flags);
1196 rc = bfa_intx(&bfad->bfa);
1197 if (!rc) {
1198 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1199 return IRQ_NONE;
1200 }
1201
1202 bfa_comp_deq(&bfad->bfa, &doneq);
1203 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1204
1205 if (!list_empty(&doneq)) {
1206 bfa_comp_process(&bfad->bfa, &doneq);
1207
1208 spin_lock_irqsave(&bfad->bfad_lock, flags);
1209 bfa_comp_free(&bfad->bfa, &doneq);
1210 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1211 bfa_trc_fp(bfad, irq);
1212 }
1213
1214 return IRQ_HANDLED;
1215
1216}
1217
1218static irqreturn_t
1219bfad_msix(int irq, void *dev_id)
1220{
1221 struct bfad_msix_s *vec = dev_id;
1222 struct bfad_s *bfad = vec->bfad;
1223 struct list_head doneq;
1224 unsigned long flags;
1225
1226 spin_lock_irqsave(&bfad->bfad_lock, flags);
1227
1228 bfa_msix(&bfad->bfa, vec->msix.entry);
1229 bfa_comp_deq(&bfad->bfa, &doneq);
1230 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1231
1232 if (!list_empty(&doneq)) {
1233 bfa_comp_process(&bfad->bfa, &doneq);
1234
1235 spin_lock_irqsave(&bfad->bfad_lock, flags);
1236 bfa_comp_free(&bfad->bfa, &doneq);
1237 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1238 }
1239
1240 return IRQ_HANDLED;
1241}
1242
1243/**
1244 * Initialize the MSIX entry table.
1245 */
1246static void
1247bfad_init_msix_entry(struct bfad_s *bfad, struct msix_entry *msix_entries,
1248 int mask, int max_bit)
1249{
1250 int i;
1251 int match = 0x00000001;
1252
1253 for (i = 0, bfad->nvec = 0; i < MAX_MSIX_ENTRY; i++) {
1254 if (mask & match) {
1255 bfad->msix_tab[bfad->nvec].msix.entry = i;
1256 bfad->msix_tab[bfad->nvec].bfad = bfad;
1257 msix_entries[bfad->nvec].entry = i;
1258 bfad->nvec++;
1259 }
1260
1261 match <<= 1;
1262 }
1263
1264}
1265
1266int
1267bfad_install_msix_handler(struct bfad_s *bfad)
1268{
1269 int i, error = 0;
1270
1271 for (i = 0; i < bfad->nvec; i++) {
1272 sprintf(bfad->msix_tab[i].name, "bfa-%s-%s",
1273 bfad->pci_name,
1274 ((bfa_asic_id_ct(bfad->hal_pcidev.device_id)) ?
1275 msix_name_ct[i] : msix_name_cb[i]));
1276
1277 error = request_irq(bfad->msix_tab[i].msix.vector,
1278 (irq_handler_t) bfad_msix, 0,
1279 bfad->msix_tab[i].name, &bfad->msix_tab[i]);
1280 bfa_trc(bfad, i);
1281 bfa_trc(bfad, bfad->msix_tab[i].msix.vector);
1282 if (error) {
1283 int j;
1284
1285 for (j = 0; j < i; j++)
1286 free_irq(bfad->msix_tab[j].msix.vector,
1287 &bfad->msix_tab[j]);
1288
1289 return 1;
1290 }
1291 }
1292
1293 return 0;
1294}
1295
1296/**
1297 * Setup MSIX based interrupt.
1298 */
1299int
1300bfad_setup_intr(struct bfad_s *bfad)
1301{
1302 int error = 0;
1303 u32 mask = 0, i, num_bit = 0, max_bit = 0;
1304 struct msix_entry msix_entries[MAX_MSIX_ENTRY];
1305 struct pci_dev *pdev = bfad->pcidev;
1306
1307 /* Call BFA to get the msix map for this PCI function. */
1308 bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit);
1309
1310 /* Set up the msix entry table */
1311 bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);
1312
1313 if ((bfa_asic_id_ct(pdev->device) && !msix_disable_ct) ||
1314 (!bfa_asic_id_ct(pdev->device) && !msix_disable_cb)) {
1315
1316 error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
1317 if (error) {
1318 /*
1319 * Only error number of vector is available.
1320 * We don't have a mechanism to map multiple
1321 * interrupts into one vector, so even if we
1322 * can try to request less vectors, we don't
1323 * know how to associate interrupt events to
1324 * vectors. Linux doesn't dupicate vectors
1325 * in the MSIX table for this case.
1326 */
1327
1328 printk(KERN_WARNING "bfad%d: "
1329 "pci_enable_msix failed (%d),"
1330 " use line based.\n", bfad->inst_no, error);
1331
1332 goto line_based;
1333 }
1334
1335 /* Save the vectors */
1336 for (i = 0; i < bfad->nvec; i++) {
1337 bfa_trc(bfad, msix_entries[i].vector);
1338 bfad->msix_tab[i].msix.vector = msix_entries[i].vector;
1339 }
1340
1341 bfa_msix_init(&bfad->bfa, bfad->nvec);
1342
1343 bfad->bfad_flags |= BFAD_MSIX_ON;
1344
1345 return error;
1346 }
1347
1348line_based:
1349 error = 0;
1350 if (request_irq
1351 (bfad->pcidev->irq, (irq_handler_t) bfad_intx, BFAD_IRQ_FLAGS,
1352 BFAD_DRIVER_NAME, bfad) != 0) {
1353 /* Enable interrupt handler failed */
1354 return 1;
1355 }
1356
1357 return error;
1358}
1359
1360void
1361bfad_remove_intr(struct bfad_s *bfad)
1362{
1363 int i;
1364
1365 if (bfad->bfad_flags & BFAD_MSIX_ON) {
1366 for (i = 0; i < bfad->nvec; i++)
1367 free_irq(bfad->msix_tab[i].msix.vector,
1368 &bfad->msix_tab[i]);
1369
1370 pci_disable_msix(bfad->pcidev);
1371 bfad->bfad_flags &= ~BFAD_MSIX_ON;
1372 } else {
1373 free_irq(bfad->pcidev->irq, bfad);
1374 }
1375}
1057 1376
1058/** 1377/**
1059 * PCI probe entry. 1378 * PCI probe entry.
@@ -1061,18 +1380,14 @@ bfad_worker(void *ptr)
1061int 1380int
1062bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) 1381bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
1063{ 1382{
1064 struct bfad_s *bfad; 1383 struct bfad_s *bfad;
1065 int error = -ENODEV, retval; 1384 int error = -ENODEV, retval;
1066 1385
1067 /* 1386 /* For single port cards - only claim function 0 */
1068 * For single port cards - only claim function 0 1387 if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) &&
1069 */ 1388 (PCI_FUNC(pdev->devfn) != 0))
1070 if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P)
1071 && (PCI_FUNC(pdev->devfn) != 0))
1072 return -ENODEV; 1389 return -ENODEV;
1073 1390
1074 BFA_TRACE(BFA_INFO, "bfad_pci_probe entry");
1075
1076 bfad = kzalloc(sizeof(struct bfad_s), GFP_KERNEL); 1391 bfad = kzalloc(sizeof(struct bfad_s), GFP_KERNEL);
1077 if (!bfad) { 1392 if (!bfad) {
1078 error = -ENOMEM; 1393 error = -ENOMEM;
@@ -1086,21 +1401,11 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
1086 goto out_alloc_trace_failure; 1401 goto out_alloc_trace_failure;
1087 } 1402 }
1088 1403
1089 /* 1404 /* TRACE INIT */
1090 * LOG/TRACE INIT
1091 */
1092 bfa_trc_init(bfad->trcmod); 1405 bfa_trc_init(bfad->trcmod);
1093 bfa_trc(bfad, bfad_inst); 1406 bfa_trc(bfad, bfad_inst);
1094 1407
1095 bfad->logmod = &bfad->log_data;
1096 bfa_log_init(bfad->logmod, (char *)pci_name(pdev), bfa_os_printf);
1097
1098 bfad_drv_log_level_set(bfad);
1099
1100 bfad->aen = &bfad->aen_buf;
1101
1102 if (!(bfad_load_fwimg(pdev))) { 1408 if (!(bfad_load_fwimg(pdev))) {
1103 printk(KERN_WARNING "bfad_load_fwimg failure!\n");
1104 kfree(bfad->trcmod); 1409 kfree(bfad->trcmod);
1105 goto out_alloc_trace_failure; 1410 goto out_alloc_trace_failure;
1106 } 1411 }
@@ -1117,46 +1422,31 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
1117 list_add_tail(&bfad->list_entry, &bfad_list); 1422 list_add_tail(&bfad->list_entry, &bfad_list);
1118 mutex_unlock(&bfad_mutex); 1423 mutex_unlock(&bfad_mutex);
1119 1424
1425 /* Initializing the state machine: State set to uninit */
1426 bfa_sm_set_state(bfad, bfad_sm_uninit);
1427
1120 spin_lock_init(&bfad->bfad_lock); 1428 spin_lock_init(&bfad->bfad_lock);
1121 pci_set_drvdata(pdev, bfad); 1429 pci_set_drvdata(pdev, bfad);
1122 1430
1123 bfad->ref_count = 0; 1431 bfad->ref_count = 0;
1124 bfad->pport.bfad = bfad; 1432 bfad->pport.bfad = bfad;
1125 INIT_LIST_HEAD(&bfad->pbc_pcfg_list); 1433 INIT_LIST_HEAD(&bfad->pbc_vport_list);
1126
1127 bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad, "%s",
1128 "bfad_worker");
1129 if (IS_ERR(bfad->bfad_tsk)) {
1130 printk(KERN_INFO "bfad[%d]: Kernel thread"
1131 " creation failed!\n",
1132 bfad->inst_no);
1133 goto out_kthread_create_failure;
1134 }
1135 1434
1136 retval = bfad_drv_init(bfad); 1435 retval = bfad_drv_init(bfad);
1137 if (retval != BFA_STATUS_OK) 1436 if (retval != BFA_STATUS_OK)
1138 goto out_drv_init_failure; 1437 goto out_drv_init_failure;
1139 if (!(bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
1140 bfad->bfad_flags |= BFAD_HAL_INIT_FAIL;
1141 printk(KERN_WARNING "bfad%d: hal init failed\n", bfad->inst_no);
1142 goto ok;
1143 }
1144 1438
1145 retval = bfad_start_ops(bfad); 1439 bfa_sm_send_event(bfad, BFAD_E_CREATE);
1146 if (retval != BFA_STATUS_OK)
1147 goto out_start_ops_failure;
1148 1440
1149 kthread_stop(bfad->bfad_tsk); 1441 if (bfa_sm_cmp_state(bfad, bfad_sm_uninit))
1150 bfad->bfad_tsk = NULL; 1442 goto out_bfad_sm_failure;
1151 1443
1152ok:
1153 return 0; 1444 return 0;
1154 1445
1155out_start_ops_failure: 1446out_bfad_sm_failure:
1156 bfad_drv_uninit(bfad); 1447 bfa_detach(&bfad->bfa);
1448 bfad_hal_mem_release(bfad);
1157out_drv_init_failure: 1449out_drv_init_failure:
1158 kthread_stop(bfad->bfad_tsk);
1159out_kthread_create_failure:
1160 mutex_lock(&bfad_mutex); 1450 mutex_lock(&bfad_mutex);
1161 bfad_inst--; 1451 bfad_inst--;
1162 list_del(&bfad->list_entry); 1452 list_del(&bfad->list_entry);
@@ -1176,62 +1466,29 @@ out:
1176void 1466void
1177bfad_pci_remove(struct pci_dev *pdev) 1467bfad_pci_remove(struct pci_dev *pdev)
1178{ 1468{
1179 struct bfad_s *bfad = pci_get_drvdata(pdev); 1469 struct bfad_s *bfad = pci_get_drvdata(pdev);
1180 unsigned long flags; 1470 unsigned long flags;
1181 1471
1182 bfa_trc(bfad, bfad->inst_no); 1472 bfa_trc(bfad, bfad->inst_no);
1183 1473
1184 spin_lock_irqsave(&bfad->bfad_lock, flags); 1474 spin_lock_irqsave(&bfad->bfad_lock, flags);
1185 if (bfad->bfad_tsk != NULL) 1475 if (bfad->bfad_tsk != NULL) {
1186 kthread_stop(bfad->bfad_tsk);
1187 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1188
1189 if ((bfad->bfad_flags & BFAD_DRV_INIT_DONE)
1190 && !(bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
1191
1192 spin_lock_irqsave(&bfad->bfad_lock, flags);
1193 init_completion(&bfad->comp);
1194 bfa_stop(&bfad->bfa);
1195 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1476 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1196 wait_for_completion(&bfad->comp); 1477 kthread_stop(bfad->bfad_tsk);
1197 1478 } else {
1198 bfad_remove_intr(bfad);
1199 del_timer_sync(&bfad->hal_tmo);
1200 goto hal_detach;
1201 } else if (!(bfad->bfad_flags & BFAD_DRV_INIT_DONE)) {
1202 goto remove_sysfs;
1203 }
1204
1205 if (bfad->bfad_flags & BFAD_HAL_START_DONE) {
1206 bfad_drv_stop(bfad);
1207 } else if (bfad->bfad_flags & BFAD_DRV_INIT_DONE) {
1208 /* Invoking bfa_stop() before bfa_detach
1209 * when HAL and DRV init are success
1210 * but HAL start did not occur.
1211 */
1212 spin_lock_irqsave(&bfad->bfad_lock, flags);
1213 init_completion(&bfad->comp);
1214 bfa_stop(&bfad->bfa);
1215 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1479 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1216 wait_for_completion(&bfad->comp);
1217 } 1480 }
1218 1481
1219 bfad_remove_intr(bfad); 1482 /* Send Event BFAD_E_STOP */
1220 del_timer_sync(&bfad->hal_tmo); 1483 bfa_sm_send_event(bfad, BFAD_E_STOP);
1221 1484
1222 if (bfad->bfad_flags & BFAD_FC4_PROBE_DONE) 1485 /* Driver detach and dealloc mem */
1223 bfad_fc4_probe_undo(bfad);
1224
1225 if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE)
1226 bfad_uncfg_pport(bfad);
1227
1228hal_detach:
1229 spin_lock_irqsave(&bfad->bfad_lock, flags); 1486 spin_lock_irqsave(&bfad->bfad_lock, flags);
1230 bfa_detach(&bfad->bfa); 1487 bfa_detach(&bfad->bfa);
1231 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1488 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1232 bfad_hal_mem_release(bfad); 1489 bfad_hal_mem_release(bfad);
1233remove_sysfs:
1234 1490
1491 /* Cleaning the BFAD instance */
1235 mutex_lock(&bfad_mutex); 1492 mutex_lock(&bfad_mutex);
1236 bfad_inst--; 1493 bfad_inst--;
1237 list_del(&bfad->list_entry); 1494 list_del(&bfad->list_entry);
@@ -1242,35 +1499,34 @@ remove_sysfs:
1242 kfree(bfad); 1499 kfree(bfad);
1243} 1500}
1244 1501
1245 1502struct pci_device_id bfad_id_table[] = {
1246static struct pci_device_id bfad_id_table[] = {
1247 { 1503 {
1248 .vendor = BFA_PCI_VENDOR_ID_BROCADE, 1504 .vendor = BFA_PCI_VENDOR_ID_BROCADE,
1249 .device = BFA_PCI_DEVICE_ID_FC_8G2P, 1505 .device = BFA_PCI_DEVICE_ID_FC_8G2P,
1250 .subvendor = PCI_ANY_ID, 1506 .subvendor = PCI_ANY_ID,
1251 .subdevice = PCI_ANY_ID, 1507 .subdevice = PCI_ANY_ID,
1252 }, 1508 },
1253 { 1509 {
1254 .vendor = BFA_PCI_VENDOR_ID_BROCADE, 1510 .vendor = BFA_PCI_VENDOR_ID_BROCADE,
1255 .device = BFA_PCI_DEVICE_ID_FC_8G1P, 1511 .device = BFA_PCI_DEVICE_ID_FC_8G1P,
1256 .subvendor = PCI_ANY_ID, 1512 .subvendor = PCI_ANY_ID,
1257 .subdevice = PCI_ANY_ID, 1513 .subdevice = PCI_ANY_ID,
1258 }, 1514 },
1259 { 1515 {
1260 .vendor = BFA_PCI_VENDOR_ID_BROCADE, 1516 .vendor = BFA_PCI_VENDOR_ID_BROCADE,
1261 .device = BFA_PCI_DEVICE_ID_CT, 1517 .device = BFA_PCI_DEVICE_ID_CT,
1262 .subvendor = PCI_ANY_ID, 1518 .subvendor = PCI_ANY_ID,
1263 .subdevice = PCI_ANY_ID, 1519 .subdevice = PCI_ANY_ID,
1264 .class = (PCI_CLASS_SERIAL_FIBER << 8), 1520 .class = (PCI_CLASS_SERIAL_FIBER << 8),
1265 .class_mask = ~0, 1521 .class_mask = ~0,
1266 }, 1522 },
1267 { 1523 {
1268 .vendor = BFA_PCI_VENDOR_ID_BROCADE, 1524 .vendor = BFA_PCI_VENDOR_ID_BROCADE,
1269 .device = BFA_PCI_DEVICE_ID_CT_FC, 1525 .device = BFA_PCI_DEVICE_ID_CT_FC,
1270 .subvendor = PCI_ANY_ID, 1526 .subvendor = PCI_ANY_ID,
1271 .subdevice = PCI_ANY_ID, 1527 .subdevice = PCI_ANY_ID,
1272 .class = (PCI_CLASS_SERIAL_FIBER << 8), 1528 .class = (PCI_CLASS_SERIAL_FIBER << 8),
1273 .class_mask = ~0, 1529 .class_mask = ~0,
1274 }, 1530 },
1275 1531
1276 {0, 0}, 1532 {0, 0},
@@ -1286,89 +1542,104 @@ static struct pci_driver bfad_pci_driver = {
1286}; 1542};
1287 1543
1288/** 1544/**
1289 * Linux driver module functions
1290 */
1291bfa_status_t
1292bfad_fc4_module_init(void)
1293{
1294 int rc;
1295
1296 rc = bfad_im_module_init();
1297 if (rc != BFA_STATUS_OK)
1298 goto ext;
1299
1300 bfad_tm_module_init();
1301 if (ipfc_enable)
1302 bfad_ipfc_module_init();
1303ext:
1304 return rc;
1305}
1306
1307void
1308bfad_fc4_module_exit(void)
1309{
1310 if (ipfc_enable)
1311 bfad_ipfc_module_exit();
1312 bfad_tm_module_exit();
1313 bfad_im_module_exit();
1314}
1315
1316/**
1317 * Driver module init. 1545 * Driver module init.
1318 */ 1546 */
1319static int __init 1547static int __init
1320bfad_init(void) 1548bfad_init(void)
1321{ 1549{
1322 int error = 0; 1550 int error = 0;
1323 1551
1324 printk(KERN_INFO "Brocade BFA FC/FCOE SCSI driver - version: %s\n", 1552 printk(KERN_INFO "Brocade BFA FC/FCOE SCSI driver - version: %s\n",
1325 BFAD_DRIVER_VERSION); 1553 BFAD_DRIVER_VERSION);
1326 1554
1327 if (num_sgpgs > 0) 1555 if (num_sgpgs > 0)
1328 num_sgpgs_parm = num_sgpgs; 1556 num_sgpgs_parm = num_sgpgs;
1329 1557
1330 error = bfad_fc4_module_init(); 1558 error = bfad_im_module_init();
1331 if (error) { 1559 if (error) {
1332 error = -ENOMEM; 1560 error = -ENOMEM;
1333 printk(KERN_WARNING "bfad_fc4_module_init failure\n"); 1561 printk(KERN_WARNING "bfad_im_module_init failure\n");
1334 goto ext; 1562 goto ext;
1335 } 1563 }
1336 1564
1337 if (!strcmp(FCPI_NAME, " fcpim")) 1565 if (strcmp(FCPI_NAME, " fcpim") == 0)
1338 bfad_supported_fc4s |= BFA_PORT_ROLE_FCP_IM; 1566 supported_fc4s |= BFA_LPORT_ROLE_FCP_IM;
1339 if (!strcmp(FCPT_NAME, " fcptm"))
1340 bfad_supported_fc4s |= BFA_PORT_ROLE_FCP_TM;
1341 if (!strcmp(IPFC_NAME, " ipfc"))
1342 bfad_supported_fc4s |= BFA_PORT_ROLE_FCP_IPFC;
1343 1567
1344 bfa_ioc_auto_recover(ioc_auto_recover); 1568 bfa_ioc_auto_recover(ioc_auto_recover);
1345 bfa_fcs_rport_set_del_timeout(rport_del_timeout); 1569 bfa_fcs_rport_set_del_timeout(rport_del_timeout);
1346 error = pci_register_driver(&bfad_pci_driver);
1347 1570
1571 error = pci_register_driver(&bfad_pci_driver);
1348 if (error) { 1572 if (error) {
1349 printk(KERN_WARNING "bfad pci_register_driver failure\n"); 1573 printk(KERN_WARNING "pci_register_driver failure\n");
1350 goto ext; 1574 goto ext;
1351 } 1575 }
1352 1576
1353 return 0; 1577 return 0;
1354 1578
1355ext: 1579ext:
1356 bfad_fc4_module_exit(); 1580 bfad_im_module_exit();
1357 return error; 1581 return error;
1358} 1582}
1359 1583
1360/** 1584/**
1361 * Driver module exit. 1585 * Driver module exit.
1362 */ 1586 */
1363static void __exit 1587static void __exit
1364bfad_exit(void) 1588bfad_exit(void)
1365{ 1589{
1366 pci_unregister_driver(&bfad_pci_driver); 1590 pci_unregister_driver(&bfad_pci_driver);
1367 bfad_fc4_module_exit(); 1591 bfad_im_module_exit();
1368 bfad_free_fwimg(); 1592 bfad_free_fwimg();
1369} 1593}
1370 1594
1371#define BFAD_PROTO_NAME FCPI_NAME FCPT_NAME IPFC_NAME 1595/* Firmware handling */
1596u32 *
1597bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
1598 u32 *bfi_image_size, char *fw_name)
1599{
1600 const struct firmware *fw;
1601
1602 if (request_firmware(&fw, fw_name, &pdev->dev)) {
1603 printk(KERN_ALERT "Can't locate firmware %s\n", fw_name);
1604 goto error;
1605 }
1606
1607 *bfi_image = vmalloc(fw->size);
1608 if (NULL == *bfi_image) {
1609 printk(KERN_ALERT "Fail to allocate buffer for fw image "
1610 "size=%x!\n", (u32) fw->size);
1611 goto error;
1612 }
1613
1614 memcpy(*bfi_image, fw->data, fw->size);
1615 *bfi_image_size = fw->size/sizeof(u32);
1616
1617 return *bfi_image;
1618
1619error:
1620 return NULL;
1621}
1622
1623u32 *
1624bfad_get_firmware_buf(struct pci_dev *pdev)
1625{
1626 if (pdev->device == BFA_PCI_DEVICE_ID_CT_FC) {
1627 if (bfi_image_ct_fc_size == 0)
1628 bfad_read_firmware(pdev, &bfi_image_ct_fc,
1629 &bfi_image_ct_fc_size, BFAD_FW_FILE_CT_FC);
1630 return bfi_image_ct_fc;
1631 } else if (pdev->device == BFA_PCI_DEVICE_ID_CT) {
1632 if (bfi_image_ct_cna_size == 0)
1633 bfad_read_firmware(pdev, &bfi_image_ct_cna,
1634 &bfi_image_ct_cna_size, BFAD_FW_FILE_CT_CNA);
1635 return bfi_image_ct_cna;
1636 } else {
1637 if (bfi_image_cb_fc_size == 0)
1638 bfad_read_firmware(pdev, &bfi_image_cb_fc,
1639 &bfi_image_cb_fc_size, BFAD_FW_FILE_CB_FC);
1640 return bfi_image_cb_fc;
1641 }
1642}
1372 1643
1373module_init(bfad_init); 1644module_init(bfad_init);
1374module_exit(bfad_exit); 1645module_exit(bfad_exit);
@@ -1376,5 +1647,3 @@ MODULE_LICENSE("GPL");
1376MODULE_DESCRIPTION("Brocade Fibre Channel HBA Driver" BFAD_PROTO_NAME); 1647MODULE_DESCRIPTION("Brocade Fibre Channel HBA Driver" BFAD_PROTO_NAME);
1377MODULE_AUTHOR("Brocade Communications Systems, Inc."); 1648MODULE_AUTHOR("Brocade Communications Systems, Inc.");
1378MODULE_VERSION(BFAD_DRIVER_VERSION); 1649MODULE_VERSION(BFAD_DRIVER_VERSION);
1379
1380
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 0818eb07ef88..d8843720eac1 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -19,15 +19,8 @@
19 * bfa_attr.c Linux driver configuration interface module. 19 * bfa_attr.c Linux driver configuration interface module.
20 */ 20 */
21 21
22#include <linux/slab.h>
23#include "bfad_drv.h" 22#include "bfad_drv.h"
24#include "bfad_im.h" 23#include "bfad_im.h"
25#include "bfad_trcmod.h"
26#include "bfad_attr.h"
27
28/**
29 * FC_transport_template FC transport template
30 */
31 24
32/** 25/**
33 * FC transport template entry, get SCSI target port ID. 26 * FC transport template entry, get SCSI target port ID.
@@ -42,7 +35,7 @@ bfad_im_get_starget_port_id(struct scsi_target *starget)
42 u32 fc_id = -1; 35 u32 fc_id = -1;
43 unsigned long flags; 36 unsigned long flags;
44 37
45 shost = bfad_os_starget_to_shost(starget); 38 shost = dev_to_shost(starget->dev.parent);
46 im_port = (struct bfad_im_port_s *) shost->hostdata[0]; 39 im_port = (struct bfad_im_port_s *) shost->hostdata[0];
47 bfad = im_port->bfad; 40 bfad = im_port->bfad;
48 spin_lock_irqsave(&bfad->bfad_lock, flags); 41 spin_lock_irqsave(&bfad->bfad_lock, flags);
@@ -68,7 +61,7 @@ bfad_im_get_starget_node_name(struct scsi_target *starget)
68 u64 node_name = 0; 61 u64 node_name = 0;
69 unsigned long flags; 62 unsigned long flags;
70 63
71 shost = bfad_os_starget_to_shost(starget); 64 shost = dev_to_shost(starget->dev.parent);
72 im_port = (struct bfad_im_port_s *) shost->hostdata[0]; 65 im_port = (struct bfad_im_port_s *) shost->hostdata[0];
73 bfad = im_port->bfad; 66 bfad = im_port->bfad;
74 spin_lock_irqsave(&bfad->bfad_lock, flags); 67 spin_lock_irqsave(&bfad->bfad_lock, flags);
@@ -94,7 +87,7 @@ bfad_im_get_starget_port_name(struct scsi_target *starget)
94 u64 port_name = 0; 87 u64 port_name = 0;
95 unsigned long flags; 88 unsigned long flags;
96 89
97 shost = bfad_os_starget_to_shost(starget); 90 shost = dev_to_shost(starget->dev.parent);
98 im_port = (struct bfad_im_port_s *) shost->hostdata[0]; 91 im_port = (struct bfad_im_port_s *) shost->hostdata[0];
99 bfad = im_port->bfad; 92 bfad = im_port->bfad;
100 spin_lock_irqsave(&bfad->bfad_lock, flags); 93 spin_lock_irqsave(&bfad->bfad_lock, flags);
@@ -118,17 +111,7 @@ bfad_im_get_host_port_id(struct Scsi_Host *shost)
118 struct bfad_port_s *port = im_port->port; 111 struct bfad_port_s *port = im_port->port;
119 112
120 fc_host_port_id(shost) = 113 fc_host_port_id(shost) =
121 bfa_os_hton3b(bfa_fcs_port_get_fcid(port->fcs_port)); 114 bfa_os_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port));
122}
123
124
125
126
127
128struct Scsi_Host *
129bfad_os_starget_to_shost(struct scsi_target *starget)
130{
131 return dev_to_shost(starget->dev.parent);
132} 115}
133 116
134/** 117/**
@@ -140,21 +123,21 @@ bfad_im_get_host_port_type(struct Scsi_Host *shost)
140 struct bfad_im_port_s *im_port = 123 struct bfad_im_port_s *im_port =
141 (struct bfad_im_port_s *) shost->hostdata[0]; 124 (struct bfad_im_port_s *) shost->hostdata[0];
142 struct bfad_s *bfad = im_port->bfad; 125 struct bfad_s *bfad = im_port->bfad;
143 struct bfa_pport_attr_s attr; 126 struct bfa_lport_attr_s port_attr;
144 127
145 bfa_fcport_get_attr(&bfad->bfa, &attr); 128 bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
146 129
147 switch (attr.port_type) { 130 switch (port_attr.port_type) {
148 case BFA_PPORT_TYPE_NPORT: 131 case BFA_PORT_TYPE_NPORT:
149 fc_host_port_type(shost) = FC_PORTTYPE_NPORT; 132 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
150 break; 133 break;
151 case BFA_PPORT_TYPE_NLPORT: 134 case BFA_PORT_TYPE_NLPORT:
152 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; 135 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
153 break; 136 break;
154 case BFA_PPORT_TYPE_P2P: 137 case BFA_PORT_TYPE_P2P:
155 fc_host_port_type(shost) = FC_PORTTYPE_PTP; 138 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
156 break; 139 break;
157 case BFA_PPORT_TYPE_LPORT: 140 case BFA_PORT_TYPE_LPORT:
158 fc_host_port_type(shost) = FC_PORTTYPE_LPORT; 141 fc_host_port_type(shost) = FC_PORTTYPE_LPORT;
159 break; 142 break;
160 default: 143 default:
@@ -172,25 +155,28 @@ bfad_im_get_host_port_state(struct Scsi_Host *shost)
172 struct bfad_im_port_s *im_port = 155 struct bfad_im_port_s *im_port =
173 (struct bfad_im_port_s *) shost->hostdata[0]; 156 (struct bfad_im_port_s *) shost->hostdata[0];
174 struct bfad_s *bfad = im_port->bfad; 157 struct bfad_s *bfad = im_port->bfad;
175 struct bfa_pport_attr_s attr; 158 struct bfa_port_attr_s attr;
176 159
177 bfa_fcport_get_attr(&bfad->bfa, &attr); 160 bfa_fcport_get_attr(&bfad->bfa, &attr);
178 161
179 switch (attr.port_state) { 162 switch (attr.port_state) {
180 case BFA_PPORT_ST_LINKDOWN: 163 case BFA_PORT_ST_LINKDOWN:
181 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; 164 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
182 break; 165 break;
183 case BFA_PPORT_ST_LINKUP: 166 case BFA_PORT_ST_LINKUP:
184 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; 167 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
185 break; 168 break;
186 case BFA_PPORT_ST_UNINIT: 169 case BFA_PORT_ST_DISABLED:
187 case BFA_PPORT_ST_ENABLING_QWAIT: 170 case BFA_PORT_ST_STOPPED:
188 case BFA_PPORT_ST_ENABLING: 171 case BFA_PORT_ST_IOCDOWN:
189 case BFA_PPORT_ST_DISABLING_QWAIT: 172 case BFA_PORT_ST_IOCDIS:
190 case BFA_PPORT_ST_DISABLING: 173 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
191 case BFA_PPORT_ST_DISABLED: 174 break;
192 case BFA_PPORT_ST_STOPPED: 175 case BFA_PORT_ST_UNINIT:
193 case BFA_PPORT_ST_IOCDOWN: 176 case BFA_PORT_ST_ENABLING_QWAIT:
177 case BFA_PORT_ST_ENABLING:
178 case BFA_PORT_ST_DISABLING_QWAIT:
179 case BFA_PORT_ST_DISABLING:
194 default: 180 default:
195 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; 181 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
196 break; 182 break;
@@ -210,13 +196,9 @@ bfad_im_get_host_active_fc4s(struct Scsi_Host *shost)
210 memset(fc_host_active_fc4s(shost), 0, 196 memset(fc_host_active_fc4s(shost), 0,
211 sizeof(fc_host_active_fc4s(shost))); 197 sizeof(fc_host_active_fc4s(shost)));
212 198
213 if (port->supported_fc4s & 199 if (port->supported_fc4s & BFA_LPORT_ROLE_FCP_IM)
214 (BFA_PORT_ROLE_FCP_IM | BFA_PORT_ROLE_FCP_TM))
215 fc_host_active_fc4s(shost)[2] = 1; 200 fc_host_active_fc4s(shost)[2] = 1;
216 201
217 if (port->supported_fc4s & BFA_PORT_ROLE_FCP_IPFC)
218 fc_host_active_fc4s(shost)[3] = 0x20;
219
220 fc_host_active_fc4s(shost)[7] = 1; 202 fc_host_active_fc4s(shost)[7] = 1;
221} 203}
222 204
@@ -229,29 +211,29 @@ bfad_im_get_host_speed(struct Scsi_Host *shost)
229 struct bfad_im_port_s *im_port = 211 struct bfad_im_port_s *im_port =
230 (struct bfad_im_port_s *) shost->hostdata[0]; 212 (struct bfad_im_port_s *) shost->hostdata[0];
231 struct bfad_s *bfad = im_port->bfad; 213 struct bfad_s *bfad = im_port->bfad;
232 struct bfa_pport_attr_s attr; 214 struct bfa_port_attr_s attr;
233 unsigned long flags;
234 215
235 spin_lock_irqsave(shost->host_lock, flags);
236 bfa_fcport_get_attr(&bfad->bfa, &attr); 216 bfa_fcport_get_attr(&bfad->bfa, &attr);
237 switch (attr.speed) { 217 switch (attr.speed) {
238 case BFA_PPORT_SPEED_8GBPS: 218 case BFA_PORT_SPEED_10GBPS:
219 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
220 break;
221 case BFA_PORT_SPEED_8GBPS:
239 fc_host_speed(shost) = FC_PORTSPEED_8GBIT; 222 fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
240 break; 223 break;
241 case BFA_PPORT_SPEED_4GBPS: 224 case BFA_PORT_SPEED_4GBPS:
242 fc_host_speed(shost) = FC_PORTSPEED_4GBIT; 225 fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
243 break; 226 break;
244 case BFA_PPORT_SPEED_2GBPS: 227 case BFA_PORT_SPEED_2GBPS:
245 fc_host_speed(shost) = FC_PORTSPEED_2GBIT; 228 fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
246 break; 229 break;
247 case BFA_PPORT_SPEED_1GBPS: 230 case BFA_PORT_SPEED_1GBPS:
248 fc_host_speed(shost) = FC_PORTSPEED_1GBIT; 231 fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
249 break; 232 break;
250 default: 233 default:
251 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; 234 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
252 break; 235 break;
253 } 236 }
254 spin_unlock_irqrestore(shost->host_lock, flags);
255} 237}
256 238
257/** 239/**
@@ -265,7 +247,7 @@ bfad_im_get_host_fabric_name(struct Scsi_Host *shost)
265 struct bfad_port_s *port = im_port->port; 247 struct bfad_port_s *port = im_port->port;
266 wwn_t fabric_nwwn = 0; 248 wwn_t fabric_nwwn = 0;
267 249
268 fabric_nwwn = bfa_fcs_port_get_fabric_name(port->fcs_port); 250 fabric_nwwn = bfa_fcs_lport_get_fabric_name(port->fcs_port);
269 251
270 fc_host_fabric_name(shost) = bfa_os_htonll(fabric_nwwn); 252 fc_host_fabric_name(shost) = bfa_os_htonll(fabric_nwwn);
271 253
@@ -281,23 +263,44 @@ bfad_im_get_stats(struct Scsi_Host *shost)
281 (struct bfad_im_port_s *) shost->hostdata[0]; 263 (struct bfad_im_port_s *) shost->hostdata[0];
282 struct bfad_s *bfad = im_port->bfad; 264 struct bfad_s *bfad = im_port->bfad;
283 struct bfad_hal_comp fcomp; 265 struct bfad_hal_comp fcomp;
266 union bfa_port_stats_u *fcstats;
284 struct fc_host_statistics *hstats; 267 struct fc_host_statistics *hstats;
285 bfa_status_t rc; 268 bfa_status_t rc;
286 unsigned long flags; 269 unsigned long flags;
287 270
271 fcstats = kzalloc(sizeof(union bfa_port_stats_u), GFP_KERNEL);
272 if (fcstats == NULL)
273 return NULL;
274
288 hstats = &bfad->link_stats; 275 hstats = &bfad->link_stats;
289 init_completion(&fcomp.comp); 276 init_completion(&fcomp.comp);
290 spin_lock_irqsave(&bfad->bfad_lock, flags); 277 spin_lock_irqsave(&bfad->bfad_lock, flags);
291 memset(hstats, 0, sizeof(struct fc_host_statistics)); 278 memset(hstats, 0, sizeof(struct fc_host_statistics));
292 rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa), 279 rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa),
293 (union bfa_pport_stats_u *) hstats, 280 fcstats, bfad_hcb_comp, &fcomp);
294 bfad_hcb_comp, &fcomp);
295 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 281 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
296 if (rc != BFA_STATUS_OK) 282 if (rc != BFA_STATUS_OK)
297 return NULL; 283 return NULL;
298 284
299 wait_for_completion(&fcomp.comp); 285 wait_for_completion(&fcomp.comp);
300 286
287 /* Fill the fc_host_statistics structure */
288 hstats->seconds_since_last_reset = fcstats->fc.secs_reset;
289 hstats->tx_frames = fcstats->fc.tx_frames;
290 hstats->tx_words = fcstats->fc.tx_words;
291 hstats->rx_frames = fcstats->fc.rx_frames;
292 hstats->rx_words = fcstats->fc.rx_words;
293 hstats->lip_count = fcstats->fc.lip_count;
294 hstats->nos_count = fcstats->fc.nos_count;
295 hstats->error_frames = fcstats->fc.error_frames;
296 hstats->dumped_frames = fcstats->fc.dropped_frames;
297 hstats->link_failure_count = fcstats->fc.link_failures;
298 hstats->loss_of_sync_count = fcstats->fc.loss_of_syncs;
299 hstats->loss_of_signal_count = fcstats->fc.loss_of_signals;
300 hstats->prim_seq_protocol_err_count = fcstats->fc.primseq_errs;
301 hstats->invalid_crc_count = fcstats->fc.invalid_crcs;
302
303 kfree(fcstats);
301 return hstats; 304 return hstats;
302} 305}
303 306
@@ -317,7 +320,7 @@ bfad_im_reset_stats(struct Scsi_Host *shost)
317 init_completion(&fcomp.comp); 320 init_completion(&fcomp.comp);
318 spin_lock_irqsave(&bfad->bfad_lock, flags); 321 spin_lock_irqsave(&bfad->bfad_lock, flags);
319 rc = bfa_port_clear_stats(BFA_FCPORT(&bfad->bfa), bfad_hcb_comp, 322 rc = bfa_port_clear_stats(BFA_FCPORT(&bfad->bfa), bfad_hcb_comp,
320 &fcomp); 323 &fcomp);
321 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 324 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
322 325
323 if (rc != BFA_STATUS_OK) 326 if (rc != BFA_STATUS_OK)
@@ -372,8 +375,8 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
372 struct bfad_im_port_s *im_port = 375 struct bfad_im_port_s *im_port =
373 (struct bfad_im_port_s *) shost->hostdata[0]; 376 (struct bfad_im_port_s *) shost->hostdata[0];
374 struct bfad_s *bfad = im_port->bfad; 377 struct bfad_s *bfad = im_port->bfad;
375 struct bfa_port_cfg_s port_cfg; 378 struct bfa_lport_cfg_s port_cfg;
376 struct bfad_pcfg_s *pcfg; 379 struct bfad_vport_s *vp;
377 int status = 0, rc; 380 int status = 0, rc;
378 unsigned long flags; 381 unsigned long flags;
379 382
@@ -382,12 +385,14 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
382 u64_to_wwn(fc_vport->port_name, (u8 *)&port_cfg.pwwn); 385 u64_to_wwn(fc_vport->port_name, (u8 *)&port_cfg.pwwn);
383 if (strlen(vname) > 0) 386 if (strlen(vname) > 0)
384 strcpy((char *)&port_cfg.sym_name, vname); 387 strcpy((char *)&port_cfg.sym_name, vname);
385 port_cfg.roles = BFA_PORT_ROLE_FCP_IM; 388 port_cfg.roles = BFA_LPORT_ROLE_FCP_IM;
386 389
387 spin_lock_irqsave(&bfad->bfad_lock, flags); 390 spin_lock_irqsave(&bfad->bfad_lock, flags);
388 list_for_each_entry(pcfg, &bfad->pbc_pcfg_list, list_entry) { 391 list_for_each_entry(vp, &bfad->pbc_vport_list, list_entry) {
389 if (port_cfg.pwwn == pcfg->port_cfg.pwwn) { 392 if (port_cfg.pwwn ==
390 port_cfg.preboot_vp = pcfg->port_cfg.preboot_vp; 393 vp->fcs_vport.lport.port_cfg.pwwn) {
394 port_cfg.preboot_vp =
395 vp->fcs_vport.lport.port_cfg.preboot_vp;
391 break; 396 break;
392 } 397 }
393 } 398 }
@@ -638,7 +643,7 @@ bfad_im_serial_num_show(struct device *dev, struct device_attribute *attr,
638 struct Scsi_Host *shost = class_to_shost(dev); 643 struct Scsi_Host *shost = class_to_shost(dev);
639 struct bfad_im_port_s *im_port = 644 struct bfad_im_port_s *im_port =
640 (struct bfad_im_port_s *) shost->hostdata[0]; 645 (struct bfad_im_port_s *) shost->hostdata[0];
641 struct bfad_s *bfad = im_port->bfad; 646 struct bfad_s *bfad = im_port->bfad;
642 char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN]; 647 char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
643 648
644 bfa_get_adapter_serial_num(&bfad->bfa, serial_num); 649 bfa_get_adapter_serial_num(&bfad->bfa, serial_num);
@@ -652,7 +657,7 @@ bfad_im_model_show(struct device *dev, struct device_attribute *attr,
652 struct Scsi_Host *shost = class_to_shost(dev); 657 struct Scsi_Host *shost = class_to_shost(dev);
653 struct bfad_im_port_s *im_port = 658 struct bfad_im_port_s *im_port =
654 (struct bfad_im_port_s *) shost->hostdata[0]; 659 (struct bfad_im_port_s *) shost->hostdata[0];
655 struct bfad_s *bfad = im_port->bfad; 660 struct bfad_s *bfad = im_port->bfad;
656 char model[BFA_ADAPTER_MODEL_NAME_LEN]; 661 char model[BFA_ADAPTER_MODEL_NAME_LEN];
657 662
658 bfa_get_adapter_model(&bfad->bfa, model); 663 bfa_get_adapter_model(&bfad->bfa, model);
@@ -666,10 +671,54 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
666 struct Scsi_Host *shost = class_to_shost(dev); 671 struct Scsi_Host *shost = class_to_shost(dev);
667 struct bfad_im_port_s *im_port = 672 struct bfad_im_port_s *im_port =
668 (struct bfad_im_port_s *) shost->hostdata[0]; 673 (struct bfad_im_port_s *) shost->hostdata[0];
669 struct bfad_s *bfad = im_port->bfad; 674 struct bfad_s *bfad = im_port->bfad;
675 char model[BFA_ADAPTER_MODEL_NAME_LEN];
670 char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN]; 676 char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
671 677
672 bfa_get_adapter_model(&bfad->bfa, model_descr); 678 bfa_get_adapter_model(&bfad->bfa, model);
679 if (!strcmp(model, "Brocade-425"))
680 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
681 "Brocade 4Gbps PCIe dual port FC HBA");
682 else if (!strcmp(model, "Brocade-825"))
683 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
684 "Brocade 8Gbps PCIe dual port FC HBA");
685 else if (!strcmp(model, "Brocade-42B"))
686 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
687 "HP 4Gbps PCIe dual port FC HBA");
688 else if (!strcmp(model, "Brocade-82B"))
689 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
690 "HP 8Gbps PCIe dual port FC HBA");
691 else if (!strcmp(model, "Brocade-1010"))
692 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
693 "Brocade 10Gbps single port CNA");
694 else if (!strcmp(model, "Brocade-1020"))
695 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
696 "Brocade 10Gbps dual port CNA");
697 else if (!strcmp(model, "Brocade-1007"))
698 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
699 "Brocade 10Gbps CNA");
700 else if (!strcmp(model, "Brocade-415"))
701 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
702 "Brocade 4Gbps PCIe single port FC HBA");
703 else if (!strcmp(model, "Brocade-815"))
704 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
705 "Brocade 8Gbps PCIe single port FC HBA");
706 else if (!strcmp(model, "Brocade-41B"))
707 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
708 "HP 4Gbps PCIe single port FC HBA");
709 else if (!strcmp(model, "Brocade-81B"))
710 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
711 "HP 8Gbps PCIe single port FC HBA");
712 else if (!strcmp(model, "Brocade-804"))
713 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
714 "HP Bladesystem C-class 8Gbps FC HBA");
715 else if (!strcmp(model, "Brocade-902"))
716 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
717 "Brocade 10Gbps CNA");
718 else
719 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
720 "Invalid Model");
721
673 return snprintf(buf, PAGE_SIZE, "%s\n", model_descr); 722 return snprintf(buf, PAGE_SIZE, "%s\n", model_descr);
674} 723}
675 724
@@ -683,7 +732,7 @@ bfad_im_node_name_show(struct device *dev, struct device_attribute *attr,
683 struct bfad_port_s *port = im_port->port; 732 struct bfad_port_s *port = im_port->port;
684 u64 nwwn; 733 u64 nwwn;
685 734
686 nwwn = bfa_fcs_port_get_nwwn(port->fcs_port); 735 nwwn = bfa_fcs_lport_get_nwwn(port->fcs_port);
687 return snprintf(buf, PAGE_SIZE, "0x%llx\n", bfa_os_htonll(nwwn)); 736 return snprintf(buf, PAGE_SIZE, "0x%llx\n", bfa_os_htonll(nwwn));
688} 737}
689 738
@@ -694,14 +743,14 @@ bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr,
694 struct Scsi_Host *shost = class_to_shost(dev); 743 struct Scsi_Host *shost = class_to_shost(dev);
695 struct bfad_im_port_s *im_port = 744 struct bfad_im_port_s *im_port =
696 (struct bfad_im_port_s *) shost->hostdata[0]; 745 (struct bfad_im_port_s *) shost->hostdata[0];
697 struct bfad_s *bfad = im_port->bfad; 746 struct bfad_s *bfad = im_port->bfad;
698 char model[BFA_ADAPTER_MODEL_NAME_LEN]; 747 struct bfa_lport_attr_s port_attr;
699 char fw_ver[BFA_VERSION_LEN]; 748 char symname[BFA_SYMNAME_MAXLEN];
700 749
701 bfa_get_adapter_model(&bfad->bfa, model); 750 bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
702 bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver); 751 strncpy(symname, port_attr.port_cfg.sym_name.symname,
703 return snprintf(buf, PAGE_SIZE, "Brocade %s FV%s DV%s\n", 752 BFA_SYMNAME_MAXLEN);
704 model, fw_ver, BFAD_DRIVER_VERSION); 753 return snprintf(buf, PAGE_SIZE, "%s\n", symname);
705} 754}
706 755
707static ssize_t 756static ssize_t
@@ -711,7 +760,7 @@ bfad_im_hw_version_show(struct device *dev, struct device_attribute *attr,
711 struct Scsi_Host *shost = class_to_shost(dev); 760 struct Scsi_Host *shost = class_to_shost(dev);
712 struct bfad_im_port_s *im_port = 761 struct bfad_im_port_s *im_port =
713 (struct bfad_im_port_s *) shost->hostdata[0]; 762 (struct bfad_im_port_s *) shost->hostdata[0];
714 struct bfad_s *bfad = im_port->bfad; 763 struct bfad_s *bfad = im_port->bfad;
715 char hw_ver[BFA_VERSION_LEN]; 764 char hw_ver[BFA_VERSION_LEN];
716 765
717 bfa_get_pci_chip_rev(&bfad->bfa, hw_ver); 766 bfa_get_pci_chip_rev(&bfad->bfa, hw_ver);
@@ -732,7 +781,7 @@ bfad_im_optionrom_version_show(struct device *dev,
732 struct Scsi_Host *shost = class_to_shost(dev); 781 struct Scsi_Host *shost = class_to_shost(dev);
733 struct bfad_im_port_s *im_port = 782 struct bfad_im_port_s *im_port =
734 (struct bfad_im_port_s *) shost->hostdata[0]; 783 (struct bfad_im_port_s *) shost->hostdata[0];
735 struct bfad_s *bfad = im_port->bfad; 784 struct bfad_s *bfad = im_port->bfad;
736 char optrom_ver[BFA_VERSION_LEN]; 785 char optrom_ver[BFA_VERSION_LEN];
737 786
738 bfa_get_adapter_optrom_ver(&bfad->bfa, optrom_ver); 787 bfa_get_adapter_optrom_ver(&bfad->bfa, optrom_ver);
@@ -746,7 +795,7 @@ bfad_im_fw_version_show(struct device *dev, struct device_attribute *attr,
746 struct Scsi_Host *shost = class_to_shost(dev); 795 struct Scsi_Host *shost = class_to_shost(dev);
747 struct bfad_im_port_s *im_port = 796 struct bfad_im_port_s *im_port =
748 (struct bfad_im_port_s *) shost->hostdata[0]; 797 (struct bfad_im_port_s *) shost->hostdata[0];
749 struct bfad_s *bfad = im_port->bfad; 798 struct bfad_s *bfad = im_port->bfad;
750 char fw_ver[BFA_VERSION_LEN]; 799 char fw_ver[BFA_VERSION_LEN];
751 800
752 bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver); 801 bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver);
@@ -760,10 +809,10 @@ bfad_im_num_of_ports_show(struct device *dev, struct device_attribute *attr,
760 struct Scsi_Host *shost = class_to_shost(dev); 809 struct Scsi_Host *shost = class_to_shost(dev);
761 struct bfad_im_port_s *im_port = 810 struct bfad_im_port_s *im_port =
762 (struct bfad_im_port_s *) shost->hostdata[0]; 811 (struct bfad_im_port_s *) shost->hostdata[0];
763 struct bfad_s *bfad = im_port->bfad; 812 struct bfad_s *bfad = im_port->bfad;
764 813
765 return snprintf(buf, PAGE_SIZE, "%d\n", 814 return snprintf(buf, PAGE_SIZE, "%d\n",
766 bfa_get_nports(&bfad->bfa)); 815 bfa_get_nports(&bfad->bfa));
767} 816}
768 817
769static ssize_t 818static ssize_t
@@ -788,10 +837,10 @@ bfad_im_num_of_discovered_ports_show(struct device *dev,
788 837
789 rports = kzalloc(sizeof(wwn_t) * nrports , GFP_ATOMIC); 838 rports = kzalloc(sizeof(wwn_t) * nrports , GFP_ATOMIC);
790 if (rports == NULL) 839 if (rports == NULL)
791 return -ENOMEM; 840 return snprintf(buf, PAGE_SIZE, "Failed\n");
792 841
793 spin_lock_irqsave(&bfad->bfad_lock, flags); 842 spin_lock_irqsave(&bfad->bfad_lock, flags);
794 bfa_fcs_port_get_rports(port->fcs_port, rports, &nrports); 843 bfa_fcs_lport_get_rports(port->fcs_port, rports, &nrports);
795 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 844 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
796 kfree(rports); 845 kfree(rports);
797 846
@@ -837,19 +886,19 @@ struct device_attribute *bfad_im_host_attrs[] = {
837}; 886};
838 887
839struct device_attribute *bfad_im_vport_attrs[] = { 888struct device_attribute *bfad_im_vport_attrs[] = {
840 &dev_attr_serial_number, 889 &dev_attr_serial_number,
841 &dev_attr_model, 890 &dev_attr_model,
842 &dev_attr_model_description, 891 &dev_attr_model_description,
843 &dev_attr_node_name, 892 &dev_attr_node_name,
844 &dev_attr_symbolic_name, 893 &dev_attr_symbolic_name,
845 &dev_attr_hardware_version, 894 &dev_attr_hardware_version,
846 &dev_attr_driver_version, 895 &dev_attr_driver_version,
847 &dev_attr_option_rom_version, 896 &dev_attr_option_rom_version,
848 &dev_attr_firmware_version, 897 &dev_attr_firmware_version,
849 &dev_attr_number_of_ports, 898 &dev_attr_number_of_ports,
850 &dev_attr_driver_name, 899 &dev_attr_driver_name,
851 &dev_attr_number_of_discovered_ports, 900 &dev_attr_number_of_discovered_ports,
852 NULL, 901 NULL,
853}; 902};
854 903
855 904
diff --git a/drivers/scsi/bfa/bfad_attr.h b/drivers/scsi/bfa/bfad_attr.h
deleted file mode 100644
index bf0102076508..000000000000
--- a/drivers/scsi/bfa/bfad_attr.h
+++ /dev/null
@@ -1,56 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFAD_ATTR_H__
19#define __BFAD_ATTR_H__
20
21/**
22 * FC_transport_template FC transport template
23 */
24
25struct Scsi_Host*
26bfad_os_dev_to_shost(struct scsi_target *starget);
27
28/**
29 * FC transport template entry, get SCSI target port ID.
30 */
31void
32bfad_im_get_starget_port_id(struct scsi_target *starget);
33
34/**
35 * FC transport template entry, get SCSI target nwwn.
36 */
37void
38bfad_im_get_starget_node_name(struct scsi_target *starget);
39
40/**
41 * FC transport template entry, get SCSI target pwwn.
42 */
43void
44bfad_im_get_starget_port_name(struct scsi_target *starget);
45
46/**
47 * FC transport template entry, get SCSI host port ID.
48 */
49void
50bfad_im_get_host_port_id(struct Scsi_Host *shost);
51
52struct Scsi_Host*
53bfad_os_starget_to_shost(struct scsi_target *starget);
54
55
56#endif /* __BFAD_ATTR_H__ */
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index 4b82f12aad62..69ed1c4a903e 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -17,8 +17,8 @@
17 17
18#include <linux/debugfs.h> 18#include <linux/debugfs.h>
19 19
20#include <bfad_drv.h> 20#include "bfad_drv.h"
21#include <bfad_im.h> 21#include "bfad_im.h"
22 22
23/* 23/*
24 * BFA debufs interface 24 * BFA debufs interface
@@ -28,7 +28,7 @@
28 * mount -t debugfs none /sys/kernel/debug 28 * mount -t debugfs none /sys/kernel/debug
29 * 29 *
30 * BFA Hierarchy: 30 * BFA Hierarchy:
31 * - bfa/host# 31 * - bfa/host#
32 * where the host number corresponds to the one under /sys/class/scsi_host/host# 32 * where the host number corresponds to the one under /sys/class/scsi_host/host#
33 * 33 *
34 * Debugging service available per host: 34 * Debugging service available per host:
@@ -217,7 +217,7 @@ bfad_debugfs_read(struct file *file, char __user *buf,
217#define BFA_REG_ADDRSZ(__bfa) \ 217#define BFA_REG_ADDRSZ(__bfa) \
218 ((bfa_ioc_devid(&(__bfa)->ioc) == BFA_PCI_DEVICE_ID_CT) ? \ 218 ((bfa_ioc_devid(&(__bfa)->ioc) == BFA_PCI_DEVICE_ID_CT) ? \
219 BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ) 219 BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ)
220#define BFA_REG_ADDRMSK(__bfa) ((uint32_t)(BFA_REG_ADDRSZ(__bfa) - 1)) 220#define BFA_REG_ADDRMSK(__bfa) ((u32)(BFA_REG_ADDRSZ(__bfa) - 1))
221 221
222static bfa_status_t 222static bfa_status_t
223bfad_reg_offset_check(struct bfa_s *bfa, u32 offset, u32 len) 223bfad_reg_offset_check(struct bfa_s *bfa, u32 offset, u32 len)
@@ -359,7 +359,7 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf,
359 return -EINVAL; 359 return -EINVAL;
360 } 360 }
361 361
362 reg_addr = (uint32_t *) ((uint8_t *) bfa_ioc_bar0(ioc) + addr); 362 reg_addr = (u32 *) ((u8 *) bfa_ioc_bar0(ioc) + addr);
363 spin_lock_irqsave(&bfad->bfad_lock, flags); 363 spin_lock_irqsave(&bfad->bfad_lock, flags);
364 bfa_reg_write(reg_addr, val); 364 bfa_reg_write(reg_addr, val);
365 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 365 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 465b8b86ec9c..98420bbb4f3f 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -28,30 +28,27 @@
28 28
29#include "bfa_os_inc.h" 29#include "bfa_os_inc.h"
30 30
31#include <bfa.h> 31#include "bfa_modules.h"
32#include <bfa_svc.h> 32#include "bfa_fcs.h"
33#include <fcs/bfa_fcs.h> 33#include "bfa_defs_fcs.h"
34#include <defs/bfa_defs_pci.h> 34
35#include <defs/bfa_defs_port.h> 35#include "bfa_plog.h"
36#include <defs/bfa_defs_rport.h> 36#include "bfa_cs.h"
37#include <fcs/bfa_fcs_rport.h> 37
38#include <defs/bfa_defs_vport.h> 38#define BFAD_DRIVER_NAME "bfa"
39#include <fcs/bfa_fcs_vport.h>
40
41#include <cs/bfa_plog.h>
42#include "aen/bfa_aen.h"
43#include <log/bfa_log_linux.h>
44
45#define BFAD_DRIVER_NAME "bfa"
46#ifdef BFA_DRIVER_VERSION 39#ifdef BFA_DRIVER_VERSION
47#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION 40#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION
48#else 41#else
49#define BFAD_DRIVER_VERSION "2.2.2.1" 42#define BFAD_DRIVER_VERSION "2.3.2.0"
50#endif 43#endif
51 44
52 45#define BFAD_PROTO_NAME FCPI_NAME
53#define BFAD_IRQ_FLAGS IRQF_SHARED 46#define BFAD_IRQ_FLAGS IRQF_SHARED
54 47
48#ifndef FC_PORTSPEED_8GBIT
49#define FC_PORTSPEED_8GBIT 0x10
50#endif
51
55/* 52/*
56 * BFAD flags 53 * BFAD flags
57 */ 54 */
@@ -62,9 +59,9 @@
62#define BFAD_HAL_START_DONE 0x00000010 59#define BFAD_HAL_START_DONE 0x00000010
63#define BFAD_PORT_ONLINE 0x00000020 60#define BFAD_PORT_ONLINE 0x00000020
64#define BFAD_RPORT_ONLINE 0x00000040 61#define BFAD_RPORT_ONLINE 0x00000040
65#define BFAD_FCS_INIT_DONE 0x00000080 62#define BFAD_FCS_INIT_DONE 0x00000080
66#define BFAD_HAL_INIT_FAIL 0x00000100 63#define BFAD_HAL_INIT_FAIL 0x00000100
67#define BFAD_FC4_PROBE_DONE 0x00000200 64#define BFAD_FC4_PROBE_DONE 0x00000200
68#define BFAD_PORT_DELETE 0x00000001 65#define BFAD_PORT_DELETE 0x00000001
69 66
70/* 67/*
@@ -77,8 +74,8 @@
77/* 74/*
78 * BFAD configuration parameter default values 75 * BFAD configuration parameter default values
79 */ 76 */
80#define BFAD_LUN_QUEUE_DEPTH 32 77#define BFAD_LUN_QUEUE_DEPTH 32
81#define BFAD_IO_MAX_SGE SG_ALL 78#define BFAD_IO_MAX_SGE SG_ALL
82 79
83#define bfad_isr_t irq_handler_t 80#define bfad_isr_t irq_handler_t
84 81
@@ -87,6 +84,16 @@
87struct bfad_msix_s { 84struct bfad_msix_s {
88 struct bfad_s *bfad; 85 struct bfad_s *bfad;
89 struct msix_entry msix; 86 struct msix_entry msix;
87 char name[32];
88};
89
90/*
91 * Only append to the enums defined here to avoid any versioning
92 * needed between trace utility and driver version
93 */
94enum {
95 BFA_TRC_LDRV_BFAD = 1,
96 BFA_TRC_LDRV_IM = 2,
90}; 97};
91 98
92enum bfad_port_pvb_type { 99enum bfad_port_pvb_type {
@@ -101,17 +108,13 @@ enum bfad_port_pvb_type {
101 */ 108 */
102struct bfad_port_s { 109struct bfad_port_s {
103 struct list_head list_entry; 110 struct list_head list_entry;
104 struct bfad_s *bfad; 111 struct bfad_s *bfad;
105 struct bfa_fcs_port_s *fcs_port; 112 struct bfa_fcs_lport_s *fcs_port;
106 u32 roles; 113 u32 roles;
107 s32 flags; 114 s32 flags;
108 u32 supported_fc4s; 115 u32 supported_fc4s;
109 u8 ipfc_flags;
110 enum bfad_port_pvb_type pvb_type; 116 enum bfad_port_pvb_type pvb_type;
111 struct bfad_im_port_s *im_port; /* IM specific data */ 117 struct bfad_im_port_s *im_port; /* IM specific data */
112 struct bfad_tm_port_s *tm_port; /* TM specific data */
113 struct bfad_ipfc_port_s *ipfc_port; /* IPFC specific data */
114
115 /* port debugfs specific data */ 118 /* port debugfs specific data */
116 struct dentry *port_debugfs_root; 119 struct dentry *port_debugfs_root;
117}; 120};
@@ -124,7 +127,6 @@ struct bfad_vport_s {
124 struct bfa_fcs_vport_s fcs_vport; 127 struct bfa_fcs_vport_s fcs_vport;
125 struct completion *comp_del; 128 struct completion *comp_del;
126 struct list_head list_entry; 129 struct list_head list_entry;
127 struct bfa_port_cfg_s port_cfg;
128}; 130};
129 131
130/* 132/*
@@ -137,20 +139,35 @@ struct bfad_vf_s {
137}; 139};
138 140
139struct bfad_cfg_param_s { 141struct bfad_cfg_param_s {
140 u32 rport_del_timeout; 142 u32 rport_del_timeout;
141 u32 ioc_queue_depth; 143 u32 ioc_queue_depth;
142 u32 lun_queue_depth; 144 u32 lun_queue_depth;
143 u32 io_max_sge; 145 u32 io_max_sge;
144 u32 binding_method; 146 u32 binding_method;
147};
148
149union bfad_tmp_buf {
150 /* From struct bfa_adapter_attr_s */
151 char manufacturer[BFA_ADAPTER_MFG_NAME_LEN];
152 char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
153 char model[BFA_ADAPTER_MODEL_NAME_LEN];
154 char fw_ver[BFA_VERSION_LEN];
155 char optrom_ver[BFA_VERSION_LEN];
156
157 /* From struct bfa_ioc_pci_attr_s */
158 u8 chip_rev[BFA_IOC_CHIP_REV_LEN]; /* chip revision */
159
160 wwn_t wwn[BFA_FCS_MAX_LPORTS];
145}; 161};
146 162
147/* 163/*
148 * BFAD (PCI function) data structure 164 * BFAD (PCI function) data structure
149 */ 165 */
150struct bfad_s { 166struct bfad_s {
167 bfa_sm_t sm; /* state machine */
151 struct list_head list_entry; 168 struct list_head list_entry;
152 struct bfa_s bfa; 169 struct bfa_s bfa;
153 struct bfa_fcs_s bfa_fcs; 170 struct bfa_fcs_s bfa_fcs;
154 struct pci_dev *pcidev; 171 struct pci_dev *pcidev;
155 const char *pci_name; 172 const char *pci_name;
156 struct bfa_pcidev_s hal_pcidev; 173 struct bfa_pcidev_s hal_pcidev;
@@ -163,41 +180,41 @@ struct bfad_s {
163 struct bfad_port_s pport; /* physical port of the BFAD */ 180 struct bfad_port_s pport; /* physical port of the BFAD */
164 struct bfa_meminfo_s meminfo; 181 struct bfa_meminfo_s meminfo;
165 struct bfa_iocfc_cfg_s ioc_cfg; 182 struct bfa_iocfc_cfg_s ioc_cfg;
166 u32 inst_no; /* BFAD instance number */ 183 u32 inst_no; /* BFAD instance number */
167 u32 bfad_flags; 184 u32 bfad_flags;
168 spinlock_t bfad_lock; 185 spinlock_t bfad_lock;
169 struct task_struct *bfad_tsk; 186 struct task_struct *bfad_tsk;
170 struct bfad_cfg_param_s cfg_data; 187 struct bfad_cfg_param_s cfg_data;
171 struct bfad_msix_s msix_tab[MAX_MSIX_ENTRY]; 188 struct bfad_msix_s msix_tab[MAX_MSIX_ENTRY];
172 int nvec; 189 int nvec;
173 char adapter_name[BFA_ADAPTER_SYM_NAME_LEN]; 190 char adapter_name[BFA_ADAPTER_SYM_NAME_LEN];
174 char port_name[BFA_ADAPTER_SYM_NAME_LEN]; 191 char port_name[BFA_ADAPTER_SYM_NAME_LEN];
175 struct timer_list hal_tmo; 192 struct timer_list hal_tmo;
176 unsigned long hs_start; 193 unsigned long hs_start;
177 struct bfad_im_s *im; /* IM specific data */ 194 struct bfad_im_s *im; /* IM specific data */
178 struct bfad_tm_s *tm; /* TM specific data */
179 struct bfad_ipfc_s *ipfc; /* IPFC specific data */
180 struct bfa_log_mod_s log_data;
181 struct bfa_trc_mod_s *trcmod; 195 struct bfa_trc_mod_s *trcmod;
182 struct bfa_log_mod_s *logmod;
183 struct bfa_aen_s *aen;
184 struct bfa_aen_s aen_buf;
185 void *file_map[BFA_AEN_MAX_APP];
186 struct bfa_plog_s plog_buf; 196 struct bfa_plog_s plog_buf;
187 int ref_count; 197 int ref_count;
188 bfa_boolean_t ipfc_enabled; 198 union bfad_tmp_buf tmp_buf;
189 struct fc_host_statistics link_stats; 199 struct fc_host_statistics link_stats;
190 struct list_head pbc_pcfg_list; 200 struct list_head pbc_vport_list;
191 atomic_t wq_reqcnt;
192 /* debugfs specific data */ 201 /* debugfs specific data */
193 char *regdata; 202 char *regdata;
194 u32 reglen; 203 u32 reglen;
195 struct dentry *bfad_dentry_files[5]; 204 struct dentry *bfad_dentry_files[5];
196}; 205};
197 206
198struct bfad_pcfg_s { 207/* BFAD state machine events */
199 struct list_head list_entry; 208enum bfad_sm_event {
200 struct bfa_port_cfg_s port_cfg; 209 BFAD_E_CREATE = 1,
210 BFAD_E_KTHREAD_CREATE_FAILED = 2,
211 BFAD_E_INIT = 3,
212 BFAD_E_INIT_SUCCESS = 4,
213 BFAD_E_INIT_FAILED = 5,
214 BFAD_E_INTR_INIT_FAILED = 6,
215 BFAD_E_FCS_EXIT_COMP = 7,
216 BFAD_E_EXIT_COMP = 8,
217 BFAD_E_STOP = 9
201}; 218};
202 219
203/* 220/*
@@ -208,30 +225,30 @@ struct bfad_rport_s {
208}; 225};
209 226
210struct bfad_buf_info { 227struct bfad_buf_info {
211 void *virt; 228 void *virt;
212 dma_addr_t phys; 229 dma_addr_t phys;
213 u32 size; 230 u32 size;
214}; 231};
215 232
216struct bfad_fcxp { 233struct bfad_fcxp {
217 struct bfad_port_s *port; 234 struct bfad_port_s *port;
218 struct bfa_rport_s *bfa_rport; 235 struct bfa_rport_s *bfa_rport;
219 bfa_status_t req_status; 236 bfa_status_t req_status;
220 u16 tag; 237 u16 tag;
221 u16 rsp_len; 238 u16 rsp_len;
222 u16 rsp_maxlen; 239 u16 rsp_maxlen;
223 u8 use_ireqbuf; 240 u8 use_ireqbuf;
224 u8 use_irspbuf; 241 u8 use_irspbuf;
225 u32 num_req_sgles; 242 u32 num_req_sgles;
226 u32 num_rsp_sgles; 243 u32 num_rsp_sgles;
227 struct fchs_s fchs; 244 struct fchs_s fchs;
228 void *reqbuf_info; 245 void *reqbuf_info;
229 void *rspbuf_info; 246 void *rspbuf_info;
230 struct bfa_sge_s *req_sge; 247 struct bfa_sge_s *req_sge;
231 struct bfa_sge_s *rsp_sge; 248 struct bfa_sge_s *rsp_sge;
232 fcxp_send_cb_t send_cbfn; 249 fcxp_send_cb_t send_cbfn;
233 void *send_cbarg; 250 void *send_cbarg;
234 void *bfa_fcxp; 251 void *bfa_fcxp;
235 struct completion comp; 252 struct completion comp;
236}; 253};
237 254
@@ -244,34 +261,48 @@ struct bfad_hal_comp {
244 * Macro to obtain the immediate lower power 261 * Macro to obtain the immediate lower power
245 * of two for the integer. 262 * of two for the integer.
246 */ 263 */
247#define nextLowerInt(x) \ 264#define nextLowerInt(x) \
248do { \ 265do { \
249 int j; \ 266 int i; \
250 (*x)--; \ 267 (*x)--; \
251 for (j = 1; j < (sizeof(int) * 8); j <<= 1) \ 268 for (i = 1; i < (sizeof(int)*8); i <<= 1) \
252 (*x) = (*x) | (*x) >> j; \ 269 (*x) = (*x) | (*x) >> i; \
253 (*x)++; \ 270 (*x)++; \
254 (*x) = (*x) >> 1; \ 271 (*x) = (*x) >> 1; \
255} while (0) 272} while (0)
256 273
257 274
258bfa_status_t bfad_vport_create(struct bfad_s *bfad, u16 vf_id, 275#define list_remove_head(list, entry, type, member) \
259 struct bfa_port_cfg_s *port_cfg, struct device *dev); 276do { \
260bfa_status_t bfad_vf_create(struct bfad_s *bfad, u16 vf_id, 277 entry = NULL; \
261 struct bfa_port_cfg_s *port_cfg); 278 if (!list_empty(list)) { \
262bfa_status_t bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role); 279 entry = list_entry((list)->next, type, member); \
263bfa_status_t bfad_drv_init(struct bfad_s *bfad); 280 list_del_init(&entry->member); \
264bfa_status_t bfad_start_ops(struct bfad_s *bfad); 281 } \
265void bfad_drv_start(struct bfad_s *bfad); 282} while (0)
266void bfad_uncfg_pport(struct bfad_s *bfad);
267void bfad_drv_stop(struct bfad_s *bfad);
268void bfad_remove_intr(struct bfad_s *bfad);
269void bfad_hal_mem_release(struct bfad_s *bfad);
270void bfad_hcb_comp(void *arg, bfa_status_t status);
271
272int bfad_setup_intr(struct bfad_s *bfad);
273void bfad_remove_intr(struct bfad_s *bfad);
274 283
284#define list_get_first(list, type, member) \
285((list_empty(list)) ? NULL : \
286 list_entry((list)->next, type, member))
287
288bfa_status_t bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
289 struct bfa_lport_cfg_s *port_cfg,
290 struct device *dev);
291bfa_status_t bfad_vf_create(struct bfad_s *bfad, u16 vf_id,
292 struct bfa_lport_cfg_s *port_cfg);
293bfa_status_t bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role);
294bfa_status_t bfad_drv_init(struct bfad_s *bfad);
295bfa_status_t bfad_start_ops(struct bfad_s *bfad);
296void bfad_drv_start(struct bfad_s *bfad);
297void bfad_uncfg_pport(struct bfad_s *bfad);
298void bfad_stop(struct bfad_s *bfad);
299void bfad_fcs_stop(struct bfad_s *bfad);
300void bfad_remove_intr(struct bfad_s *bfad);
301void bfad_hal_mem_release(struct bfad_s *bfad);
302void bfad_hcb_comp(void *arg, bfa_status_t status);
303
304int bfad_setup_intr(struct bfad_s *bfad);
305void bfad_remove_intr(struct bfad_s *bfad);
275void bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg); 306void bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg);
276bfa_status_t bfad_hal_mem_alloc(struct bfad_s *bfad); 307bfa_status_t bfad_hal_mem_alloc(struct bfad_s *bfad);
277void bfad_bfa_tmo(unsigned long data); 308void bfad_bfa_tmo(unsigned long data);
@@ -280,9 +311,6 @@ int bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad);
280void bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad); 311void bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad);
281void bfad_fcs_port_cfg(struct bfad_s *bfad); 312void bfad_fcs_port_cfg(struct bfad_s *bfad);
282void bfad_drv_uninit(struct bfad_s *bfad); 313void bfad_drv_uninit(struct bfad_s *bfad);
283void bfad_drv_log_level_set(struct bfad_s *bfad);
284bfa_status_t bfad_fc4_module_init(void);
285void bfad_fc4_module_exit(void);
286int bfad_worker(void *ptr); 314int bfad_worker(void *ptr);
287void bfad_debugfs_init(struct bfad_port_s *port); 315void bfad_debugfs_init(struct bfad_port_s *port);
288void bfad_debugfs_exit(struct bfad_port_s *port); 316void bfad_debugfs_exit(struct bfad_port_s *port);
@@ -294,10 +322,30 @@ int bfad_os_get_linkup_delay(struct bfad_s *bfad);
294int bfad_install_msix_handler(struct bfad_s *bfad); 322int bfad_install_msix_handler(struct bfad_s *bfad);
295 323
296extern struct idr bfad_im_port_index; 324extern struct idr bfad_im_port_index;
325extern struct pci_device_id bfad_id_table[];
297extern struct list_head bfad_list; 326extern struct list_head bfad_list;
298extern int bfa_lun_queue_depth; 327extern char *os_name;
299extern int bfad_supported_fc4s; 328extern char *os_patch;
300extern int bfa_linkup_delay; 329extern char *host_name;
330extern int num_rports;
331extern int num_ios;
332extern int num_tms;
333extern int num_fcxps;
334extern int num_ufbufs;
335extern int reqq_size;
336extern int rspq_size;
337extern int num_sgpgs;
338extern int rport_del_timeout;
339extern int bfa_lun_queue_depth;
340extern int bfa_io_max_sge;
341extern int log_level;
342extern int ioc_auto_recover;
343extern int bfa_linkup_delay;
344extern int msix_disable_cb;
345extern int msix_disable_ct;
346extern int fdmi_enable;
347extern int supported_fc4s;
348extern int pcie_max_read_reqsz;
301extern int bfa_debugfs_enable; 349extern int bfa_debugfs_enable;
302extern struct mutex bfad_mutex; 350extern struct mutex bfad_mutex;
303 351
diff --git a/drivers/scsi/bfa/bfad_fwimg.c b/drivers/scsi/bfa/bfad_fwimg.c
deleted file mode 100644
index 1baca1a12085..000000000000
--- a/drivers/scsi/bfa/bfad_fwimg.c
+++ /dev/null
@@ -1,131 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfad_fwimg.c Linux driver PCI interface module.
20 */
21#include <bfa_os_inc.h>
22#include <bfad_drv.h>
23#include <bfad_im_compat.h>
24#include <defs/bfa_defs_version.h>
25#include <linux/errno.h>
26#include <linux/sched.h>
27#include <linux/init.h>
28#include <linux/fs.h>
29#include <asm/uaccess.h>
30#include <asm/fcntl.h>
31#include <linux/pci.h>
32#include <linux/firmware.h>
33#include <bfa_fwimg_priv.h>
34#include <bfa.h>
35
36u32 bfi_image_ct_fc_size;
37u32 bfi_image_ct_cna_size;
38u32 bfi_image_cb_fc_size;
39u32 *bfi_image_ct_fc;
40u32 *bfi_image_ct_cna;
41u32 *bfi_image_cb_fc;
42
43
44#define BFAD_FW_FILE_CT_FC "ctfw_fc.bin"
45#define BFAD_FW_FILE_CT_CNA "ctfw_cna.bin"
46#define BFAD_FW_FILE_CB_FC "cbfw_fc.bin"
47MODULE_FIRMWARE(BFAD_FW_FILE_CT_FC);
48MODULE_FIRMWARE(BFAD_FW_FILE_CT_CNA);
49MODULE_FIRMWARE(BFAD_FW_FILE_CB_FC);
50
51u32 *
52bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
53 u32 *bfi_image_size, char *fw_name)
54{
55 const struct firmware *fw;
56
57 if (request_firmware(&fw, fw_name, &pdev->dev)) {
58 printk(KERN_ALERT "Can't locate firmware %s\n", fw_name);
59 goto error;
60 }
61
62 *bfi_image = vmalloc(fw->size);
63 if (NULL == *bfi_image) {
64 printk(KERN_ALERT "Fail to allocate buffer for fw image "
65 "size=%x!\n", (u32) fw->size);
66 goto error;
67 }
68
69 memcpy(*bfi_image, fw->data, fw->size);
70 *bfi_image_size = fw->size/sizeof(u32);
71
72 return *bfi_image;
73
74error:
75 return NULL;
76}
77
78u32 *
79bfad_get_firmware_buf(struct pci_dev *pdev)
80{
81 if (pdev->device == BFA_PCI_DEVICE_ID_CT_FC) {
82 if (bfi_image_ct_fc_size == 0)
83 bfad_read_firmware(pdev, &bfi_image_ct_fc,
84 &bfi_image_ct_fc_size, BFAD_FW_FILE_CT_FC);
85 return bfi_image_ct_fc;
86 } else if (pdev->device == BFA_PCI_DEVICE_ID_CT) {
87 if (bfi_image_ct_cna_size == 0)
88 bfad_read_firmware(pdev, &bfi_image_ct_cna,
89 &bfi_image_ct_cna_size, BFAD_FW_FILE_CT_CNA);
90 return bfi_image_ct_cna;
91 } else {
92 if (bfi_image_cb_fc_size == 0)
93 bfad_read_firmware(pdev, &bfi_image_cb_fc,
94 &bfi_image_cb_fc_size, BFAD_FW_FILE_CB_FC);
95 return bfi_image_cb_fc;
96 }
97}
98
99u32 *
100bfi_image_ct_fc_get_chunk(u32 off)
101{ return (u32 *)(bfi_image_ct_fc + off); }
102
103u32 *
104bfi_image_ct_cna_get_chunk(u32 off)
105{ return (u32 *)(bfi_image_ct_cna + off); }
106
107u32 *
108bfi_image_cb_fc_get_chunk(u32 off)
109{ return (u32 *)(bfi_image_cb_fc + off); }
110
111uint32_t *
112bfi_image_get_chunk(int type, uint32_t off)
113{
114 switch (type) {
115 case BFI_IMAGE_CT_FC: return bfi_image_ct_fc_get_chunk(off); break;
116 case BFI_IMAGE_CT_CNA: return bfi_image_ct_cna_get_chunk(off); break;
117 case BFI_IMAGE_CB_FC: return bfi_image_cb_fc_get_chunk(off); break;
118 default: return 0; break;
119 }
120}
121
122uint32_t
123bfi_image_get_size(int type)
124{
125 switch (type) {
126 case BFI_IMAGE_CT_FC: return bfi_image_ct_fc_size; break;
127 case BFI_IMAGE_CT_CNA: return bfi_image_ct_cna_size; break;
128 case BFI_IMAGE_CB_FC: return bfi_image_cb_fc_size; break;
129 default: return 0; break;
130 }
131}
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 6ef87f6fcdbb..d950ee44016e 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -19,12 +19,10 @@
19 * bfad_im.c Linux driver IM module. 19 * bfad_im.c Linux driver IM module.
20 */ 20 */
21 21
22#include <linux/slab.h>
23#include "bfad_drv.h" 22#include "bfad_drv.h"
24#include "bfad_im.h" 23#include "bfad_im.h"
25#include "bfad_trcmod.h" 24#include "bfa_cb_ioim.h"
26#include "bfa_cb_ioim_macros.h" 25#include "bfa_fcs.h"
27#include <fcb/bfa_fcb_fcpim.h>
28 26
29BFA_TRC_FILE(LDRV, IM); 27BFA_TRC_FILE(LDRV, IM);
30 28
@@ -33,8 +31,10 @@ struct scsi_transport_template *bfad_im_scsi_transport_template;
33struct scsi_transport_template *bfad_im_scsi_vport_transport_template; 31struct scsi_transport_template *bfad_im_scsi_vport_transport_template;
34static void bfad_im_itnim_work_handler(struct work_struct *work); 32static void bfad_im_itnim_work_handler(struct work_struct *work);
35static int bfad_im_queuecommand(struct scsi_cmnd *cmnd, 33static int bfad_im_queuecommand(struct scsi_cmnd *cmnd,
36 void (*done)(struct scsi_cmnd *)); 34 void (*done)(struct scsi_cmnd *));
37static int bfad_im_slave_alloc(struct scsi_device *sdev); 35static int bfad_im_slave_alloc(struct scsi_device *sdev);
36static void bfad_im_fc_rport_add(struct bfad_im_port_s *im_port,
37 struct bfad_itnim_s *itnim);
38 38
39void 39void
40bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio, 40bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
@@ -58,6 +58,7 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
58 sns_len = SCSI_SENSE_BUFFERSIZE; 58 sns_len = SCSI_SENSE_BUFFERSIZE;
59 memcpy(cmnd->sense_buffer, sns_info, sns_len); 59 memcpy(cmnd->sense_buffer, sns_info, sns_len);
60 } 60 }
61
61 if (residue > 0) { 62 if (residue > 0) {
62 bfa_trc(bfad, residue); 63 bfa_trc(bfad, residue);
63 scsi_set_resid(cmnd, residue); 64 scsi_set_resid(cmnd, residue);
@@ -76,7 +77,8 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
76 case BFI_IOIM_STS_TIMEDOUT: 77 case BFI_IOIM_STS_TIMEDOUT:
77 case BFI_IOIM_STS_PATHTOV: 78 case BFI_IOIM_STS_PATHTOV:
78 default: 79 default:
79 cmnd->result = ScsiResult(DID_ERROR, 0); 80 host_status = DID_ERROR;
81 cmnd->result = ScsiResult(host_status, 0);
80 } 82 }
81 83
82 /* Unmap DMA, if host is NULL, it means a scsi passthru cmd */ 84 /* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
@@ -162,11 +164,6 @@ bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
162 wake_up(wq); 164 wake_up(wq);
163} 165}
164 166
165void
166bfa_cb_ioim_resfree(void *drv)
167{
168}
169
170/** 167/**
171 * Scsi_Host_template SCSI host template 168 * Scsi_Host_template SCSI host template
172 */ 169 */
@@ -179,15 +176,23 @@ bfad_im_info(struct Scsi_Host *shost)
179 static char bfa_buf[256]; 176 static char bfa_buf[256];
180 struct bfad_im_port_s *im_port = 177 struct bfad_im_port_s *im_port =
181 (struct bfad_im_port_s *) shost->hostdata[0]; 178 (struct bfad_im_port_s *) shost->hostdata[0];
182 struct bfad_s *bfad = im_port->bfad; 179 struct bfad_s *bfad = im_port->bfad;
180 struct bfa_s *bfa = &bfad->bfa;
181 struct bfa_ioc_s *ioc = &bfa->ioc;
183 char model[BFA_ADAPTER_MODEL_NAME_LEN]; 182 char model[BFA_ADAPTER_MODEL_NAME_LEN];
184 183
185 bfa_get_adapter_model(&bfad->bfa, model); 184 bfa_get_adapter_model(bfa, model);
186 185
187 memset(bfa_buf, 0, sizeof(bfa_buf)); 186 memset(bfa_buf, 0, sizeof(bfa_buf));
188 snprintf(bfa_buf, sizeof(bfa_buf), 187 if (ioc->ctdev)
189 "Brocade FC/FCOE Adapter, " "model: %s hwpath: %s driver: %s", 188 snprintf(bfa_buf, sizeof(bfa_buf),
189 "Brocade FCOE Adapter, " "model: %s hwpath: %s driver: %s",
190 model, bfad->pci_name, BFAD_DRIVER_VERSION);
191 else
192 snprintf(bfa_buf, sizeof(bfa_buf),
193 "Brocade FC Adapter, " "model: %s hwpath: %s driver: %s",
190 model, bfad->pci_name, BFAD_DRIVER_VERSION); 194 model, bfad->pci_name, BFAD_DRIVER_VERSION);
195
191 return bfa_buf; 196 return bfa_buf;
192} 197}
193 198
@@ -221,9 +226,9 @@ bfad_im_abort_handler(struct scsi_cmnd *cmnd)
221 } 226 }
222 227
223 bfa_trc(bfad, hal_io->iotag); 228 bfa_trc(bfad, hal_io->iotag);
224 bfa_log(bfad->logmod, BFA_LOG_LINUX_SCSI_ABORT, 229 BFA_LOG(KERN_INFO, bfad, log_level, "scsi%d: abort cmnd %p iotag %x\n",
225 im_port->shost->host_no, cmnd, hal_io->iotag); 230 im_port->shost->host_no, cmnd, hal_io->iotag);
226 bfa_ioim_abort(hal_io); 231 (void) bfa_ioim_abort(hal_io);
227 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 232 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
228 233
229 /* Need to wait until the command get aborted */ 234 /* Need to wait until the command get aborted */
@@ -237,7 +242,8 @@ bfad_im_abort_handler(struct scsi_cmnd *cmnd)
237 242
238 cmnd->scsi_done(cmnd); 243 cmnd->scsi_done(cmnd);
239 bfa_trc(bfad, hal_io->iotag); 244 bfa_trc(bfad, hal_io->iotag);
240 bfa_log(bfad->logmod, BFA_LOG_LINUX_SCSI_ABORT_COMP, 245 BFA_LOG(KERN_INFO, bfad, log_level,
246 "scsi%d: complete abort 0x%p iotag 0x%x\n",
241 im_port->shost->host_no, cmnd, hal_io->iotag); 247 im_port->shost->host_no, cmnd, hal_io->iotag);
242 return SUCCESS; 248 return SUCCESS;
243out: 249out:
@@ -255,8 +261,8 @@ bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd,
255 261
256 tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd); 262 tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
257 if (!tskim) { 263 if (!tskim) {
258 BFA_DEV_PRINTF(bfad, BFA_ERR, 264 BFA_LOG(KERN_ERR, bfad, log_level,
259 "target reset, fail to allocate tskim\n"); 265 "target reset, fail to allocate tskim\n");
260 rc = BFA_STATUS_FAILED; 266 rc = BFA_STATUS_FAILED;
261 goto out; 267 goto out;
262 } 268 }
@@ -306,7 +312,7 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
306 312
307 tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd); 313 tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
308 if (!tskim) { 314 if (!tskim) {
309 BFA_DEV_PRINTF(bfad, BFA_ERR, 315 BFA_LOG(KERN_ERR, bfad, log_level,
310 "LUN reset, fail to allocate tskim"); 316 "LUN reset, fail to allocate tskim");
311 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 317 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
312 rc = FAILED; 318 rc = FAILED;
@@ -331,8 +337,8 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
331 337
332 task_status = cmnd->SCp.Status >> 1; 338 task_status = cmnd->SCp.Status >> 1;
333 if (task_status != BFI_TSKIM_STS_OK) { 339 if (task_status != BFI_TSKIM_STS_OK) {
334 BFA_DEV_PRINTF(bfad, BFA_ERR, "LUN reset failure, status: %d\n", 340 BFA_LOG(KERN_ERR, bfad, log_level,
335 task_status); 341 "LUN reset failure, status: %d\n", task_status);
336 rc = FAILED; 342 rc = FAILED;
337 } 343 }
338 344
@@ -375,7 +381,7 @@ bfad_im_reset_bus_handler(struct scsi_cmnd *cmnd)
375 381
376 task_status = cmnd->SCp.Status >> 1; 382 task_status = cmnd->SCp.Status >> 1;
377 if (task_status != BFI_TSKIM_STS_OK) { 383 if (task_status != BFI_TSKIM_STS_OK) {
378 BFA_DEV_PRINTF(bfad, BFA_ERR, 384 BFA_LOG(KERN_ERR, bfad, log_level,
379 "target reset failure," 385 "target reset failure,"
380 " status: %d\n", task_status); 386 " status: %d\n", task_status);
381 err_cnt++; 387 err_cnt++;
@@ -438,6 +444,7 @@ bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv)
438 wwn_t wwpn; 444 wwn_t wwpn;
439 u32 fcid; 445 u32 fcid;
440 char wwpn_str[32], fcid_str[16]; 446 char wwpn_str[32], fcid_str[16];
447 struct bfad_im_s *im = itnim_drv->im;
441 448
442 /* online to free state transtion should not happen */ 449 /* online to free state transtion should not happen */
443 bfa_assert(itnim_drv->state != ITNIM_STATE_ONLINE); 450 bfa_assert(itnim_drv->state != ITNIM_STATE_ONLINE);
@@ -454,10 +461,14 @@ bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv)
454 fcid = bfa_fcs_itnim_get_fcid(&itnim_drv->fcs_itnim); 461 fcid = bfa_fcs_itnim_get_fcid(&itnim_drv->fcs_itnim);
455 wwn2str(wwpn_str, wwpn); 462 wwn2str(wwpn_str, wwpn);
456 fcid2str(fcid_str, fcid); 463 fcid2str(fcid_str, fcid);
457 bfa_log(bfad->logmod, BFA_LOG_LINUX_ITNIM_FREE, 464 BFA_LOG(KERN_INFO, bfad, log_level,
465 "ITNIM FREE scsi%d: FCID: %s WWPN: %s\n",
458 port->im_port->shost->host_no, 466 port->im_port->shost->host_no,
459 fcid_str, wwpn_str); 467 fcid_str, wwpn_str);
460 bfad_os_itnim_process(itnim_drv); 468
469 /* ITNIM processing */
470 if (itnim_drv->queue_work)
471 queue_work(im->drv_workq, &itnim_drv->itnim_work);
461} 472}
462 473
463/** 474/**
@@ -468,13 +479,17 @@ void
468bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv) 479bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv)
469{ 480{
470 struct bfad_port_s *port; 481 struct bfad_port_s *port;
482 struct bfad_im_s *im = itnim_drv->im;
471 483
472 itnim_drv->bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim_drv->fcs_itnim); 484 itnim_drv->bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim_drv->fcs_itnim);
473 port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim); 485 port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
474 itnim_drv->state = ITNIM_STATE_ONLINE; 486 itnim_drv->state = ITNIM_STATE_ONLINE;
475 itnim_drv->queue_work = 1; 487 itnim_drv->queue_work = 1;
476 itnim_drv->im_port = port->im_port; 488 itnim_drv->im_port = port->im_port;
477 bfad_os_itnim_process(itnim_drv); 489
490 /* ITNIM processing */
491 if (itnim_drv->queue_work)
492 queue_work(im->drv_workq, &itnim_drv->itnim_work);
478} 493}
479 494
480/** 495/**
@@ -486,6 +501,7 @@ bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv)
486{ 501{
487 struct bfad_port_s *port; 502 struct bfad_port_s *port;
488 struct bfad_s *bfad; 503 struct bfad_s *bfad;
504 struct bfad_im_s *im = itnim_drv->im;
489 505
490 port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim); 506 port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
491 bfad = port->bfad; 507 bfad = port->bfad;
@@ -497,16 +513,10 @@ bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv)
497 itnim_drv->im_port = port->im_port; 513 itnim_drv->im_port = port->im_port;
498 itnim_drv->state = ITNIM_STATE_OFFLINE_PENDING; 514 itnim_drv->state = ITNIM_STATE_OFFLINE_PENDING;
499 itnim_drv->queue_work = 1; 515 itnim_drv->queue_work = 1;
500 bfad_os_itnim_process(itnim_drv);
501}
502 516
503/** 517 /* ITNIM processing */
504 * BFA FCS itnim timeout callback. 518 if (itnim_drv->queue_work)
505 * Context: Interrupt. bfad_lock is held 519 queue_work(im->drv_workq, &itnim_drv->itnim_work);
506 */
507void bfa_fcb_itnim_tov(struct bfad_itnim_s *itnim)
508{
509 itnim->state = ITNIM_STATE_TIMEOUT;
510} 520}
511 521
512/** 522/**
@@ -514,7 +524,7 @@ void bfa_fcb_itnim_tov(struct bfad_itnim_s *itnim)
514 */ 524 */
515int 525int
516bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port, 526bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
517 struct device *dev) 527 struct device *dev)
518{ 528{
519 int error = 1; 529 int error = 1;
520 530
@@ -580,7 +590,7 @@ void
580bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port) 590bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
581{ 591{
582 bfa_trc(bfad, bfad->inst_no); 592 bfa_trc(bfad, bfad->inst_no);
583 bfa_log(bfad->logmod, BFA_LOG_LINUX_SCSI_HOST_FREE, 593 BFA_LOG(KERN_INFO, bfad, log_level, "Free scsi%d\n",
584 im_port->shost->host_no); 594 im_port->shost->host_no);
585 595
586 fc_remove_host(im_port->shost); 596 fc_remove_host(im_port->shost);
@@ -598,14 +608,11 @@ bfad_im_port_delete_handler(struct work_struct *work)
598{ 608{
599 struct bfad_im_port_s *im_port = 609 struct bfad_im_port_s *im_port =
600 container_of(work, struct bfad_im_port_s, port_delete_work); 610 container_of(work, struct bfad_im_port_s, port_delete_work);
601 struct bfad_s *bfad = im_port->bfad;
602 611
603 if (im_port->port->pvb_type != BFAD_PORT_PHYS_BASE) { 612 if (im_port->port->pvb_type != BFAD_PORT_PHYS_BASE) {
604 im_port->flags |= BFAD_PORT_DELETE; 613 im_port->flags |= BFAD_PORT_DELETE;
605 fc_vport_terminate(im_port->fc_vport); 614 fc_vport_terminate(im_port->fc_vport);
606 atomic_dec(&bfad->wq_reqcnt);
607 } 615 }
608
609} 616}
610 617
611bfa_status_t 618bfa_status_t
@@ -636,11 +643,8 @@ bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port)
636{ 643{
637 struct bfad_im_port_s *im_port = port->im_port; 644 struct bfad_im_port_s *im_port = port->im_port;
638 645
639 if (im_port->port->pvb_type != BFAD_PORT_PHYS_BASE) { 646 queue_work(bfad->im->drv_workq,
640 atomic_inc(&bfad->wq_reqcnt);
641 queue_work(bfad->im->drv_workq,
642 &im_port->port_delete_work); 647 &im_port->port_delete_work);
643 }
644} 648}
645 649
646void 650void
@@ -663,16 +667,6 @@ bfad_im_port_clean(struct bfad_im_port_s *im_port)
663 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 667 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
664} 668}
665 669
666void
667bfad_im_port_online(struct bfad_s *bfad, struct bfad_port_s *port)
668{
669}
670
671void
672bfad_im_port_offline(struct bfad_s *bfad, struct bfad_port_s *port)
673{
674}
675
676bfa_status_t 670bfa_status_t
677bfad_im_probe(struct bfad_s *bfad) 671bfad_im_probe(struct bfad_s *bfad)
678{ 672{
@@ -701,27 +695,12 @@ void
701bfad_im_probe_undo(struct bfad_s *bfad) 695bfad_im_probe_undo(struct bfad_s *bfad)
702{ 696{
703 if (bfad->im) { 697 if (bfad->im) {
704 while (atomic_read(&bfad->wq_reqcnt)) {
705 printk(KERN_INFO "bfa %s: waiting workq processing,"
706 " wq_reqcnt:%x\n", bfad->pci_name,
707 atomic_read(&bfad->wq_reqcnt));
708 schedule_timeout_uninterruptible(HZ);
709 }
710 bfad_os_destroy_workq(bfad->im); 698 bfad_os_destroy_workq(bfad->im);
711 kfree(bfad->im); 699 kfree(bfad->im);
712 bfad->im = NULL; 700 bfad->im = NULL;
713 } 701 }
714} 702}
715 703
716/**
717 * Call back function to handle IO redirection state change
718 */
719void
720bfa_cb_ioredirect_state_change(void *hcb_bfad, bfa_boolean_t ioredirect)
721{
722 /* Do nothing */
723}
724
725struct Scsi_Host * 704struct Scsi_Host *
726bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad) 705bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
727{ 706{
@@ -751,6 +730,7 @@ void
751bfad_os_destroy_workq(struct bfad_im_s *im) 730bfad_os_destroy_workq(struct bfad_im_s *im)
752{ 731{
753 if (im && im->drv_workq) { 732 if (im && im->drv_workq) {
733 flush_workqueue(im->drv_workq);
754 destroy_workqueue(im->drv_workq); 734 destroy_workqueue(im->drv_workq);
755 im->drv_workq = NULL; 735 im->drv_workq = NULL;
756 } 736 }
@@ -762,7 +742,7 @@ bfad_os_thread_workq(struct bfad_s *bfad)
762 struct bfad_im_s *im = bfad->im; 742 struct bfad_im_s *im = bfad->im;
763 743
764 bfa_trc(bfad, 0); 744 bfa_trc(bfad, 0);
765 snprintf(im->drv_workq_name, BFAD_KOBJ_NAME_LEN, "bfad_wq_%d", 745 snprintf(im->drv_workq_name, KOBJ_NAME_LEN, "bfad_wq_%d",
766 bfad->inst_no); 746 bfad->inst_no);
767 im->drv_workq = create_singlethread_workqueue(im->drv_workq_name); 747 im->drv_workq = create_singlethread_workqueue(im->drv_workq_name);
768 if (!im->drv_workq) 748 if (!im->drv_workq)
@@ -832,12 +812,6 @@ struct scsi_host_template bfad_im_vport_template = {
832 .max_sectors = 0xFFFF, 812 .max_sectors = 0xFFFF,
833}; 813};
834 814
835void
836bfad_im_probe_post(struct bfad_im_s *im)
837{
838 flush_workqueue(im->drv_workq);
839}
840
841bfa_status_t 815bfa_status_t
842bfad_im_module_init(void) 816bfad_im_module_init(void)
843{ 817{
@@ -861,20 +835,12 @@ bfad_im_module_exit(void)
861{ 835{
862 if (bfad_im_scsi_transport_template) 836 if (bfad_im_scsi_transport_template)
863 fc_release_transport(bfad_im_scsi_transport_template); 837 fc_release_transport(bfad_im_scsi_transport_template);
838
864 if (bfad_im_scsi_vport_transport_template) 839 if (bfad_im_scsi_vport_transport_template)
865 fc_release_transport(bfad_im_scsi_vport_transport_template); 840 fc_release_transport(bfad_im_scsi_vport_transport_template);
866} 841}
867 842
868void 843void
869bfad_os_itnim_process(struct bfad_itnim_s *itnim_drv)
870{
871 struct bfad_im_s *im = itnim_drv->im;
872
873 if (itnim_drv->queue_work)
874 queue_work(im->drv_workq, &itnim_drv->itnim_work);
875}
876
877void
878bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev) 844bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
879{ 845{
880 struct scsi_device *tmp_sdev; 846 struct scsi_device *tmp_sdev;
@@ -916,9 +882,6 @@ bfad_os_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
916 } 882 }
917} 883}
918 884
919
920
921
922struct bfad_itnim_s * 885struct bfad_itnim_s *
923bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id) 886bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id)
924{ 887{
@@ -949,44 +912,64 @@ bfad_im_slave_alloc(struct scsi_device *sdev)
949 return 0; 912 return 0;
950} 913}
951 914
915static u32
916bfad_im_supported_speeds(struct bfa_s *bfa)
917{
918 struct bfa_ioc_attr_s ioc_attr;
919 u32 supported_speed = 0;
920
921 bfa_get_attr(bfa, &ioc_attr);
922 if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) {
923 if (ioc_attr.adapter_attr.is_mezz) {
924 supported_speed |= FC_PORTSPEED_8GBIT |
925 FC_PORTSPEED_4GBIT |
926 FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
927 } else {
928 supported_speed |= FC_PORTSPEED_8GBIT |
929 FC_PORTSPEED_4GBIT |
930 FC_PORTSPEED_2GBIT;
931 }
932 } else if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_4GBPS) {
933 supported_speed |= FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
934 FC_PORTSPEED_1GBIT;
935 } else if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_10GBPS) {
936 supported_speed |= FC_PORTSPEED_10GBIT;
937 }
938 return supported_speed;
939}
940
952void 941void
953bfad_os_fc_host_init(struct bfad_im_port_s *im_port) 942bfad_os_fc_host_init(struct bfad_im_port_s *im_port)
954{ 943{
955 struct Scsi_Host *host = im_port->shost; 944 struct Scsi_Host *host = im_port->shost;
956 struct bfad_s *bfad = im_port->bfad; 945 struct bfad_s *bfad = im_port->bfad;
957 struct bfad_port_s *port = im_port->port; 946 struct bfad_port_s *port = im_port->port;
958 struct bfa_pport_attr_s pattr; 947 struct bfa_port_attr_s pattr;
959 char model[BFA_ADAPTER_MODEL_NAME_LEN]; 948 struct bfa_lport_attr_s port_attr;
960 char fw_ver[BFA_VERSION_LEN]; 949 char symname[BFA_SYMNAME_MAXLEN];
961 950
962 fc_host_node_name(host) = 951 fc_host_node_name(host) =
963 bfa_os_htonll((bfa_fcs_port_get_nwwn(port->fcs_port))); 952 bfa_os_htonll((bfa_fcs_lport_get_nwwn(port->fcs_port)));
964 fc_host_port_name(host) = 953 fc_host_port_name(host) =
965 bfa_os_htonll((bfa_fcs_port_get_pwwn(port->fcs_port))); 954 bfa_os_htonll((bfa_fcs_lport_get_pwwn(port->fcs_port)));
966 fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa); 955 fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa);
967 956
968 fc_host_supported_classes(host) = FC_COS_CLASS3; 957 fc_host_supported_classes(host) = FC_COS_CLASS3;
969 958
970 memset(fc_host_supported_fc4s(host), 0, 959 memset(fc_host_supported_fc4s(host), 0,
971 sizeof(fc_host_supported_fc4s(host))); 960 sizeof(fc_host_supported_fc4s(host)));
972 if (bfad_supported_fc4s & (BFA_PORT_ROLE_FCP_IM | BFA_PORT_ROLE_FCP_TM)) 961 if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM)
973 /* For FCP type 0x08 */ 962 /* For FCP type 0x08 */
974 fc_host_supported_fc4s(host)[2] = 1; 963 fc_host_supported_fc4s(host)[2] = 1;
975 if (bfad_supported_fc4s & BFA_PORT_ROLE_FCP_IPFC)
976 /* For LLC/SNAP type 0x05 */
977 fc_host_supported_fc4s(host)[3] = 0x20;
978 /* For fibre channel services type 0x20 */ 964 /* For fibre channel services type 0x20 */
979 fc_host_supported_fc4s(host)[7] = 1; 965 fc_host_supported_fc4s(host)[7] = 1;
980 966
981 bfa_get_adapter_model(&bfad->bfa, model); 967 bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
982 bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver); 968 strncpy(symname, port_attr.port_cfg.sym_name.symname,
983 sprintf(fc_host_symbolic_name(host), "Brocade %s FV%s DV%s", 969 BFA_SYMNAME_MAXLEN);
984 model, fw_ver, BFAD_DRIVER_VERSION); 970 sprintf(fc_host_symbolic_name(host), "%s", symname);
985 971
986 fc_host_supported_speeds(host) = 0; 972 fc_host_supported_speeds(host) = bfad_im_supported_speeds(&bfad->bfa);
987 fc_host_supported_speeds(host) |=
988 FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
989 FC_PORTSPEED_1GBIT;
990 973
991 bfa_fcport_get_attr(&bfad->bfa, &pattr); 974 bfa_fcport_get_attr(&bfad->bfa, &pattr);
992 fc_host_maxframe_size(host) = pattr.pport_cfg.maxfrsize; 975 fc_host_maxframe_size(host) = pattr.pport_cfg.maxfrsize;
@@ -1065,7 +1048,9 @@ bfad_im_itnim_work_handler(struct work_struct *work)
1065 fcid2str(fcid_str, fcid); 1048 fcid2str(fcid_str, fcid);
1066 list_add_tail(&itnim->list_entry, 1049 list_add_tail(&itnim->list_entry,
1067 &im_port->itnim_mapped_list); 1050 &im_port->itnim_mapped_list);
1068 bfa_log(bfad->logmod, BFA_LOG_LINUX_ITNIM_ONLINE, 1051 BFA_LOG(KERN_INFO, bfad, log_level,
1052 "ITNIM ONLINE Target: %d:0:%d "
1053 "FCID: %s WWPN: %s\n",
1069 im_port->shost->host_no, 1054 im_port->shost->host_no,
1070 itnim->scsi_tgt_id, 1055 itnim->scsi_tgt_id,
1071 fcid_str, wwpn_str); 1056 fcid_str, wwpn_str);
@@ -1096,7 +1081,9 @@ bfad_im_itnim_work_handler(struct work_struct *work)
1096 wwn2str(wwpn_str, wwpn); 1081 wwn2str(wwpn_str, wwpn);
1097 fcid2str(fcid_str, fcid); 1082 fcid2str(fcid_str, fcid);
1098 list_del(&itnim->list_entry); 1083 list_del(&itnim->list_entry);
1099 bfa_log(bfad->logmod, BFA_LOG_LINUX_ITNIM_OFFLINE, 1084 BFA_LOG(KERN_INFO, bfad, log_level,
1085 "ITNIM OFFLINE Target: %d:0:%d "
1086 "FCID: %s WWPN: %s\n",
1100 im_port->shost->host_no, 1087 im_port->shost->host_no,
1101 itnim->scsi_tgt_id, 1088 itnim->scsi_tgt_id,
1102 fcid_str, wwpn_str); 1089 fcid_str, wwpn_str);
@@ -1142,7 +1129,7 @@ bfad_im_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1142 struct bfa_ioim_s *hal_io; 1129 struct bfa_ioim_s *hal_io;
1143 unsigned long flags; 1130 unsigned long flags;
1144 int rc; 1131 int rc;
1145 s16 sg_cnt = 0; 1132 int sg_cnt = 0;
1146 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 1133 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
1147 1134
1148 rc = fc_remote_port_chkready(rport); 1135 rc = fc_remote_port_chkready(rport);
@@ -1153,7 +1140,6 @@ bfad_im_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1153 } 1140 }
1154 1141
1155 sg_cnt = scsi_dma_map(cmnd); 1142 sg_cnt = scsi_dma_map(cmnd);
1156
1157 if (sg_cnt < 0) 1143 if (sg_cnt < 0)
1158 return SCSI_MLQUEUE_HOST_BUSY; 1144 return SCSI_MLQUEUE_HOST_BUSY;
1159 1145
@@ -1168,6 +1154,7 @@ bfad_im_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1168 goto out_fail_cmd; 1154 goto out_fail_cmd;
1169 } 1155 }
1170 1156
1157
1171 itnim = itnim_data->itnim; 1158 itnim = itnim_data->itnim;
1172 if (!itnim) { 1159 if (!itnim) {
1173 cmnd->result = ScsiResult(DID_IMM_RETRY, 0); 1160 cmnd->result = ScsiResult(DID_IMM_RETRY, 0);
@@ -1206,47 +1193,49 @@ bfad_os_rport_online_wait(struct bfad_s *bfad)
1206 int rport_delay = 10; 1193 int rport_delay = 10;
1207 1194
1208 for (i = 0; !(bfad->bfad_flags & BFAD_PORT_ONLINE) 1195 for (i = 0; !(bfad->bfad_flags & BFAD_PORT_ONLINE)
1209 && i < bfa_linkup_delay; i++) 1196 && i < bfa_linkup_delay; i++) {
1210 schedule_timeout_uninterruptible(HZ); 1197 set_current_state(TASK_UNINTERRUPTIBLE);
1198 schedule_timeout(HZ);
1199 }
1211 1200
1212 if (bfad->bfad_flags & BFAD_PORT_ONLINE) { 1201 if (bfad->bfad_flags & BFAD_PORT_ONLINE) {
1213 rport_delay = rport_delay < bfa_linkup_delay ? 1202 rport_delay = rport_delay < bfa_linkup_delay ?
1214 rport_delay : bfa_linkup_delay; 1203 rport_delay : bfa_linkup_delay;
1215 for (i = 0; !(bfad->bfad_flags & BFAD_RPORT_ONLINE) 1204 for (i = 0; !(bfad->bfad_flags & BFAD_RPORT_ONLINE)
1216 && i < rport_delay; i++) 1205 && i < rport_delay; i++) {
1217 schedule_timeout_uninterruptible(HZ); 1206 set_current_state(TASK_UNINTERRUPTIBLE);
1207 schedule_timeout(HZ);
1208 }
1218 1209
1219 if (rport_delay > 0 && (bfad->bfad_flags & BFAD_RPORT_ONLINE)) 1210 if (rport_delay > 0 && (bfad->bfad_flags & BFAD_RPORT_ONLINE)) {
1220 schedule_timeout_uninterruptible(rport_delay * HZ); 1211 set_current_state(TASK_UNINTERRUPTIBLE);
1212 schedule_timeout(rport_delay * HZ);
1213 }
1221 } 1214 }
1222} 1215}
1223 1216
1224int 1217int
1225bfad_os_get_linkup_delay(struct bfad_s *bfad) 1218bfad_os_get_linkup_delay(struct bfad_s *bfad)
1226{ 1219{
1227 1220 u8 nwwns = 0;
1228 u8 nwwns = 0; 1221 wwn_t wwns[BFA_PREBOOT_BOOTLUN_MAX];
1229 wwn_t wwns[BFA_PREBOOT_BOOTLUN_MAX]; 1222 int linkup_delay;
1230 int ldelay;
1231 1223
1232 /* 1224 /*
1233 * Querying for the boot target port wwns 1225 * Querying for the boot target port wwns
1234 * -- read from boot information in flash. 1226 * -- read from boot information in flash.
1235 * If nwwns > 0 => boot over SAN and set bfa_linkup_delay = 30 1227 * If nwwns > 0 => boot over SAN and set linkup_delay = 30
1236 * else => local boot machine set bfa_linkup_delay = 10 1228 * else => local boot machine set linkup_delay = 0
1237 */ 1229 */
1238 1230
1239 bfa_iocfc_get_bootwwns(&bfad->bfa, &nwwns, wwns); 1231 bfa_iocfc_get_bootwwns(&bfad->bfa, &nwwns, wwns);
1240 1232
1241 if (nwwns > 0) { 1233 if (nwwns > 0)
1242 /* If boot over SAN; linkup_delay = 30sec */ 1234 /* If Boot over SAN set linkup_delay = 30sec */
1243 ldelay = 30; 1235 linkup_delay = 30;
1244 } else { 1236 else
1245 /* If local boot; linkup_delay = 10sec */ 1237 /* If local boot; no linkup_delay */
1246 ldelay = 0; 1238 linkup_delay = 0;
1247 }
1248 1239
1249 return ldelay; 1240 return linkup_delay;
1250} 1241}
1251
1252
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 973cab4d09c7..b038c0e08921 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -18,20 +18,20 @@
18#ifndef __BFAD_IM_H__ 18#ifndef __BFAD_IM_H__
19#define __BFAD_IM_H__ 19#define __BFAD_IM_H__
20 20
21#include "fcs/bfa_fcs_fcpim.h" 21#include "bfa_fcs.h"
22#include "bfad_im_compat.h"
23 22
24#define FCPI_NAME " fcpim" 23#define FCPI_NAME " fcpim"
25 24
25#ifndef KOBJ_NAME_LEN
26#define KOBJ_NAME_LEN 20
27#endif
28
26bfa_status_t bfad_im_module_init(void); 29bfa_status_t bfad_im_module_init(void);
27void bfad_im_module_exit(void); 30void bfad_im_module_exit(void);
28bfa_status_t bfad_im_probe(struct bfad_s *bfad); 31bfa_status_t bfad_im_probe(struct bfad_s *bfad);
29void bfad_im_probe_undo(struct bfad_s *bfad); 32void bfad_im_probe_undo(struct bfad_s *bfad);
30void bfad_im_probe_post(struct bfad_im_s *im);
31bfa_status_t bfad_im_port_new(struct bfad_s *bfad, struct bfad_port_s *port); 33bfa_status_t bfad_im_port_new(struct bfad_s *bfad, struct bfad_port_s *port);
32void bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port); 34void bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port);
33void bfad_im_port_online(struct bfad_s *bfad, struct bfad_port_s *port);
34void bfad_im_port_offline(struct bfad_s *bfad, struct bfad_port_s *port);
35void bfad_im_port_clean(struct bfad_im_port_s *im_port); 35void bfad_im_port_clean(struct bfad_im_port_s *im_port);
36int bfad_im_scsi_host_alloc(struct bfad_s *bfad, 36int bfad_im_scsi_host_alloc(struct bfad_s *bfad,
37 struct bfad_im_port_s *im_port, struct device *dev); 37 struct bfad_im_port_s *im_port, struct device *dev);
@@ -44,14 +44,10 @@ void bfad_im_scsi_host_free(struct bfad_s *bfad,
44#define BFAD_LUN_RESET_TMO 60 44#define BFAD_LUN_RESET_TMO 60
45#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) 45#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
46#define BFA_QUEUE_FULL_RAMP_UP_TIME 120 46#define BFA_QUEUE_FULL_RAMP_UP_TIME 120
47#define BFAD_KOBJ_NAME_LEN 20
48 47
49/* 48/*
50 * itnim flags 49 * itnim flags
51 */ 50 */
52#define ITNIM_MAPPED 0x00000001
53
54#define SCSI_TASK_MGMT 0x00000001
55#define IO_DONE_BIT 0 51#define IO_DONE_BIT 0
56 52
57struct bfad_itnim_data_s { 53struct bfad_itnim_data_s {
@@ -64,7 +60,7 @@ struct bfad_im_port_s {
64 struct work_struct port_delete_work; 60 struct work_struct port_delete_work;
65 int idr_id; 61 int idr_id;
66 u16 cur_scsi_id; 62 u16 cur_scsi_id;
67 u16 flags; 63 u16 flags;
68 struct list_head binding_list; 64 struct list_head binding_list;
69 struct Scsi_Host *shost; 65 struct Scsi_Host *shost;
70 struct list_head itnim_mapped_list; 66 struct list_head itnim_mapped_list;
@@ -118,14 +114,13 @@ struct bfad_fcp_binding {
118struct bfad_im_s { 114struct bfad_im_s {
119 struct bfad_s *bfad; 115 struct bfad_s *bfad;
120 struct workqueue_struct *drv_workq; 116 struct workqueue_struct *drv_workq;
121 char drv_workq_name[BFAD_KOBJ_NAME_LEN]; 117 char drv_workq_name[KOBJ_NAME_LEN];
122}; 118};
123 119
124struct Scsi_Host *bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, 120struct Scsi_Host *bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port,
125 struct bfad_s *); 121 struct bfad_s *);
126bfa_status_t bfad_os_thread_workq(struct bfad_s *bfad); 122bfa_status_t bfad_os_thread_workq(struct bfad_s *bfad);
127void bfad_os_destroy_workq(struct bfad_im_s *im); 123void bfad_os_destroy_workq(struct bfad_im_s *im);
128void bfad_os_itnim_process(struct bfad_itnim_s *itnim_drv);
129void bfad_os_fc_host_init(struct bfad_im_port_s *im_port); 124void bfad_os_fc_host_init(struct bfad_im_port_s *im_port);
130void bfad_os_scsi_host_free(struct bfad_s *bfad, 125void bfad_os_scsi_host_free(struct bfad_s *bfad,
131 struct bfad_im_port_s *im_port); 126 struct bfad_im_port_s *im_port);
@@ -133,11 +128,6 @@ void bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim,
133 struct scsi_device *sdev); 128 struct scsi_device *sdev);
134void bfad_os_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev); 129void bfad_os_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev);
135struct bfad_itnim_s *bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id); 130struct bfad_itnim_s *bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id);
136int bfad_os_scsi_add_host(struct Scsi_Host *shost,
137 struct bfad_im_port_s *im_port, struct bfad_s *bfad);
138
139void bfad_im_itnim_unmap(struct bfad_im_port_s *im_port,
140 struct bfad_itnim_s *itnim);
141 131
142extern struct scsi_host_template bfad_im_scsi_host_template; 132extern struct scsi_host_template bfad_im_scsi_host_template;
143extern struct scsi_host_template bfad_im_vport_template; 133extern struct scsi_host_template bfad_im_vport_template;
@@ -146,4 +136,34 @@ extern struct fc_function_template bfad_im_vport_fc_function_template;
146extern struct scsi_transport_template *bfad_im_scsi_transport_template; 136extern struct scsi_transport_template *bfad_im_scsi_transport_template;
147extern struct scsi_transport_template *bfad_im_scsi_vport_transport_template; 137extern struct scsi_transport_template *bfad_im_scsi_vport_transport_template;
148 138
139extern struct device_attribute *bfad_im_host_attrs[];
140extern struct device_attribute *bfad_im_vport_attrs[];
141
142irqreturn_t bfad_intx(int irq, void *dev_id);
143
144/* Firmware releated */
145#define BFAD_FW_FILE_CT_FC "ctfw_fc.bin"
146#define BFAD_FW_FILE_CT_CNA "ctfw_cna.bin"
147#define BFAD_FW_FILE_CB_FC "cbfw_fc.bin"
148
149u32 *bfad_get_firmware_buf(struct pci_dev *pdev);
150u32 *bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
151 u32 *bfi_image_size, char *fw_name);
152
153static inline u32 *
154bfad_load_fwimg(struct pci_dev *pdev)
155{
156 return bfad_get_firmware_buf(pdev);
157}
158
159static inline void
160bfad_free_fwimg(void)
161{
162 if (bfi_image_ct_fc_size && bfi_image_ct_fc)
163 vfree(bfi_image_ct_fc);
164 if (bfi_image_ct_cna_size && bfi_image_ct_cna)
165 vfree(bfi_image_ct_cna);
166 if (bfi_image_cb_fc_size && bfi_image_cb_fc)
167 vfree(bfi_image_cb_fc);
168}
149#endif 169#endif
diff --git a/drivers/scsi/bfa/bfad_im_compat.h b/drivers/scsi/bfa/bfad_im_compat.h
deleted file mode 100644
index 0a122abbbe89..000000000000
--- a/drivers/scsi/bfa/bfad_im_compat.h
+++ /dev/null
@@ -1,45 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFAD_IM_COMPAT_H__
19#define __BFAD_IM_COMPAT_H__
20
21extern struct device_attribute *bfad_im_host_attrs[];
22extern struct device_attribute *bfad_im_vport_attrs[];
23
24u32 *bfad_get_firmware_buf(struct pci_dev *pdev);
25u32 *bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
26 u32 *bfi_image_size, char *fw_name);
27
28static inline u32 *
29bfad_load_fwimg(struct pci_dev *pdev)
30{
31 return bfad_get_firmware_buf(pdev);
32}
33
34static inline void
35bfad_free_fwimg(void)
36{
37 if (bfi_image_ct_fc_size && bfi_image_ct_fc)
38 vfree(bfi_image_ct_fc);
39 if (bfi_image_ct_cna_size && bfi_image_ct_cna)
40 vfree(bfi_image_ct_cna);
41 if (bfi_image_cb_fc_size && bfi_image_cb_fc)
42 vfree(bfi_image_cb_fc);
43}
44
45#endif
diff --git a/drivers/scsi/bfa/bfad_intr.c b/drivers/scsi/bfa/bfad_intr.c
deleted file mode 100644
index 56a351584f0c..000000000000
--- a/drivers/scsi/bfa/bfad_intr.c
+++ /dev/null
@@ -1,222 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include "bfad_drv.h"
19#include "bfad_trcmod.h"
20
21BFA_TRC_FILE(LDRV, INTR);
22
23/**
24 * bfa_isr BFA driver interrupt functions
25 */
26static int msix_disable_cb;
27static int msix_disable_ct;
28module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR);
29MODULE_PARM_DESC(msix_disable_cb, "Disable MSIX for Brocade-415/425/815/825"
30 " cards, default=0, Range[false:0|true:1]");
31module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR);
32MODULE_PARM_DESC(msix_disable_ct, "Disable MSIX for Brocade-1010/1020/804"
33 " cards, default=0, Range[false:0|true:1]");
34/**
35 * Line based interrupt handler.
36 */
37static irqreturn_t
38bfad_intx(int irq, void *dev_id)
39{
40 struct bfad_s *bfad = dev_id;
41 struct list_head doneq;
42 unsigned long flags;
43 bfa_boolean_t rc;
44
45 spin_lock_irqsave(&bfad->bfad_lock, flags);
46 rc = bfa_intx(&bfad->bfa);
47 if (!rc) {
48 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
49 return IRQ_NONE;
50 }
51
52 bfa_comp_deq(&bfad->bfa, &doneq);
53 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
54
55 if (!list_empty(&doneq)) {
56 bfa_comp_process(&bfad->bfa, &doneq);
57
58 spin_lock_irqsave(&bfad->bfad_lock, flags);
59 bfa_comp_free(&bfad->bfa, &doneq);
60 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
61 bfa_trc_fp(bfad, irq);
62 }
63
64 return IRQ_HANDLED;
65
66}
67
68static irqreturn_t
69bfad_msix(int irq, void *dev_id)
70{
71 struct bfad_msix_s *vec = dev_id;
72 struct bfad_s *bfad = vec->bfad;
73 struct list_head doneq;
74 unsigned long flags;
75
76 spin_lock_irqsave(&bfad->bfad_lock, flags);
77
78 bfa_msix(&bfad->bfa, vec->msix.entry);
79 bfa_comp_deq(&bfad->bfa, &doneq);
80 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
81
82 if (!list_empty(&doneq)) {
83 bfa_comp_process(&bfad->bfa, &doneq);
84
85 spin_lock_irqsave(&bfad->bfad_lock, flags);
86 bfa_comp_free(&bfad->bfa, &doneq);
87 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
88 }
89
90 return IRQ_HANDLED;
91}
92
93/**
94 * Initialize the MSIX entry table.
95 */
96static void
97bfad_init_msix_entry(struct bfad_s *bfad, struct msix_entry *msix_entries,
98 int mask, int max_bit)
99{
100 int i;
101 int match = 0x00000001;
102
103 for (i = 0, bfad->nvec = 0; i < MAX_MSIX_ENTRY; i++) {
104 if (mask & match) {
105 bfad->msix_tab[bfad->nvec].msix.entry = i;
106 bfad->msix_tab[bfad->nvec].bfad = bfad;
107 msix_entries[bfad->nvec].entry = i;
108 bfad->nvec++;
109 }
110
111 match <<= 1;
112 }
113
114}
115
116int
117bfad_install_msix_handler(struct bfad_s *bfad)
118{
119 int i, error = 0;
120
121 for (i = 0; i < bfad->nvec; i++) {
122 error = request_irq(bfad->msix_tab[i].msix.vector,
123 (irq_handler_t) bfad_msix, 0,
124 BFAD_DRIVER_NAME, &bfad->msix_tab[i]);
125 bfa_trc(bfad, i);
126 bfa_trc(bfad, bfad->msix_tab[i].msix.vector);
127 if (error) {
128 int j;
129
130 for (j = 0; j < i; j++)
131 free_irq(bfad->msix_tab[j].msix.vector,
132 &bfad->msix_tab[j]);
133
134 return 1;
135 }
136 }
137
138 return 0;
139}
140
141/**
142 * Setup MSIX based interrupt.
143 */
144int
145bfad_setup_intr(struct bfad_s *bfad)
146{
147 int error = 0;
148 u32 mask = 0, i, num_bit = 0, max_bit = 0;
149 struct msix_entry msix_entries[MAX_MSIX_ENTRY];
150 struct pci_dev *pdev = bfad->pcidev;
151
152 /* Call BFA to get the msix map for this PCI function. */
153 bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit);
154
155 /* Set up the msix entry table */
156 bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);
157
158 if ((bfa_asic_id_ct(pdev->device) && !msix_disable_ct) ||
159 (!bfa_asic_id_ct(pdev->device) && !msix_disable_cb)) {
160
161 error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
162 if (error) {
163 /*
164 * Only error number of vector is available.
165 * We don't have a mechanism to map multiple
166 * interrupts into one vector, so even if we
167 * can try to request less vectors, we don't
168 * know how to associate interrupt events to
169 * vectors. Linux doesn't dupicate vectors
170 * in the MSIX table for this case.
171 */
172
173 printk(KERN_WARNING "bfad%d: "
174 "pci_enable_msix failed (%d),"
175 " use line based.\n", bfad->inst_no, error);
176
177 goto line_based;
178 }
179
180 /* Save the vectors */
181 for (i = 0; i < bfad->nvec; i++) {
182 bfa_trc(bfad, msix_entries[i].vector);
183 bfad->msix_tab[i].msix.vector = msix_entries[i].vector;
184 }
185
186 bfa_msix_init(&bfad->bfa, bfad->nvec);
187
188 bfad->bfad_flags |= BFAD_MSIX_ON;
189
190 return error;
191 }
192
193line_based:
194 error = 0;
195 if (request_irq
196 (bfad->pcidev->irq, (irq_handler_t) bfad_intx, BFAD_IRQ_FLAGS,
197 BFAD_DRIVER_NAME, bfad) != 0) {
198 /* Enable interrupt handler failed */
199 return 1;
200 }
201
202 return error;
203}
204
205void
206bfad_remove_intr(struct bfad_s *bfad)
207{
208 int i;
209
210 if (bfad->bfad_flags & BFAD_MSIX_ON) {
211 for (i = 0; i < bfad->nvec; i++)
212 free_irq(bfad->msix_tab[i].msix.vector,
213 &bfad->msix_tab[i]);
214
215 pci_disable_msix(bfad->pcidev);
216 bfad->bfad_flags &= ~BFAD_MSIX_ON;
217 } else {
218 free_irq(bfad->pcidev->irq, bfad);
219 }
220}
221
222
diff --git a/drivers/scsi/bfa/bfad_ipfc.h b/drivers/scsi/bfa/bfad_ipfc.h
deleted file mode 100644
index 718bc5227671..000000000000
--- a/drivers/scsi/bfa/bfad_ipfc.h
+++ /dev/null
@@ -1,42 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_DRV_IPFC_H__
18#define __BFA_DRV_IPFC_H__
19
20
21#define IPFC_NAME ""
22
23#define bfad_ipfc_module_init(x) do {} while (0)
24#define bfad_ipfc_module_exit(x) do {} while (0)
25#define bfad_ipfc_probe(x) do {} while (0)
26#define bfad_ipfc_probe_undo(x) do {} while (0)
27#define bfad_ipfc_port_config(x, y) BFA_STATUS_OK
28#define bfad_ipfc_port_unconfig(x, y) do {} while (0)
29
30#define bfad_ipfc_probe_post(x) do {} while (0)
31#define bfad_ipfc_port_new(x, y, z) BFA_STATUS_OK
32#define bfad_ipfc_port_delete(x, y) do {} while (0)
33#define bfad_ipfc_port_online(x, y) do {} while (0)
34#define bfad_ipfc_port_offline(x, y) do {} while (0)
35
36#define bfad_ip_get_attr(x) BFA_STATUS_FAILED
37#define bfad_ip_reset_drv_stats(x) BFA_STATUS_FAILED
38#define bfad_ip_get_drv_stats(x, y) BFA_STATUS_FAILED
39#define bfad_ip_enable_ipfc(x, y, z) BFA_STATUS_FAILED
40
41
42#endif
diff --git a/drivers/scsi/bfa/bfad_os.c b/drivers/scsi/bfa/bfad_os.c
deleted file mode 100644
index faf47b4f1a38..000000000000
--- a/drivers/scsi/bfa/bfad_os.c
+++ /dev/null
@@ -1,50 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfad_os.c Linux driver OS specific calls.
20 */
21
22#include "bfa_os_inc.h"
23#include "bfad_drv.h"
24
25void
26bfa_os_gettimeofday(struct bfa_timeval_s *tv)
27{
28 struct timeval tmp_tv;
29
30 do_gettimeofday(&tmp_tv);
31 tv->tv_sec = (u32) tmp_tv.tv_sec;
32 tv->tv_usec = (u32) tmp_tv.tv_usec;
33}
34
35void
36bfa_os_printf(struct bfa_log_mod_s *log_mod, u32 msg_id,
37 const char *fmt, ...)
38{
39 va_list ap;
40 #define BFA_STRING_256 256
41 char tmp[BFA_STRING_256];
42
43 va_start(ap, fmt);
44 vsprintf(tmp, fmt, ap);
45 va_end(ap);
46
47 printk(tmp);
48}
49
50
diff --git a/drivers/scsi/bfa/bfad_tm.h b/drivers/scsi/bfa/bfad_tm.h
deleted file mode 100644
index 4901b1b7df02..000000000000
--- a/drivers/scsi/bfa/bfad_tm.h
+++ /dev/null
@@ -1,59 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/*
19 * Brocade Fibre Channel HBA Linux Target Mode Driver
20 */
21
22/**
23 * tm/dummy/bfad_tm.h BFA callback dummy header file for BFA Linux target mode PCI interface module.
24 */
25
26#ifndef __BFAD_TM_H__
27#define __BFAD_TM_H__
28
29#include <defs/bfa_defs_status.h>
30
31#define FCPT_NAME ""
32
33/*
34 * Called from base Linux driver on (De)Init events
35 */
36
37/* attach tgt template with scst */
38#define bfad_tm_module_init() do {} while (0)
39
40/* detach/release tgt template */
41#define bfad_tm_module_exit() do {} while (0)
42
43#define bfad_tm_probe(x) do {} while (0)
44#define bfad_tm_probe_undo(x) do {} while (0)
45#define bfad_tm_probe_post(x) do {} while (0)
46
47/*
48 * Called by base Linux driver but triggered by BFA FCS on config events
49 */
50#define bfad_tm_port_new(x, y) BFA_STATUS_OK
51#define bfad_tm_port_delete(x, y) do {} while (0)
52
53/*
54 * Called by base Linux driver but triggered by BFA FCS on PLOGI/O events
55 */
56#define bfad_tm_port_online(x, y) do {} while (0)
57#define bfad_tm_port_offline(x, y) do {} while (0)
58
59#endif
diff --git a/drivers/scsi/bfa/bfad_trcmod.h b/drivers/scsi/bfa/bfad_trcmod.h
deleted file mode 100644
index 2827b2acd041..000000000000
--- a/drivers/scsi/bfa/bfad_trcmod.h
+++ /dev/null
@@ -1,52 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfad_trcmod.h Linux driver trace modules
20 */
21
22
23#ifndef __BFAD_TRCMOD_H__
24#define __BFAD_TRCMOD_H__
25
26#include <cs/bfa_trc.h>
27
28/*
29 * !!! Only append to the enums defined here to avoid any versioning
30 * !!! needed between trace utility and driver version
31 */
32enum {
33 /* 2.6 Driver */
34 BFA_TRC_LDRV_BFAD = 1,
35 BFA_TRC_LDRV_BFAD_2_6 = 2,
36 BFA_TRC_LDRV_BFAD_2_6_9 = 3,
37 BFA_TRC_LDRV_BFAD_2_6_10 = 4,
38 BFA_TRC_LDRV_INTR = 5,
39 BFA_TRC_LDRV_IOCTL = 6,
40 BFA_TRC_LDRV_OS = 7,
41 BFA_TRC_LDRV_IM = 8,
42 BFA_TRC_LDRV_IM_2_6 = 9,
43 BFA_TRC_LDRV_IM_2_6_9 = 10,
44 BFA_TRC_LDRV_IM_2_6_10 = 11,
45 BFA_TRC_LDRV_TM = 12,
46 BFA_TRC_LDRV_IPFC = 13,
47 BFA_TRC_LDRV_IM_2_4 = 14,
48 BFA_TRC_LDRV_IM_VMW = 15,
49 BFA_TRC_LDRV_IM_LT_2_6_10 = 16,
50};
51
52#endif /* __BFAD_TRCMOD_H__ */
diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h
new file mode 100644
index 000000000000..85f2224a5733
--- /dev/null
+++ b/drivers/scsi/bfa/bfi.h
@@ -0,0 +1,579 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_H__
19#define __BFI_H__
20
21#include "bfa_defs.h"
22#include "bfa_defs_svc.h"
23
24#pragma pack(1)
25
26/**
27 * BFI FW image type
28 */
29#define BFI_FLASH_CHUNK_SZ 256 /* Flash chunk size */
30#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
31enum {
32 BFI_IMAGE_CB_FC,
33 BFI_IMAGE_CT_FC,
34 BFI_IMAGE_CT_CNA,
35 BFI_IMAGE_MAX,
36};
37
38/**
39 * Msg header common to all msgs
40 */
41struct bfi_mhdr_s {
42 u8 msg_class; /* @ref bfi_mclass_t */
43 u8 msg_id; /* msg opcode with in the class */
44 union {
45 struct {
46 u8 rsvd;
47 u8 lpu_id; /* msg destination */
48 } h2i;
49 u16 i2htok; /* token in msgs to host */
50 } mtag;
51};
52
53#define bfi_h2i_set(_mh, _mc, _op, _lpuid) do { \
54 (_mh).msg_class = (_mc); \
55 (_mh).msg_id = (_op); \
56 (_mh).mtag.h2i.lpu_id = (_lpuid); \
57} while (0)
58
59#define bfi_i2h_set(_mh, _mc, _op, _i2htok) do { \
60 (_mh).msg_class = (_mc); \
61 (_mh).msg_id = (_op); \
62 (_mh).mtag.i2htok = (_i2htok); \
63} while (0)
64
65/*
66 * Message opcodes: 0-127 to firmware, 128-255 to host
67 */
68#define BFI_I2H_OPCODE_BASE 128
69#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE)
70
71/**
72 ****************************************************************************
73 *
74 * Scatter Gather Element and Page definition
75 *
76 ****************************************************************************
77 */
78
79#define BFI_SGE_INLINE 1
80#define BFI_SGE_INLINE_MAX (BFI_SGE_INLINE + 1)
81
82/**
83 * SG Flags
84 */
85enum {
86 BFI_SGE_DATA = 0, /* data address, not last */
87 BFI_SGE_DATA_CPL = 1, /* data addr, last in current page */
88 BFI_SGE_DATA_LAST = 3, /* data address, last */
89 BFI_SGE_LINK = 2, /* link address */
90 BFI_SGE_PGDLEN = 2, /* cumulative data length for page */
91};
92
93/**
94 * DMA addresses
95 */
96union bfi_addr_u {
97 struct {
98 u32 addr_lo;
99 u32 addr_hi;
100 } a32;
101};
102
103/**
104 * Scatter Gather Element
105 */
106struct bfi_sge_s {
107#ifdef __BIGENDIAN
108 u32 flags:2,
109 rsvd:2,
110 sg_len:28;
111#else
112 u32 sg_len:28,
113 rsvd:2,
114 flags:2;
115#endif
116 union bfi_addr_u sga;
117};
118
119/**
120 * Scatter Gather Page
121 */
122#define BFI_SGPG_DATA_SGES 7
123#define BFI_SGPG_SGES_MAX (BFI_SGPG_DATA_SGES + 1)
124#define BFI_SGPG_RSVD_WD_LEN 8
125struct bfi_sgpg_s {
126 struct bfi_sge_s sges[BFI_SGPG_SGES_MAX];
127 u32 rsvd[BFI_SGPG_RSVD_WD_LEN];
128};
129
130/*
131 * Large Message structure - 128 Bytes size Msgs
132 */
133#define BFI_LMSG_SZ 128
134#define BFI_LMSG_PL_WSZ \
135 ((BFI_LMSG_SZ - sizeof(struct bfi_mhdr_s)) / 4)
136
137struct bfi_msg_s {
138 struct bfi_mhdr_s mhdr;
139 u32 pl[BFI_LMSG_PL_WSZ];
140};
141
142/**
143 * Mailbox message structure
144 */
145#define BFI_MBMSG_SZ 7
146struct bfi_mbmsg_s {
147 struct bfi_mhdr_s mh;
148 u32 pl[BFI_MBMSG_SZ];
149};
150
151/**
152 * Message Classes
153 */
154enum bfi_mclass {
155 BFI_MC_IOC = 1, /* IO Controller (IOC) */
156 BFI_MC_FCPORT = 5, /* FC port */
157 BFI_MC_IOCFC = 6, /* FC - IO Controller (IOC) */
158 BFI_MC_LL = 7, /* Link Layer */
159 BFI_MC_UF = 8, /* Unsolicited frame receive */
160 BFI_MC_FCXP = 9, /* FC Transport */
161 BFI_MC_LPS = 10, /* lport fc login services */
162 BFI_MC_RPORT = 11, /* Remote port */
163 BFI_MC_ITNIM = 12, /* I-T nexus (Initiator mode) */
164 BFI_MC_IOIM_READ = 13, /* read IO (Initiator mode) */
165 BFI_MC_IOIM_WRITE = 14, /* write IO (Initiator mode) */
166 BFI_MC_IOIM_IO = 15, /* IO (Initiator mode) */
167 BFI_MC_IOIM = 16, /* IO (Initiator mode) */
168 BFI_MC_IOIM_IOCOM = 17, /* good IO completion */
169 BFI_MC_TSKIM = 18, /* Initiator Task management */
170 BFI_MC_PORT = 21, /* Physical port */
171 BFI_MC_MAX = 32
172};
173
174#define BFI_IOC_MAX_CQS 4
175#define BFI_IOC_MAX_CQS_ASIC 8
176#define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */
177
178#define BFI_BOOT_TYPE_OFF 8
179#define BFI_BOOT_LOADER_OFF 12
180
181#define BFI_BOOT_TYPE_NORMAL 0
182#define BFI_BOOT_TYPE_FLASH 1
183#define BFI_BOOT_TYPE_MEMTEST 2
184
185#define BFI_BOOT_LOADER_OS 0
186#define BFI_BOOT_LOADER_BIOS 1
187#define BFI_BOOT_LOADER_UEFI 2
188
189/**
190 *----------------------------------------------------------------------
191 * IOC
192 *----------------------------------------------------------------------
193 */
194
195enum bfi_ioc_h2i_msgs {
196 BFI_IOC_H2I_ENABLE_REQ = 1,
197 BFI_IOC_H2I_DISABLE_REQ = 2,
198 BFI_IOC_H2I_GETATTR_REQ = 3,
199 BFI_IOC_H2I_DBG_SYNC = 4,
200 BFI_IOC_H2I_DBG_DUMP = 5,
201};
202
203enum bfi_ioc_i2h_msgs {
204 BFI_IOC_I2H_ENABLE_REPLY = BFA_I2HM(1),
205 BFI_IOC_I2H_DISABLE_REPLY = BFA_I2HM(2),
206 BFI_IOC_I2H_GETATTR_REPLY = BFA_I2HM(3),
207 BFI_IOC_I2H_READY_EVENT = BFA_I2HM(4),
208 BFI_IOC_I2H_HBEAT = BFA_I2HM(5),
209};
210
211/**
212 * BFI_IOC_H2I_GETATTR_REQ message
213 */
214struct bfi_ioc_getattr_req_s {
215 struct bfi_mhdr_s mh;
216 union bfi_addr_u attr_addr;
217};
218
219struct bfi_ioc_attr_s {
220 wwn_t mfg_pwwn; /* Mfg port wwn */
221 wwn_t mfg_nwwn; /* Mfg node wwn */
222 mac_t mfg_mac; /* Mfg mac */
223 u16 rsvd_a;
224 wwn_t pwwn;
225 wwn_t nwwn;
226 mac_t mac; /* PBC or Mfg mac */
227 u16 rsvd_b;
228 mac_t fcoe_mac;
229 u16 rsvd_c;
230 char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
231 u8 pcie_gen;
232 u8 pcie_lanes_orig;
233 u8 pcie_lanes;
234 u8 rx_bbcredit; /* receive buffer credits */
235 u32 adapter_prop; /* adapter properties */
236 u16 maxfrsize; /* max receive frame size */
237 char asic_rev;
238 u8 rsvd_d;
239 char fw_version[BFA_VERSION_LEN];
240 char optrom_version[BFA_VERSION_LEN];
241 struct bfa_mfg_vpd_s vpd;
242 u32 card_type; /* card type */
243};
244
245/**
246 * BFI_IOC_I2H_GETATTR_REPLY message
247 */
248struct bfi_ioc_getattr_reply_s {
249 struct bfi_mhdr_s mh; /* Common msg header */
250 u8 status; /* cfg reply status */
251 u8 rsvd[3];
252};
253
254/**
255 * Firmware memory page offsets
256 */
257#define BFI_IOC_SMEM_PG0_CB (0x40)
258#define BFI_IOC_SMEM_PG0_CT (0x180)
259
260/**
261 * Firmware statistic offset
262 */
263#define BFI_IOC_FWSTATS_OFF (0x6B40)
264#define BFI_IOC_FWSTATS_SZ (4096)
265
266/**
267 * Firmware trace offset
268 */
269#define BFI_IOC_TRC_OFF (0x4b00)
270#define BFI_IOC_TRC_ENTS 256
271
272#define BFI_IOC_FW_SIGNATURE (0xbfadbfad)
273#define BFI_IOC_MD5SUM_SZ 4
274struct bfi_ioc_image_hdr_s {
275 u32 signature; /* constant signature */
276 u32 rsvd_a;
277 u32 exec; /* exec vector */
278 u32 param; /* parameters */
279 u32 rsvd_b[4];
280 u32 md5sum[BFI_IOC_MD5SUM_SZ];
281};
282
283/**
284 * BFI_IOC_I2H_READY_EVENT message
285 */
286struct bfi_ioc_rdy_event_s {
287 struct bfi_mhdr_s mh; /* common msg header */
288 u8 init_status; /* init event status */
289 u8 rsvd[3];
290};
291
292struct bfi_ioc_hbeat_s {
293 struct bfi_mhdr_s mh; /* common msg header */
294 u32 hb_count; /* current heart beat count */
295};
296
297/**
298 * IOC hardware/firmware state
299 */
300enum bfi_ioc_state {
301 BFI_IOC_UNINIT = 0, /* not initialized */
302 BFI_IOC_INITING = 1, /* h/w is being initialized */
303 BFI_IOC_HWINIT = 2, /* h/w is initialized */
304 BFI_IOC_CFG = 3, /* IOC configuration in progress */
305 BFI_IOC_OP = 4, /* IOC is operational */
306 BFI_IOC_DISABLING = 5, /* IOC is being disabled */
307 BFI_IOC_DISABLED = 6, /* IOC is disabled */
308 BFI_IOC_CFG_DISABLED = 7, /* IOC is being disabled;transient */
309 BFI_IOC_FAIL = 8, /* IOC heart-beat failure */
310 BFI_IOC_MEMTEST = 9, /* IOC is doing memtest */
311};
312
313#define BFI_IOC_ENDIAN_SIG 0x12345678
314
315enum {
316 BFI_ADAPTER_TYPE_FC = 0x01, /* FC adapters */
317 BFI_ADAPTER_TYPE_MK = 0x0f0000, /* adapter type mask */
318 BFI_ADAPTER_TYPE_SH = 16, /* adapter type shift */
319 BFI_ADAPTER_NPORTS_MK = 0xff00, /* number of ports mask */
320 BFI_ADAPTER_NPORTS_SH = 8, /* number of ports shift */
321 BFI_ADAPTER_SPEED_MK = 0xff, /* adapter speed mask */
322 BFI_ADAPTER_SPEED_SH = 0, /* adapter speed shift */
323 BFI_ADAPTER_PROTO = 0x100000, /* prototype adapaters */
324 BFI_ADAPTER_TTV = 0x200000, /* TTV debug capable */
325 BFI_ADAPTER_UNSUPP = 0x400000, /* unknown adapter type */
326};
327
328#define BFI_ADAPTER_GETP(__prop, __adap_prop) \
329 (((__adap_prop) & BFI_ADAPTER_ ## __prop ## _MK) >> \
330 BFI_ADAPTER_ ## __prop ## _SH)
331#define BFI_ADAPTER_SETP(__prop, __val) \
332 ((__val) << BFI_ADAPTER_ ## __prop ## _SH)
333#define BFI_ADAPTER_IS_PROTO(__adap_type) \
334 ((__adap_type) & BFI_ADAPTER_PROTO)
335#define BFI_ADAPTER_IS_TTV(__adap_type) \
336 ((__adap_type) & BFI_ADAPTER_TTV)
337#define BFI_ADAPTER_IS_UNSUPP(__adap_type) \
338 ((__adap_type) & BFI_ADAPTER_UNSUPP)
339#define BFI_ADAPTER_IS_SPECIAL(__adap_type) \
340 ((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \
341 BFI_ADAPTER_UNSUPP))
342
343/**
344 * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages
345 */
346struct bfi_ioc_ctrl_req_s {
347 struct bfi_mhdr_s mh;
348 u8 ioc_class;
349 u8 rsvd[3];
350 u32 tv_sec;
351};
352#define bfi_ioc_enable_req_t struct bfi_ioc_ctrl_req_s;
353#define bfi_ioc_disable_req_t struct bfi_ioc_ctrl_req_s;
354
355/**
356 * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages
357 */
358struct bfi_ioc_ctrl_reply_s {
359 struct bfi_mhdr_s mh; /* Common msg header */
360 u8 status; /* enable/disable status */
361 u8 rsvd[3];
362};
363#define bfi_ioc_enable_reply_t struct bfi_ioc_ctrl_reply_s;
364#define bfi_ioc_disable_reply_t struct bfi_ioc_ctrl_reply_s;
365
366#define BFI_IOC_MSGSZ 8
367/**
368 * H2I Messages
369 */
370union bfi_ioc_h2i_msg_u {
371 struct bfi_mhdr_s mh;
372 struct bfi_ioc_ctrl_req_s enable_req;
373 struct bfi_ioc_ctrl_req_s disable_req;
374 struct bfi_ioc_getattr_req_s getattr_req;
375 u32 mboxmsg[BFI_IOC_MSGSZ];
376};
377
378/**
379 * I2H Messages
380 */
381union bfi_ioc_i2h_msg_u {
382 struct bfi_mhdr_s mh;
383 struct bfi_ioc_rdy_event_s rdy_event;
384 u32 mboxmsg[BFI_IOC_MSGSZ];
385};
386
387
388/**
389 *----------------------------------------------------------------------
390 * PBC
391 *----------------------------------------------------------------------
392 */
393
394#define BFI_PBC_MAX_BLUNS 8
395#define BFI_PBC_MAX_VPORTS 16
396
397/**
398 * PBC boot lun configuration
399 */
400struct bfi_pbc_blun_s {
401 wwn_t tgt_pwwn;
402 lun_t tgt_lun;
403};
404
405/**
406 * PBC virtual port configuration
407 */
408struct bfi_pbc_vport_s {
409 wwn_t vp_pwwn;
410 wwn_t vp_nwwn;
411};
412
413/**
414 * BFI pre-boot configuration information
415 */
416struct bfi_pbc_s {
417 u8 port_enabled;
418 u8 boot_enabled;
419 u8 nbluns;
420 u8 nvports;
421 u8 port_speed;
422 u8 rsvd_a;
423 u16 hss;
424 wwn_t pbc_pwwn;
425 wwn_t pbc_nwwn;
426 struct bfi_pbc_blun_s blun[BFI_PBC_MAX_BLUNS];
427 struct bfi_pbc_vport_s vport[BFI_PBC_MAX_VPORTS];
428};
429
430/**
431 *----------------------------------------------------------------------
432 * MSGQ
433 *----------------------------------------------------------------------
434 */
435#define BFI_MSGQ_FULL(_q) (((_q->pi + 1) % _q->q_depth) == _q->ci)
436#define BFI_MSGQ_EMPTY(_q) (_q->pi == _q->ci)
437#define BFI_MSGQ_UPDATE_CI(_q) (_q->ci = (_q->ci + 1) % _q->q_depth)
438#define BFI_MSGQ_UPDATE_PI(_q) (_q->pi = (_q->pi + 1) % _q->q_depth)
439
440/* q_depth must be power of 2 */
441#define BFI_MSGQ_FREE_CNT(_q) ((_q->ci - _q->pi - 1) & (_q->q_depth - 1))
442
443enum bfi_msgq_h2i_msgs_e {
444 BFI_MSGQ_H2I_INIT_REQ = 1,
445 BFI_MSGQ_H2I_DOORBELL = 2,
446 BFI_MSGQ_H2I_SHUTDOWN = 3,
447};
448
449enum bfi_msgq_i2h_msgs_e {
450 BFI_MSGQ_I2H_INIT_RSP = 1,
451 BFI_MSGQ_I2H_DOORBELL = 2,
452};
453
454
455/* Messages(commands/responsed/AENS will have the following header */
456struct bfi_msgq_mhdr_s {
457 u8 msg_class;
458 u8 msg_id;
459 u16 msg_token;
460 u16 num_entries;
461 u8 enet_id;
462 u8 rsvd[1];
463};
464
465#define bfi_msgq_mhdr_set(_mh, _mc, _mid, _tok, _enet_id) do { \
466 (_mh).msg_class = (_mc); \
467 (_mh).msg_id = (_mid); \
468 (_mh).msg_token = (_tok); \
469 (_mh).enet_id = (_enet_id); \
470} while (0)
471
472/*
473 * Mailbox for messaging interface
474 *
475*/
476#define BFI_MSGQ_CMD_ENTRY_SIZE (64) /* TBD */
477#define BFI_MSGQ_RSP_ENTRY_SIZE (64) /* TBD */
478#define BFI_MSGQ_MSG_SIZE_MAX (2048) /* TBD */
479
480struct bfi_msgq_s {
481 union bfi_addr_u addr;
482 u16 q_depth; /* Total num of entries in the queue */
483 u8 rsvd[2];
484};
485
486/* BFI_ENET_MSGQ_CFG_REQ TBD init or cfg? */
487struct bfi_msgq_cfg_req_s {
488 struct bfi_mhdr_s mh;
489 struct bfi_msgq_s cmdq;
490 struct bfi_msgq_s rspq;
491};
492
493/* BFI_ENET_MSGQ_CFG_RSP */
494struct bfi_msgq_cfg_rsp_s {
495 struct bfi_mhdr_s mh;
496 u8 cmd_status;
497 u8 rsvd[3];
498};
499
500
501/* BFI_MSGQ_H2I_DOORBELL */
502struct bfi_msgq_h2i_db_s {
503 struct bfi_mhdr_s mh;
504 u16 cmdq_pi;
505 u16 rspq_ci;
506};
507
508/* BFI_MSGQ_I2H_DOORBELL */
509struct bfi_msgq_i2h_db_s {
510 struct bfi_mhdr_s mh;
511 u16 rspq_pi;
512 u16 cmdq_ci;
513};
514
515#pragma pack()
516
517/* BFI port specific */
518#pragma pack(1)
519
520enum bfi_port_h2i {
521 BFI_PORT_H2I_ENABLE_REQ = (1),
522 BFI_PORT_H2I_DISABLE_REQ = (2),
523 BFI_PORT_H2I_GET_STATS_REQ = (3),
524 BFI_PORT_H2I_CLEAR_STATS_REQ = (4),
525};
526
527enum bfi_port_i2h {
528 BFI_PORT_I2H_ENABLE_RSP = BFA_I2HM(1),
529 BFI_PORT_I2H_DISABLE_RSP = BFA_I2HM(2),
530 BFI_PORT_I2H_GET_STATS_RSP = BFA_I2HM(3),
531 BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4),
532};
533
534/**
535 * Generic REQ type
536 */
537struct bfi_port_generic_req_s {
538 struct bfi_mhdr_s mh; /* msg header */
539 u32 msgtag; /* msgtag for reply */
540 u32 rsvd;
541};
542
543/**
544 * Generic RSP type
545 */
546struct bfi_port_generic_rsp_s {
547 struct bfi_mhdr_s mh; /* common msg header */
548 u8 status; /* port enable status */
549 u8 rsvd[3];
550 u32 msgtag; /* msgtag for reply */
551};
552
553/**
554 * BFI_PORT_H2I_GET_STATS_REQ
555 */
556struct bfi_port_get_stats_req_s {
557 struct bfi_mhdr_s mh; /* common msg header */
558 union bfi_addr_u dma_addr;
559};
560
561union bfi_port_h2i_msg_u {
562 struct bfi_mhdr_s mh;
563 struct bfi_port_generic_req_s enable_req;
564 struct bfi_port_generic_req_s disable_req;
565 struct bfi_port_get_stats_req_s getstats_req;
566 struct bfi_port_generic_req_s clearstats_req;
567};
568
569union bfi_port_i2h_msg_u {
570 struct bfi_mhdr_s mh;
571 struct bfi_port_generic_rsp_s enable_rsp;
572 struct bfi_port_generic_rsp_s disable_rsp;
573 struct bfi_port_generic_rsp_s getstats_rsp;
574 struct bfi_port_generic_rsp_s clearstats_rsp;
575};
576
577#pragma pack()
578
579#endif /* __BFI_H__ */
diff --git a/drivers/scsi/bfa/include/bfi/bfi_cbreg.h b/drivers/scsi/bfa/bfi_cbreg.h
index a51ee61ddb19..6f03ed382c69 100644
--- a/drivers/scsi/bfa/include/bfi/bfi_cbreg.h
+++ b/drivers/scsi/bfa/bfi_cbreg.h
@@ -1,19 +1,3 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17 1
18/* 2/*
19 * bfi_cbreg.h crossbow host block register definitions 3 * bfi_cbreg.h crossbow host block register definitions
@@ -177,8 +161,8 @@
177#define __PSS_LMEM_INIT_EN 0x00000100 161#define __PSS_LMEM_INIT_EN 0x00000100
178#define __PSS_LPU1_RESET 0x00000002 162#define __PSS_LPU1_RESET 0x00000002
179#define __PSS_LPU0_RESET 0x00000001 163#define __PSS_LPU0_RESET 0x00000001
180#define PSS_ERR_STATUS_REG 0x00018810 164#define PSS_ERR_STATUS_REG 0x00018810
181#define __PSS_LMEM1_CORR_ERR 0x00000800 165#define __PSS_LMEM1_CORR_ERR 0x00000800
182#define __PSS_LMEM0_CORR_ERR 0x00000400 166#define __PSS_LMEM0_CORR_ERR 0x00000400
183#define __PSS_LMEM1_UNCORR_ERR 0x00000200 167#define __PSS_LMEM1_UNCORR_ERR 0x00000200
184#define __PSS_LMEM0_UNCORR_ERR 0x00000100 168#define __PSS_LMEM0_UNCORR_ERR 0x00000100
@@ -190,8 +174,9 @@
190#define __PSS_SGM_IF_ERR 0x00000004 174#define __PSS_SGM_IF_ERR 0x00000004
191#define __PSS_LPU1_RAM_ERR 0x00000002 175#define __PSS_LPU1_RAM_ERR 0x00000002
192#define __PSS_LPU0_RAM_ERR 0x00000001 176#define __PSS_LPU0_RAM_ERR 0x00000001
193#define ERR_SET_REG 0x00018818 177#define ERR_SET_REG 0x00018818
194#define __PSS_ERR_STATUS_SET 0x00000fff 178#define __PSS_ERR_STATUS_SET 0x00000fff
179
195 180
196/* 181/*
197 * These definitions are either in error/missing in spec. Its auto-generated 182 * These definitions are either in error/missing in spec. Its auto-generated
diff --git a/drivers/scsi/bfa/bfi_ctreg.h b/drivers/scsi/bfa/bfi_ctreg.h
new file mode 100644
index 000000000000..62b86a4b0e4b
--- /dev/null
+++ b/drivers/scsi/bfa/bfi_ctreg.h
@@ -0,0 +1,627 @@
1
2/*
3 * bfi_ctreg.h catapult host block register definitions
4 *
5 * !!! Do not edit. Auto generated. !!!
6 */
7
8#ifndef __BFI_CTREG_H__
9#define __BFI_CTREG_H__
10
11
12#define HOSTFN0_LPU_MBOX0_0 0x00019200
13#define HOSTFN1_LPU_MBOX0_8 0x00019260
14#define LPU_HOSTFN0_MBOX0_0 0x00019280
15#define LPU_HOSTFN1_MBOX0_8 0x000192e0
16#define HOSTFN2_LPU_MBOX0_0 0x00019400
17#define HOSTFN3_LPU_MBOX0_8 0x00019460
18#define LPU_HOSTFN2_MBOX0_0 0x00019480
19#define LPU_HOSTFN3_MBOX0_8 0x000194e0
20#define HOSTFN0_INT_STATUS 0x00014000
21#define __HOSTFN0_HALT_OCCURRED 0x01000000
22#define __HOSTFN0_INT_STATUS_LVL_MK 0x00f00000
23#define __HOSTFN0_INT_STATUS_LVL_SH 20
24#define __HOSTFN0_INT_STATUS_LVL(_v) ((_v) << __HOSTFN0_INT_STATUS_LVL_SH)
25#define __HOSTFN0_INT_STATUS_P_MK 0x000f0000
26#define __HOSTFN0_INT_STATUS_P_SH 16
27#define __HOSTFN0_INT_STATUS_P(_v) ((_v) << __HOSTFN0_INT_STATUS_P_SH)
28#define __HOSTFN0_INT_STATUS_F 0x0000ffff
29#define HOSTFN0_INT_MSK 0x00014004
30#define HOST_PAGE_NUM_FN0 0x00014008
31#define __HOST_PAGE_NUM_FN 0x000001ff
32#define HOST_MSIX_ERR_INDEX_FN0 0x0001400c
33#define __MSIX_ERR_INDEX_FN 0x000001ff
34#define HOSTFN1_INT_STATUS 0x00014100
35#define __HOSTFN1_HALT_OCCURRED 0x01000000
36#define __HOSTFN1_INT_STATUS_LVL_MK 0x00f00000
37#define __HOSTFN1_INT_STATUS_LVL_SH 20
38#define __HOSTFN1_INT_STATUS_LVL(_v) ((_v) << __HOSTFN1_INT_STATUS_LVL_SH)
39#define __HOSTFN1_INT_STATUS_P_MK 0x000f0000
40#define __HOSTFN1_INT_STATUS_P_SH 16
41#define __HOSTFN1_INT_STATUS_P(_v) ((_v) << __HOSTFN1_INT_STATUS_P_SH)
42#define __HOSTFN1_INT_STATUS_F 0x0000ffff
43#define HOSTFN1_INT_MSK 0x00014104
44#define HOST_PAGE_NUM_FN1 0x00014108
45#define HOST_MSIX_ERR_INDEX_FN1 0x0001410c
46#define APP_PLL_425_CTL_REG 0x00014204
47#define __P_425_PLL_LOCK 0x80000000
48#define __APP_PLL_425_SRAM_USE_100MHZ 0x00100000
49#define __APP_PLL_425_RESET_TIMER_MK 0x000e0000
50#define __APP_PLL_425_RESET_TIMER_SH 17
51#define __APP_PLL_425_RESET_TIMER(_v) ((_v) << __APP_PLL_425_RESET_TIMER_SH)
52#define __APP_PLL_425_LOGIC_SOFT_RESET 0x00010000
53#define __APP_PLL_425_CNTLMT0_1_MK 0x0000c000
54#define __APP_PLL_425_CNTLMT0_1_SH 14
55#define __APP_PLL_425_CNTLMT0_1(_v) ((_v) << __APP_PLL_425_CNTLMT0_1_SH)
56#define __APP_PLL_425_JITLMT0_1_MK 0x00003000
57#define __APP_PLL_425_JITLMT0_1_SH 12
58#define __APP_PLL_425_JITLMT0_1(_v) ((_v) << __APP_PLL_425_JITLMT0_1_SH)
59#define __APP_PLL_425_HREF 0x00000800
60#define __APP_PLL_425_HDIV 0x00000400
61#define __APP_PLL_425_P0_1_MK 0x00000300
62#define __APP_PLL_425_P0_1_SH 8
63#define __APP_PLL_425_P0_1(_v) ((_v) << __APP_PLL_425_P0_1_SH)
64#define __APP_PLL_425_Z0_2_MK 0x000000e0
65#define __APP_PLL_425_Z0_2_SH 5
66#define __APP_PLL_425_Z0_2(_v) ((_v) << __APP_PLL_425_Z0_2_SH)
67#define __APP_PLL_425_RSEL200500 0x00000010
68#define __APP_PLL_425_ENARST 0x00000008
69#define __APP_PLL_425_BYPASS 0x00000004
70#define __APP_PLL_425_LRESETN 0x00000002
71#define __APP_PLL_425_ENABLE 0x00000001
72#define APP_PLL_312_CTL_REG 0x00014208
73#define __P_312_PLL_LOCK 0x80000000
74#define __ENABLE_MAC_AHB_1 0x00800000
75#define __ENABLE_MAC_AHB_0 0x00400000
76#define __ENABLE_MAC_1 0x00200000
77#define __ENABLE_MAC_0 0x00100000
78#define __APP_PLL_312_RESET_TIMER_MK 0x000e0000
79#define __APP_PLL_312_RESET_TIMER_SH 17
80#define __APP_PLL_312_RESET_TIMER(_v) ((_v) << __APP_PLL_312_RESET_TIMER_SH)
81#define __APP_PLL_312_LOGIC_SOFT_RESET 0x00010000
82#define __APP_PLL_312_CNTLMT0_1_MK 0x0000c000
83#define __APP_PLL_312_CNTLMT0_1_SH 14
84#define __APP_PLL_312_CNTLMT0_1(_v) ((_v) << __APP_PLL_312_CNTLMT0_1_SH)
85#define __APP_PLL_312_JITLMT0_1_MK 0x00003000
86#define __APP_PLL_312_JITLMT0_1_SH 12
87#define __APP_PLL_312_JITLMT0_1(_v) ((_v) << __APP_PLL_312_JITLMT0_1_SH)
88#define __APP_PLL_312_HREF 0x00000800
89#define __APP_PLL_312_HDIV 0x00000400
90#define __APP_PLL_312_P0_1_MK 0x00000300
91#define __APP_PLL_312_P0_1_SH 8
92#define __APP_PLL_312_P0_1(_v) ((_v) << __APP_PLL_312_P0_1_SH)
93#define __APP_PLL_312_Z0_2_MK 0x000000e0
94#define __APP_PLL_312_Z0_2_SH 5
95#define __APP_PLL_312_Z0_2(_v) ((_v) << __APP_PLL_312_Z0_2_SH)
96#define __APP_PLL_312_RSEL200500 0x00000010
97#define __APP_PLL_312_ENARST 0x00000008
98#define __APP_PLL_312_BYPASS 0x00000004
99#define __APP_PLL_312_LRESETN 0x00000002
100#define __APP_PLL_312_ENABLE 0x00000001
101#define MBIST_CTL_REG 0x00014220
102#define __EDRAM_BISTR_START 0x00000004
103#define __MBIST_RESET 0x00000002
104#define __MBIST_START 0x00000001
105#define MBIST_STAT_REG 0x00014224
106#define __EDRAM_BISTR_STATUS 0x00000008
107#define __EDRAM_BISTR_DONE 0x00000004
108#define __MEM_BIT_STATUS 0x00000002
109#define __MBIST_DONE 0x00000001
110#define HOST_SEM0_REG 0x00014230
111#define __HOST_SEMAPHORE 0x00000001
112#define HOST_SEM1_REG 0x00014234
113#define HOST_SEM2_REG 0x00014238
114#define HOST_SEM3_REG 0x0001423c
115#define HOST_SEM0_INFO_REG 0x00014240
116#define HOST_SEM1_INFO_REG 0x00014244
117#define HOST_SEM2_INFO_REG 0x00014248
118#define HOST_SEM3_INFO_REG 0x0001424c
119#define ETH_MAC_SER_REG 0x00014288
120#define __APP_EMS_CKBUFAMPIN 0x00000020
121#define __APP_EMS_REFCLKSEL 0x00000010
122#define __APP_EMS_CMLCKSEL 0x00000008
123#define __APP_EMS_REFCKBUFEN2 0x00000004
124#define __APP_EMS_REFCKBUFEN1 0x00000002
125#define __APP_EMS_CHANNEL_SEL 0x00000001
126#define HOSTFN2_INT_STATUS 0x00014300
127#define __HOSTFN2_HALT_OCCURRED 0x01000000
128#define __HOSTFN2_INT_STATUS_LVL_MK 0x00f00000
129#define __HOSTFN2_INT_STATUS_LVL_SH 20
130#define __HOSTFN2_INT_STATUS_LVL(_v) ((_v) << __HOSTFN2_INT_STATUS_LVL_SH)
131#define __HOSTFN2_INT_STATUS_P_MK 0x000f0000
132#define __HOSTFN2_INT_STATUS_P_SH 16
133#define __HOSTFN2_INT_STATUS_P(_v) ((_v) << __HOSTFN2_INT_STATUS_P_SH)
134#define __HOSTFN2_INT_STATUS_F 0x0000ffff
135#define HOSTFN2_INT_MSK 0x00014304
136#define HOST_PAGE_NUM_FN2 0x00014308
137#define HOST_MSIX_ERR_INDEX_FN2 0x0001430c
138#define HOSTFN3_INT_STATUS 0x00014400
139#define __HALT_OCCURRED 0x01000000
140#define __HOSTFN3_INT_STATUS_LVL_MK 0x00f00000
141#define __HOSTFN3_INT_STATUS_LVL_SH 20
142#define __HOSTFN3_INT_STATUS_LVL(_v) ((_v) << __HOSTFN3_INT_STATUS_LVL_SH)
143#define __HOSTFN3_INT_STATUS_P_MK 0x000f0000
144#define __HOSTFN3_INT_STATUS_P_SH 16
145#define __HOSTFN3_INT_STATUS_P(_v) ((_v) << __HOSTFN3_INT_STATUS_P_SH)
146#define __HOSTFN3_INT_STATUS_F 0x0000ffff
147#define HOSTFN3_INT_MSK 0x00014404
148#define HOST_PAGE_NUM_FN3 0x00014408
149#define HOST_MSIX_ERR_INDEX_FN3 0x0001440c
150#define FNC_ID_REG 0x00014600
151#define __FUNCTION_NUMBER 0x00000007
152#define FNC_PERS_REG 0x00014604
153#define __F3_FUNCTION_ACTIVE 0x80000000
154#define __F3_FUNCTION_MODE 0x40000000
155#define __F3_PORT_MAP_MK 0x30000000
156#define __F3_PORT_MAP_SH 28
157#define __F3_PORT_MAP(_v) ((_v) << __F3_PORT_MAP_SH)
158#define __F3_VM_MODE 0x08000000
159#define __F3_INTX_STATUS_MK 0x07000000
160#define __F3_INTX_STATUS_SH 24
161#define __F3_INTX_STATUS(_v) ((_v) << __F3_INTX_STATUS_SH)
162#define __F2_FUNCTION_ACTIVE 0x00800000
163#define __F2_FUNCTION_MODE 0x00400000
164#define __F2_PORT_MAP_MK 0x00300000
165#define __F2_PORT_MAP_SH 20
166#define __F2_PORT_MAP(_v) ((_v) << __F2_PORT_MAP_SH)
167#define __F2_VM_MODE 0x00080000
168#define __F2_INTX_STATUS_MK 0x00070000
169#define __F2_INTX_STATUS_SH 16
170#define __F2_INTX_STATUS(_v) ((_v) << __F2_INTX_STATUS_SH)
171#define __F1_FUNCTION_ACTIVE 0x00008000
172#define __F1_FUNCTION_MODE 0x00004000
173#define __F1_PORT_MAP_MK 0x00003000
174#define __F1_PORT_MAP_SH 12
175#define __F1_PORT_MAP(_v) ((_v) << __F1_PORT_MAP_SH)
176#define __F1_VM_MODE 0x00000800
177#define __F1_INTX_STATUS_MK 0x00000700
178#define __F1_INTX_STATUS_SH 8
179#define __F1_INTX_STATUS(_v) ((_v) << __F1_INTX_STATUS_SH)
180#define __F0_FUNCTION_ACTIVE 0x00000080
181#define __F0_FUNCTION_MODE 0x00000040
182#define __F0_PORT_MAP_MK 0x00000030
183#define __F0_PORT_MAP_SH 4
184#define __F0_PORT_MAP(_v) ((_v) << __F0_PORT_MAP_SH)
185#define __F0_VM_MODE 0x00000008
186#define __F0_INTX_STATUS 0x00000007
187enum {
188 __F0_INTX_STATUS_MSIX = 0x0,
189 __F0_INTX_STATUS_INTA = 0x1,
190 __F0_INTX_STATUS_INTB = 0x2,
191 __F0_INTX_STATUS_INTC = 0x3,
192 __F0_INTX_STATUS_INTD = 0x4,
193};
194#define OP_MODE 0x0001460c
195#define __APP_ETH_CLK_LOWSPEED 0x00000004
196#define __GLOBAL_CORECLK_HALFSPEED 0x00000002
197#define __GLOBAL_FCOE_MODE 0x00000001
198#define HOST_SEM4_REG 0x00014610
199#define HOST_SEM5_REG 0x00014614
200#define HOST_SEM6_REG 0x00014618
201#define HOST_SEM7_REG 0x0001461c
202#define HOST_SEM4_INFO_REG 0x00014620
203#define HOST_SEM5_INFO_REG 0x00014624
204#define HOST_SEM6_INFO_REG 0x00014628
205#define HOST_SEM7_INFO_REG 0x0001462c
206#define HOSTFN0_LPU0_MBOX0_CMD_STAT 0x00019000
207#define __HOSTFN0_LPU0_MBOX0_INFO_MK 0xfffffffe
208#define __HOSTFN0_LPU0_MBOX0_INFO_SH 1
209#define __HOSTFN0_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN0_LPU0_MBOX0_INFO_SH)
210#define __HOSTFN0_LPU0_MBOX0_CMD_STATUS 0x00000001
211#define HOSTFN0_LPU1_MBOX0_CMD_STAT 0x00019004
212#define __HOSTFN0_LPU1_MBOX0_INFO_MK 0xfffffffe
213#define __HOSTFN0_LPU1_MBOX0_INFO_SH 1
214#define __HOSTFN0_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN0_LPU1_MBOX0_INFO_SH)
215#define __HOSTFN0_LPU1_MBOX0_CMD_STATUS 0x00000001
216#define LPU0_HOSTFN0_MBOX0_CMD_STAT 0x00019008
217#define __LPU0_HOSTFN0_MBOX0_INFO_MK 0xfffffffe
218#define __LPU0_HOSTFN0_MBOX0_INFO_SH 1
219#define __LPU0_HOSTFN0_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN0_MBOX0_INFO_SH)
220#define __LPU0_HOSTFN0_MBOX0_CMD_STATUS 0x00000001
221#define LPU1_HOSTFN0_MBOX0_CMD_STAT 0x0001900c
222#define __LPU1_HOSTFN0_MBOX0_INFO_MK 0xfffffffe
223#define __LPU1_HOSTFN0_MBOX0_INFO_SH 1
224#define __LPU1_HOSTFN0_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN0_MBOX0_INFO_SH)
225#define __LPU1_HOSTFN0_MBOX0_CMD_STATUS 0x00000001
226#define HOSTFN1_LPU0_MBOX0_CMD_STAT 0x00019010
227#define __HOSTFN1_LPU0_MBOX0_INFO_MK 0xfffffffe
228#define __HOSTFN1_LPU0_MBOX0_INFO_SH 1
229#define __HOSTFN1_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN1_LPU0_MBOX0_INFO_SH)
230#define __HOSTFN1_LPU0_MBOX0_CMD_STATUS 0x00000001
231#define HOSTFN1_LPU1_MBOX0_CMD_STAT 0x00019014
232#define __HOSTFN1_LPU1_MBOX0_INFO_MK 0xfffffffe
233#define __HOSTFN1_LPU1_MBOX0_INFO_SH 1
234#define __HOSTFN1_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN1_LPU1_MBOX0_INFO_SH)
235#define __HOSTFN1_LPU1_MBOX0_CMD_STATUS 0x00000001
236#define LPU0_HOSTFN1_MBOX0_CMD_STAT 0x00019018
237#define __LPU0_HOSTFN1_MBOX0_INFO_MK 0xfffffffe
238#define __LPU0_HOSTFN1_MBOX0_INFO_SH 1
239#define __LPU0_HOSTFN1_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN1_MBOX0_INFO_SH)
240#define __LPU0_HOSTFN1_MBOX0_CMD_STATUS 0x00000001
241#define LPU1_HOSTFN1_MBOX0_CMD_STAT 0x0001901c
242#define __LPU1_HOSTFN1_MBOX0_INFO_MK 0xfffffffe
243#define __LPU1_HOSTFN1_MBOX0_INFO_SH 1
244#define __LPU1_HOSTFN1_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN1_MBOX0_INFO_SH)
245#define __LPU1_HOSTFN1_MBOX0_CMD_STATUS 0x00000001
246#define HOSTFN2_LPU0_MBOX0_CMD_STAT 0x00019150
247#define __HOSTFN2_LPU0_MBOX0_INFO_MK 0xfffffffe
248#define __HOSTFN2_LPU0_MBOX0_INFO_SH 1
249#define __HOSTFN2_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN2_LPU0_MBOX0_INFO_SH)
250#define __HOSTFN2_LPU0_MBOX0_CMD_STATUS 0x00000001
251#define HOSTFN2_LPU1_MBOX0_CMD_STAT 0x00019154
252#define __HOSTFN2_LPU1_MBOX0_INFO_MK 0xfffffffe
253#define __HOSTFN2_LPU1_MBOX0_INFO_SH 1
254#define __HOSTFN2_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN2_LPU1_MBOX0_INFO_SH)
255#define __HOSTFN2_LPU1_MBOX0BOX0_CMD_STATUS 0x00000001
256#define LPU0_HOSTFN2_MBOX0_CMD_STAT 0x00019158
257#define __LPU0_HOSTFN2_MBOX0_INFO_MK 0xfffffffe
258#define __LPU0_HOSTFN2_MBOX0_INFO_SH 1
259#define __LPU0_HOSTFN2_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN2_MBOX0_INFO_SH)
260#define __LPU0_HOSTFN2_MBOX0_CMD_STATUS 0x00000001
261#define LPU1_HOSTFN2_MBOX0_CMD_STAT 0x0001915c
262#define __LPU1_HOSTFN2_MBOX0_INFO_MK 0xfffffffe
263#define __LPU1_HOSTFN2_MBOX0_INFO_SH 1
264#define __LPU1_HOSTFN2_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN2_MBOX0_INFO_SH)
265#define __LPU1_HOSTFN2_MBOX0_CMD_STATUS 0x00000001
266#define HOSTFN3_LPU0_MBOX0_CMD_STAT 0x00019160
267#define __HOSTFN3_LPU0_MBOX0_INFO_MK 0xfffffffe
268#define __HOSTFN3_LPU0_MBOX0_INFO_SH 1
269#define __HOSTFN3_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN3_LPU0_MBOX0_INFO_SH)
270#define __HOSTFN3_LPU0_MBOX0_CMD_STATUS 0x00000001
271#define HOSTFN3_LPU1_MBOX0_CMD_STAT 0x00019164
272#define __HOSTFN3_LPU1_MBOX0_INFO_MK 0xfffffffe
273#define __HOSTFN3_LPU1_MBOX0_INFO_SH 1
274#define __HOSTFN3_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN3_LPU1_MBOX0_INFO_SH)
275#define __HOSTFN3_LPU1_MBOX0_CMD_STATUS 0x00000001
276#define LPU0_HOSTFN3_MBOX0_CMD_STAT 0x00019168
277#define __LPU0_HOSTFN3_MBOX0_INFO_MK 0xfffffffe
278#define __LPU0_HOSTFN3_MBOX0_INFO_SH 1
279#define __LPU0_HOSTFN3_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN3_MBOX0_INFO_SH)
280#define __LPU0_HOSTFN3_MBOX0_CMD_STATUS 0x00000001
281#define LPU1_HOSTFN3_MBOX0_CMD_STAT 0x0001916c
282#define __LPU1_HOSTFN3_MBOX0_INFO_MK 0xfffffffe
283#define __LPU1_HOSTFN3_MBOX0_INFO_SH 1
284#define __LPU1_HOSTFN3_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN3_MBOX0_INFO_SH)
285#define __LPU1_HOSTFN3_MBOX0_CMD_STATUS 0x00000001
286#define FW_INIT_HALT_P0 0x000191ac
287#define __FW_INIT_HALT_P 0x00000001
288#define FW_INIT_HALT_P1 0x000191bc
289#define CPE_PI_PTR_Q0 0x00038000
290#define __CPE_PI_UNUSED_MK 0xffff0000
291#define __CPE_PI_UNUSED_SH 16
292#define __CPE_PI_UNUSED(_v) ((_v) << __CPE_PI_UNUSED_SH)
293#define __CPE_PI_PTR 0x0000ffff
294#define CPE_PI_PTR_Q1 0x00038040
295#define CPE_CI_PTR_Q0 0x00038004
296#define __CPE_CI_UNUSED_MK 0xffff0000
297#define __CPE_CI_UNUSED_SH 16
298#define __CPE_CI_UNUSED(_v) ((_v) << __CPE_CI_UNUSED_SH)
299#define __CPE_CI_PTR 0x0000ffff
300#define CPE_CI_PTR_Q1 0x00038044
301#define CPE_DEPTH_Q0 0x00038008
302#define __CPE_DEPTH_UNUSED_MK 0xf8000000
303#define __CPE_DEPTH_UNUSED_SH 27
304#define __CPE_DEPTH_UNUSED(_v) ((_v) << __CPE_DEPTH_UNUSED_SH)
305#define __CPE_MSIX_VEC_INDEX_MK 0x07ff0000
306#define __CPE_MSIX_VEC_INDEX_SH 16
307#define __CPE_MSIX_VEC_INDEX(_v) ((_v) << __CPE_MSIX_VEC_INDEX_SH)
308#define __CPE_DEPTH 0x0000ffff
309#define CPE_DEPTH_Q1 0x00038048
310#define CPE_QCTRL_Q0 0x0003800c
311#define __CPE_CTRL_UNUSED30_MK 0xfc000000
312#define __CPE_CTRL_UNUSED30_SH 26
313#define __CPE_CTRL_UNUSED30(_v) ((_v) << __CPE_CTRL_UNUSED30_SH)
314#define __CPE_FUNC_INT_CTRL_MK 0x03000000
315#define __CPE_FUNC_INT_CTRL_SH 24
316#define __CPE_FUNC_INT_CTRL(_v) ((_v) << __CPE_FUNC_INT_CTRL_SH)
317enum {
318 __CPE_FUNC_INT_CTRL_DISABLE = 0x0,
319 __CPE_FUNC_INT_CTRL_F2NF = 0x1,
320 __CPE_FUNC_INT_CTRL_3QUART = 0x2,
321 __CPE_FUNC_INT_CTRL_HALF = 0x3,
322};
323#define __CPE_CTRL_UNUSED20_MK 0x00f00000
324#define __CPE_CTRL_UNUSED20_SH 20
325#define __CPE_CTRL_UNUSED20(_v) ((_v) << __CPE_CTRL_UNUSED20_SH)
326#define __CPE_SCI_TH_MK 0x000f0000
327#define __CPE_SCI_TH_SH 16
328#define __CPE_SCI_TH(_v) ((_v) << __CPE_SCI_TH_SH)
329#define __CPE_CTRL_UNUSED10_MK 0x0000c000
330#define __CPE_CTRL_UNUSED10_SH 14
331#define __CPE_CTRL_UNUSED10(_v) ((_v) << __CPE_CTRL_UNUSED10_SH)
332#define __CPE_ACK_PENDING 0x00002000
333#define __CPE_CTRL_UNUSED40_MK 0x00001c00
334#define __CPE_CTRL_UNUSED40_SH 10
335#define __CPE_CTRL_UNUSED40(_v) ((_v) << __CPE_CTRL_UNUSED40_SH)
336#define __CPE_PCIEID_MK 0x00000300
337#define __CPE_PCIEID_SH 8
338#define __CPE_PCIEID(_v) ((_v) << __CPE_PCIEID_SH)
339#define __CPE_CTRL_UNUSED00_MK 0x000000fe
340#define __CPE_CTRL_UNUSED00_SH 1
341#define __CPE_CTRL_UNUSED00(_v) ((_v) << __CPE_CTRL_UNUSED00_SH)
342#define __CPE_ESIZE 0x00000001
343#define CPE_QCTRL_Q1 0x0003804c
344#define __CPE_CTRL_UNUSED31_MK 0xfc000000
345#define __CPE_CTRL_UNUSED31_SH 26
346#define __CPE_CTRL_UNUSED31(_v) ((_v) << __CPE_CTRL_UNUSED31_SH)
347#define __CPE_CTRL_UNUSED21_MK 0x00f00000
348#define __CPE_CTRL_UNUSED21_SH 20
349#define __CPE_CTRL_UNUSED21(_v) ((_v) << __CPE_CTRL_UNUSED21_SH)
350#define __CPE_CTRL_UNUSED11_MK 0x0000c000
351#define __CPE_CTRL_UNUSED11_SH 14
352#define __CPE_CTRL_UNUSED11(_v) ((_v) << __CPE_CTRL_UNUSED11_SH)
353#define __CPE_CTRL_UNUSED41_MK 0x00001c00
354#define __CPE_CTRL_UNUSED41_SH 10
355#define __CPE_CTRL_UNUSED41(_v) ((_v) << __CPE_CTRL_UNUSED41_SH)
356#define __CPE_CTRL_UNUSED01_MK 0x000000fe
357#define __CPE_CTRL_UNUSED01_SH 1
358#define __CPE_CTRL_UNUSED01(_v) ((_v) << __CPE_CTRL_UNUSED01_SH)
359#define RME_PI_PTR_Q0 0x00038020
360#define __LATENCY_TIME_STAMP_MK 0xffff0000
361#define __LATENCY_TIME_STAMP_SH 16
362#define __LATENCY_TIME_STAMP(_v) ((_v) << __LATENCY_TIME_STAMP_SH)
363#define __RME_PI_PTR 0x0000ffff
364#define RME_PI_PTR_Q1 0x00038060
365#define RME_CI_PTR_Q0 0x00038024
366#define __DELAY_TIME_STAMP_MK 0xffff0000
367#define __DELAY_TIME_STAMP_SH 16
368#define __DELAY_TIME_STAMP(_v) ((_v) << __DELAY_TIME_STAMP_SH)
369#define __RME_CI_PTR 0x0000ffff
370#define RME_CI_PTR_Q1 0x00038064
371#define RME_DEPTH_Q0 0x00038028
372#define __RME_DEPTH_UNUSED_MK 0xf8000000
373#define __RME_DEPTH_UNUSED_SH 27
374#define __RME_DEPTH_UNUSED(_v) ((_v) << __RME_DEPTH_UNUSED_SH)
375#define __RME_MSIX_VEC_INDEX_MK 0x07ff0000
376#define __RME_MSIX_VEC_INDEX_SH 16
377#define __RME_MSIX_VEC_INDEX(_v) ((_v) << __RME_MSIX_VEC_INDEX_SH)
378#define __RME_DEPTH 0x0000ffff
379#define RME_DEPTH_Q1 0x00038068
380#define RME_QCTRL_Q0 0x0003802c
381#define __RME_INT_LATENCY_TIMER_MK 0xff000000
382#define __RME_INT_LATENCY_TIMER_SH 24
383#define __RME_INT_LATENCY_TIMER(_v) ((_v) << __RME_INT_LATENCY_TIMER_SH)
384#define __RME_INT_DELAY_TIMER_MK 0x00ff0000
385#define __RME_INT_DELAY_TIMER_SH 16
386#define __RME_INT_DELAY_TIMER(_v) ((_v) << __RME_INT_DELAY_TIMER_SH)
387#define __RME_INT_DELAY_DISABLE 0x00008000
388#define __RME_DLY_DELAY_DISABLE 0x00004000
389#define __RME_ACK_PENDING 0x00002000
390#define __RME_FULL_INTERRUPT_DISABLE 0x00001000
391#define __RME_CTRL_UNUSED10_MK 0x00000c00
392#define __RME_CTRL_UNUSED10_SH 10
393#define __RME_CTRL_UNUSED10(_v) ((_v) << __RME_CTRL_UNUSED10_SH)
394#define __RME_PCIEID_MK 0x00000300
395#define __RME_PCIEID_SH 8
396#define __RME_PCIEID(_v) ((_v) << __RME_PCIEID_SH)
397#define __RME_CTRL_UNUSED00_MK 0x000000fe
398#define __RME_CTRL_UNUSED00_SH 1
399#define __RME_CTRL_UNUSED00(_v) ((_v) << __RME_CTRL_UNUSED00_SH)
400#define __RME_ESIZE 0x00000001
401#define RME_QCTRL_Q1 0x0003806c
402#define __RME_CTRL_UNUSED11_MK 0x00000c00
403#define __RME_CTRL_UNUSED11_SH 10
404#define __RME_CTRL_UNUSED11(_v) ((_v) << __RME_CTRL_UNUSED11_SH)
405#define __RME_CTRL_UNUSED01_MK 0x000000fe
406#define __RME_CTRL_UNUSED01_SH 1
407#define __RME_CTRL_UNUSED01(_v) ((_v) << __RME_CTRL_UNUSED01_SH)
408#define PSS_CTL_REG 0x00018800
409#define __PSS_I2C_CLK_DIV_MK 0x007f0000
410#define __PSS_I2C_CLK_DIV_SH 16
411#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH)
412#define __PSS_LMEM_INIT_DONE 0x00001000
413#define __PSS_LMEM_RESET 0x00000200
414#define __PSS_LMEM_INIT_EN 0x00000100
415#define __PSS_LPU1_RESET 0x00000002
416#define __PSS_LPU0_RESET 0x00000001
417#define PSS_ERR_STATUS_REG 0x00018810
418#define __PSS_LPU1_TCM_READ_ERR 0x00200000
419#define __PSS_LPU0_TCM_READ_ERR 0x00100000
420#define __PSS_LMEM5_CORR_ERR 0x00080000
421#define __PSS_LMEM4_CORR_ERR 0x00040000
422#define __PSS_LMEM3_CORR_ERR 0x00020000
423#define __PSS_LMEM2_CORR_ERR 0x00010000
424#define __PSS_LMEM1_CORR_ERR 0x00008000
425#define __PSS_LMEM0_CORR_ERR 0x00004000
426#define __PSS_LMEM5_UNCORR_ERR 0x00002000
427#define __PSS_LMEM4_UNCORR_ERR 0x00001000
428#define __PSS_LMEM3_UNCORR_ERR 0x00000800
429#define __PSS_LMEM2_UNCORR_ERR 0x00000400
430#define __PSS_LMEM1_UNCORR_ERR 0x00000200
431#define __PSS_LMEM0_UNCORR_ERR 0x00000100
432#define __PSS_BAL_PERR 0x00000080
433#define __PSS_DIP_IF_ERR 0x00000040
434#define __PSS_IOH_IF_ERR 0x00000020
435#define __PSS_TDS_IF_ERR 0x00000010
436#define __PSS_RDS_IF_ERR 0x00000008
437#define __PSS_SGM_IF_ERR 0x00000004
438#define __PSS_LPU1_RAM_ERR 0x00000002
439#define __PSS_LPU0_RAM_ERR 0x00000001
440#define ERR_SET_REG 0x00018818
441#define __PSS_ERR_STATUS_SET 0x003fffff
442#define PMM_1T_RESET_REG_P0 0x0002381c
443#define __PMM_1T_RESET_P 0x00000001
444#define PMM_1T_RESET_REG_P1 0x00023c1c
445#define HQM_QSET0_RXQ_DRBL_P0 0x00038000
446#define __RXQ0_ADD_VECTORS_P 0x80000000
447#define __RXQ0_STOP_P 0x40000000
448#define __RXQ0_PRD_PTR_P 0x0000ffff
449#define HQM_QSET1_RXQ_DRBL_P0 0x00038080
450#define __RXQ1_ADD_VECTORS_P 0x80000000
451#define __RXQ1_STOP_P 0x40000000
452#define __RXQ1_PRD_PTR_P 0x0000ffff
453#define HQM_QSET0_RXQ_DRBL_P1 0x0003c000
454#define HQM_QSET1_RXQ_DRBL_P1 0x0003c080
455#define HQM_QSET0_TXQ_DRBL_P0 0x00038020
456#define __TXQ0_ADD_VECTORS_P 0x80000000
457#define __TXQ0_STOP_P 0x40000000
458#define __TXQ0_PRD_PTR_P 0x0000ffff
459#define HQM_QSET1_TXQ_DRBL_P0 0x000380a0
460#define __TXQ1_ADD_VECTORS_P 0x80000000
461#define __TXQ1_STOP_P 0x40000000
462#define __TXQ1_PRD_PTR_P 0x0000ffff
463#define HQM_QSET0_TXQ_DRBL_P1 0x0003c020
464#define HQM_QSET1_TXQ_DRBL_P1 0x0003c0a0
465#define HQM_QSET0_IB_DRBL_1_P0 0x00038040
466#define __IB1_0_ACK_P 0x80000000
467#define __IB1_0_DISABLE_P 0x40000000
468#define __IB1_0_COALESCING_CFG_P_MK 0x00ff0000
469#define __IB1_0_COALESCING_CFG_P_SH 16
470#define __IB1_0_COALESCING_CFG_P(_v) ((_v) << __IB1_0_COALESCING_CFG_P_SH)
471#define __IB1_0_NUM_OF_ACKED_EVENTS_P 0x0000ffff
472#define HQM_QSET1_IB_DRBL_1_P0 0x000380c0
473#define __IB1_1_ACK_P 0x80000000
474#define __IB1_1_DISABLE_P 0x40000000
475#define __IB1_1_COALESCING_CFG_P_MK 0x00ff0000
476#define __IB1_1_COALESCING_CFG_P_SH 16
477#define __IB1_1_COALESCING_CFG_P(_v) ((_v) << __IB1_1_COALESCING_CFG_P_SH)
478#define __IB1_1_NUM_OF_ACKED_EVENTS_P 0x0000ffff
479#define HQM_QSET0_IB_DRBL_1_P1 0x0003c040
480#define HQM_QSET1_IB_DRBL_1_P1 0x0003c0c0
481#define HQM_QSET0_IB_DRBL_2_P0 0x00038060
482#define __IB2_0_ACK_P 0x80000000
483#define __IB2_0_DISABLE_P 0x40000000
484#define __IB2_0_COALESCING_CFG_P_MK 0x00ff0000
485#define __IB2_0_COALESCING_CFG_P_SH 16
486#define __IB2_0_COALESCING_CFG_P(_v) ((_v) << __IB2_0_COALESCING_CFG_P_SH)
487#define __IB2_0_NUM_OF_ACKED_EVENTS_P 0x0000ffff
488#define HQM_QSET1_IB_DRBL_2_P0 0x000380e0
489#define __IB2_1_ACK_P 0x80000000
490#define __IB2_1_DISABLE_P 0x40000000
491#define __IB2_1_COALESCING_CFG_P_MK 0x00ff0000
492#define __IB2_1_COALESCING_CFG_P_SH 16
493#define __IB2_1_COALESCING_CFG_P(_v) ((_v) << __IB2_1_COALESCING_CFG_P_SH)
494#define __IB2_1_NUM_OF_ACKED_EVENTS_P 0x0000ffff
495#define HQM_QSET0_IB_DRBL_2_P1 0x0003c060
496#define HQM_QSET1_IB_DRBL_2_P1 0x0003c0e0
497
498
499/*
500 * These definitions are either in error/missing in spec. Its auto-generated
501 * from hard coded values in regparse.pl.
502 */
503#define __EMPHPOST_AT_4G_MK_FIX 0x0000001c
504#define __EMPHPOST_AT_4G_SH_FIX 0x00000002
505#define __EMPHPRE_AT_4G_FIX 0x00000003
506#define __SFP_TXRATE_EN_FIX 0x00000100
507#define __SFP_RXRATE_EN_FIX 0x00000080
508
509
510/*
511 * These register definitions are auto-generated from hard coded values
512 * in regparse.pl.
513 */
514
515
516/*
517 * These register mapping definitions are auto-generated from mapping tables
518 * in regparse.pl.
519 */
520#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG
521#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG
522#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG
523#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG
524#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG
525
526#define CPE_DEPTH_Q(__n) \
527 (CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0))
528#define CPE_QCTRL_Q(__n) \
529 (CPE_QCTRL_Q0 + (__n) * (CPE_QCTRL_Q1 - CPE_QCTRL_Q0))
530#define CPE_PI_PTR_Q(__n) \
531 (CPE_PI_PTR_Q0 + (__n) * (CPE_PI_PTR_Q1 - CPE_PI_PTR_Q0))
532#define CPE_CI_PTR_Q(__n) \
533 (CPE_CI_PTR_Q0 + (__n) * (CPE_CI_PTR_Q1 - CPE_CI_PTR_Q0))
534#define RME_DEPTH_Q(__n) \
535 (RME_DEPTH_Q0 + (__n) * (RME_DEPTH_Q1 - RME_DEPTH_Q0))
536#define RME_QCTRL_Q(__n) \
537 (RME_QCTRL_Q0 + (__n) * (RME_QCTRL_Q1 - RME_QCTRL_Q0))
538#define RME_PI_PTR_Q(__n) \
539 (RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0))
540#define RME_CI_PTR_Q(__n) \
541 (RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0))
542#define HQM_QSET_RXQ_DRBL_P0(__n) (HQM_QSET0_RXQ_DRBL_P0 + (__n) \
543 * (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0))
544#define HQM_QSET_TXQ_DRBL_P0(__n) (HQM_QSET0_TXQ_DRBL_P0 + (__n) \
545 * (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0))
546#define HQM_QSET_IB_DRBL_1_P0(__n) (HQM_QSET0_IB_DRBL_1_P0 + (__n) \
547 * (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0))
548#define HQM_QSET_IB_DRBL_2_P0(__n) (HQM_QSET0_IB_DRBL_2_P0 + (__n) \
549 * (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0))
550#define HQM_QSET_RXQ_DRBL_P1(__n) (HQM_QSET0_RXQ_DRBL_P1 + (__n) \
551 * (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1))
552#define HQM_QSET_TXQ_DRBL_P1(__n) (HQM_QSET0_TXQ_DRBL_P1 + (__n) \
553 * (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1))
554#define HQM_QSET_IB_DRBL_1_P1(__n) (HQM_QSET0_IB_DRBL_1_P1 + (__n) \
555 * (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1))
556#define HQM_QSET_IB_DRBL_2_P1(__n) (HQM_QSET0_IB_DRBL_2_P1 + (__n) \
557 * (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1))
558
559#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
560#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
561#define CPE_Q_MASK(__q) ((__q) & 0x3)
562#define RME_Q_MASK(__q) ((__q) & 0x3)
563
564
565/*
566 * PCI MSI-X vector defines
567 */
568enum {
569 BFA_MSIX_CPE_Q0 = 0,
570 BFA_MSIX_CPE_Q1 = 1,
571 BFA_MSIX_CPE_Q2 = 2,
572 BFA_MSIX_CPE_Q3 = 3,
573 BFA_MSIX_RME_Q0 = 4,
574 BFA_MSIX_RME_Q1 = 5,
575 BFA_MSIX_RME_Q2 = 6,
576 BFA_MSIX_RME_Q3 = 7,
577 BFA_MSIX_LPU_ERR = 8,
578 BFA_MSIX_CT_MAX = 9,
579};
580
581/*
582 * And corresponding host interrupt status bit field defines
583 */
584#define __HFN_INT_CPE_Q0 0x00000001U
585#define __HFN_INT_CPE_Q1 0x00000002U
586#define __HFN_INT_CPE_Q2 0x00000004U
587#define __HFN_INT_CPE_Q3 0x00000008U
588#define __HFN_INT_CPE_Q4 0x00000010U
589#define __HFN_INT_CPE_Q5 0x00000020U
590#define __HFN_INT_CPE_Q6 0x00000040U
591#define __HFN_INT_CPE_Q7 0x00000080U
592#define __HFN_INT_RME_Q0 0x00000100U
593#define __HFN_INT_RME_Q1 0x00000200U
594#define __HFN_INT_RME_Q2 0x00000400U
595#define __HFN_INT_RME_Q3 0x00000800U
596#define __HFN_INT_RME_Q4 0x00001000U
597#define __HFN_INT_RME_Q5 0x00002000U
598#define __HFN_INT_RME_Q6 0x00004000U
599#define __HFN_INT_RME_Q7 0x00008000U
600#define __HFN_INT_ERR_EMC 0x00010000U
601#define __HFN_INT_ERR_LPU0 0x00020000U
602#define __HFN_INT_ERR_LPU1 0x00040000U
603#define __HFN_INT_ERR_PSS 0x00080000U
604#define __HFN_INT_MBOX_LPU0 0x00100000U
605#define __HFN_INT_MBOX_LPU1 0x00200000U
606#define __HFN_INT_MBOX1_LPU0 0x00400000U
607#define __HFN_INT_MBOX1_LPU1 0x00800000U
608#define __HFN_INT_LL_HALT 0x01000000U
609#define __HFN_INT_CPE_MASK 0x000000ffU
610#define __HFN_INT_RME_MASK 0x0000ff00U
611
612
613/*
614 * catapult memory map.
615 */
616#define LL_PGN_HQM0 0x0096
617#define LL_PGN_HQM1 0x0097
618#define PSS_SMEM_PAGE_START 0x8000
619#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15))
620#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff)
621
622/*
623 * End of catapult memory map
624 */
625
626
627#endif /* __BFI_CTREG_H__ */
diff --git a/drivers/scsi/bfa/bfi_ms.h b/drivers/scsi/bfa/bfi_ms.h
new file mode 100644
index 000000000000..69ac85f9e938
--- /dev/null
+++ b/drivers/scsi/bfa/bfi_ms.h
@@ -0,0 +1,765 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_MS_H__
19#define __BFI_MS_H__
20
21#include "bfi.h"
22#include "bfa_fc.h"
23#include "bfa_defs_svc.h"
24
25#pragma pack(1)
26
/* Host-to-IOC function-config mailbox message ids. */
enum bfi_iocfc_h2i_msgs {
	BFI_IOCFC_H2I_CFG_REQ = 1,
	BFI_IOCFC_H2I_SET_INTR_REQ = 2,
	BFI_IOCFC_H2I_UPDATEQ_REQ = 3,
};

/* IOC-to-host replies; BFA_I2HM() tags the id as an I2H message. */
enum bfi_iocfc_i2h_msgs {
	BFI_IOCFC_I2H_CFG_REPLY = BFA_I2HM(1),
	BFI_IOCFC_I2H_UPDATEQ_RSP = BFA_I2HM(3),
};
37
/* IOC function configuration the host DMAs to firmware (CFG_REQ payload). */
struct bfi_iocfc_cfg_s {
	u8	num_cqs;	/* Number of CQs to be used	*/
	u8	sense_buf_len;	/* SCSI sense length		*/
	u16	rsvd_1;
	u32	endian_sig;	/* endian signature of host	*/

	/**
	 * Request and response circular queue base addresses, size and
	 * shadow index pointers.
	 */
	union bfi_addr_u req_cq_ba[BFI_IOC_MAX_CQS];
	union bfi_addr_u req_shadow_ci[BFI_IOC_MAX_CQS];
	u16	req_cq_elems[BFI_IOC_MAX_CQS];
	union bfi_addr_u rsp_cq_ba[BFI_IOC_MAX_CQS];
	union bfi_addr_u rsp_shadow_pi[BFI_IOC_MAX_CQS];
	u16	rsp_cq_elems[BFI_IOC_MAX_CQS];

	union bfi_addr_u stats_addr;	/* DMA-able address for stats	*/
	union bfi_addr_u cfgrsp_addr;	/* config response dma address	*/
	union bfi_addr_u ioim_snsbase;	/* IO sense buffer base address	*/
	struct bfa_iocfc_intr_attr_s intr_attr;	/* IOC interrupt attributes */
};
60
/**
 * Boot target wwn information for this port. This contains either the stored
 * or discovered boot target port wwns for the port.
 */
struct bfi_iocfc_bootwwns {
	wwn_t	wwn[BFA_BOOT_BOOTLUN_MAX];
	u8	nwwns;		/* number of valid entries in wwn[] */
	u8	rsvd[7];
};

/* Firmware's reply to CFG_REQ, DMAed back to cfgrsp_addr. */
struct bfi_iocfc_cfgrsp_s {
	struct bfa_iocfc_fwcfg_s	fwcfg;
	struct bfa_iocfc_intr_attr_s	intr_attr;
	struct bfi_iocfc_bootwwns	bootwwns;
	struct bfi_pbc_s		pbc_cfg;
};
77
/**
 * BFI_IOCFC_H2I_CFG_REQ message
 */
struct bfi_iocfc_cfg_req_s {
	struct bfi_mhdr_s	mh;
	/* DMA address of the struct bfi_iocfc_cfg_s to apply */
	union bfi_addr_u	ioc_cfg_dma_addr;
};


/**
 * BFI_IOCFC_I2H_CFG_REPLY message
 */
struct bfi_iocfc_cfg_reply_s {
	struct bfi_mhdr_s mh;	/* Common msg header */
	u8	cfg_success;	/* cfg reply status */
	u8	lpu_bm;		/* LPUs assigned for this IOC */
	u8	rsvd[2];
};


/**
 * BFI_IOCFC_H2I_SET_INTR_REQ message
 */
struct bfi_iocfc_set_intr_req_s {
	struct bfi_mhdr_s mh;	/* common msg header */
	u8	coalesce;	/* enable intr coalescing */
	u8	rsvd[3];
	u16	delay;		/* delay timer 0..1125us */
	u16	latency;	/* latency timer 0..225us */
};


/**
 * BFI_IOCFC_H2I_UPDATEQ_REQ message
 */
struct bfi_iocfc_updateq_req_s {
	struct bfi_mhdr_s mh;	/* common msg header */
	u32	reqq_ba;	/* reqq base addr */
	u32	rspq_ba;	/* rspq base addr */
	u32	reqq_sci;	/* reqq shadow ci */
	u32	rspq_spi;	/* rspq shadow pi */
};


/**
 * BFI_IOCFC_I2H_UPDATEQ_RSP message
 */
struct bfi_iocfc_updateq_rsp_s {
	struct bfi_mhdr_s mh;	/* common msg header */
	u8	status;		/* updateq status */
	u8	rsvd[3];
};


/**
 * H2I Messages — by-value overlay of every host-to-IOC mailbox message.
 */
union bfi_iocfc_h2i_msg_u {
	struct bfi_mhdr_s		mh;
	struct bfi_iocfc_cfg_req_s	cfg_req;
	struct bfi_iocfc_updateq_req_s	updateq_req;
	u32				mboxmsg[BFI_IOC_MSGSZ];
};


/**
 * I2H Messages — by-value overlay of every IOC-to-host mailbox message.
 */
union bfi_iocfc_i2h_msg_u {
	struct bfi_mhdr_s		mh;
	struct bfi_iocfc_cfg_reply_s	cfg_reply;
	struct bfi_iocfc_updateq_rsp_s	updateq_rsp;
	u32				mboxmsg[BFI_IOC_MSGSZ];
};
152
153
/* Host-to-firmware FC port control messages. */
enum bfi_fcport_h2i {
	BFI_FCPORT_H2I_ENABLE_REQ		= (1),
	BFI_FCPORT_H2I_DISABLE_REQ		= (2),
	BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ	= (3),
	BFI_FCPORT_H2I_STATS_GET_REQ		= (4),
	BFI_FCPORT_H2I_STATS_CLEAR_REQ		= (5),
};


/* Firmware-to-host FC port replies and async events. */
enum bfi_fcport_i2h {
	BFI_FCPORT_I2H_ENABLE_RSP		= BFA_I2HM(1),
	BFI_FCPORT_I2H_DISABLE_RSP		= BFA_I2HM(2),
	BFI_FCPORT_I2H_SET_SVC_PARAMS_RSP	= BFA_I2HM(3),
	BFI_FCPORT_I2H_STATS_GET_RSP		= BFA_I2HM(4),
	BFI_FCPORT_I2H_STATS_CLEAR_RSP		= BFA_I2HM(5),
	BFI_FCPORT_I2H_EVENT			= BFA_I2HM(6),	/* link state */
	BFI_FCPORT_I2H_TRUNK_SCN		= BFA_I2HM(7),	/* trunk change */
	BFI_FCPORT_I2H_ENABLE_AEN		= BFA_I2HM(8),
	BFI_FCPORT_I2H_DISABLE_AEN		= BFA_I2HM(9),
};
174
175
/**
 * Generic REQ type
 */
struct bfi_fcport_req_s {
	struct bfi_mhdr_s mh;	/* msg header */
	u32	msgtag;		/* msgtag for reply */
};

/**
 * Generic RSP type
 */
struct bfi_fcport_rsp_s {
	struct bfi_mhdr_s mh;	/* common msg header */
	u8	status;		/* port enable status */
	u8	rsvd[3];
	u32	msgtag;		/* msgtag for reply */
};

/**
 * BFI_FCPORT_H2I_ENABLE_REQ
 */
struct bfi_fcport_enable_req_s {
	struct bfi_mhdr_s mh;	/* msg header */
	u32	rsvd1;
	wwn_t	nwwn;		/* node wwn of physical port */
	wwn_t	pwwn;		/* port wwn of physical port */
	struct bfa_port_cfg_s port_cfg;	/* port configuration */
	union bfi_addr_u stats_dma_addr;	/* DMA address for stats */
	u32	msgtag;		/* msgtag for reply */
	u32	rsvd2;
};

/**
 * BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ
 */
struct bfi_fcport_set_svc_params_req_s {
	struct bfi_mhdr_s mh;	/* msg header */
	u16	tx_bbcredit;	/* Tx credits */
	u16	rsvd;
};

/**
 * BFI_FCPORT_I2H_EVENT
 */
struct bfi_fcport_event_s {
	struct bfi_mhdr_s mh;	/* common msg header */
	struct bfa_port_link_s link_state;
};

/**
 * BFI_FCPORT_I2H_TRUNK_SCN — per-link state within a trunk.
 */
struct bfi_fcport_trunk_link_s {
	wwn_t	trunk_wwn;
	u8	fctl;		/* bfa_trunk_link_fctl_t */
	u8	state;		/* bfa_trunk_link_state_t */
	u8	speed;		/* bfa_port_speed_t */
	u8	rsvd;
	u32	deskew;
};

#define BFI_FCPORT_MAX_LINKS	2
struct bfi_fcport_trunk_scn_s {
	struct bfi_mhdr_s mh;
	u8	trunk_state;	/* bfa_trunk_state_t */
	u8	trunk_speed;	/* bfa_port_speed_t */
	u8	rsvd_a[2];
	struct bfi_fcport_trunk_link_s tlink[BFI_FCPORT_MAX_LINKS];
};

/**
 * fcport H2I message.
 * Unlike the iocfc unions these hold POINTERS to the messages — they are
 * convenience views for casting a message pointer, not wire overlays.
 */
union bfi_fcport_h2i_msg_u {
	struct bfi_mhdr_s *mhdr;
	struct bfi_fcport_enable_req_s *penable;
	struct bfi_fcport_req_s *pdisable;
	struct bfi_fcport_set_svc_params_req_s *psetsvcparams;
	struct bfi_fcport_req_s *pstatsget;
	struct bfi_fcport_req_s *pstatsclear;
};

/**
 * fcport I2H message (pointer views, see note above).
 */
union bfi_fcport_i2h_msg_u {
	struct bfi_msg_s *msg;
	struct bfi_fcport_rsp_s *penable_rsp;
	struct bfi_fcport_rsp_s *pdisable_rsp;
	struct bfi_fcport_rsp_s *psetsvcparams_rsp;
	struct bfi_fcport_rsp_s *pstatsget_rsp;
	struct bfi_fcport_rsp_s *pstatsclear_rsp;
	struct bfi_fcport_event_s *event;
	struct bfi_fcport_trunk_scn_s *trunk_scn;
};
271
/* FC transport (FCXP) send interface: one request, one response message. */
enum bfi_fcxp_h2i {
	BFI_FCXP_H2I_SEND_REQ = 1,
};

enum bfi_fcxp_i2h {
	BFI_FCXP_I2H_SEND_RSP = BFA_I2HM(1),
};

#define BFA_FCXP_MAX_SGES	2

/**
 * FCXP send request structure
 */
struct bfi_fcxp_send_req_s {
	struct bfi_mhdr_s mh;	/* Common msg header		*/
	u16	fcxp_tag;	/* driver request tag		*/
	u16	max_frmsz;	/* max send frame size		*/
	u16	vf_id;		/* vsan tag if applicable	*/
	u16	rport_fw_hndl;	/* FW Handle for the remote port */
	u8	class;		/* FC class used for req/rsp	*/
	u8	rsp_timeout;	/* timeout in secs, 0-no response */
	u8	cts;		/* continue sequence		*/
	u8	lp_tag;		/* lport tag			*/
	struct fchs_s fchs;	/* request FC header structure	*/
	u32	req_len;	/* request payload length	*/
	u32	rsp_maxlen;	/* max response length expected	*/
	struct bfi_sge_s req_sge[BFA_FCXP_MAX_SGES];	/* request buf	*/
	struct bfi_sge_s rsp_sge[BFA_FCXP_MAX_SGES];	/* response buf	*/
};

/**
 * FCXP send response structure
 */
struct bfi_fcxp_send_rsp_s {
	struct bfi_mhdr_s mh;	/* Common msg header		*/
	u16	fcxp_tag;	/* send request tag		*/
	u8	req_status;	/* request status		*/
	u8	rsvd;
	u32	rsp_len;	/* actual response length	*/
	u32	residue_len;	/* residual response length	*/
	struct fchs_s fchs;	/* response FC header structure	*/
};
314
/* Unsolicited frame (UF) receive interface: host posts buffers, firmware
 * notifies when a frame lands in one. */
enum bfi_uf_h2i {
	BFI_UF_H2I_BUF_POST = 1,
};

enum bfi_uf_i2h {
	BFI_UF_I2H_FRM_RCVD = BFA_I2HM(1),
};

#define BFA_UF_MAX_SGES	2

struct bfi_uf_buf_post_s {
	struct bfi_mhdr_s mh;	/* Common msg header		*/
	u16	buf_tag;	/* buffer tag			*/
	u16	buf_len;	/* total buffer length		*/
	struct bfi_sge_s sge[BFA_UF_MAX_SGES];	/* buffer DMA SGEs	*/
};

struct bfi_uf_frm_rcvd_s {
	struct bfi_mhdr_s mh;	/* Common msg header		*/
	u16	buf_tag;	/* buffer tag			*/
	u16	rsvd;
	u16	frm_len;	/* received frame length	*/
	u16	xfr_len;	/* transferred length		*/
};
339
/* Logical port services (fabric login/logout) interface. */
enum bfi_lps_h2i_msgs {
	BFI_LPS_H2I_LOGIN_REQ = 1,
	BFI_LPS_H2I_LOGOUT_REQ = 2,
};

/*
 * NOTE(review): the I2H values below carry an H2I prefix — almost certainly
 * a naming slip. Kept as-is: renaming would break every user of these names.
 */
enum bfi_lps_i2h_msgs {
	BFI_LPS_H2I_LOGIN_RSP = BFA_I2HM(1),
	BFI_LPS_H2I_LOGOUT_RSP = BFA_I2HM(2),
	BFI_LPS_H2I_CVL_EVENT = BFA_I2HM(3),
};

struct bfi_lps_login_req_s {
	struct bfi_mhdr_s mh;	/* common msg header */
	u8	lp_tag;		/* logical port tag */
	u8	alpa;
	u16	pdu_size;
	wwn_t	pwwn;		/* port wwn to log in with */
	wwn_t	nwwn;		/* node wwn to log in with */
	u8	fdisc;		/* nonzero: FDISC rather than FLOGI */
	u8	auth_en;	/* authentication enabled */
	u8	rsvd[2];
};

struct bfi_lps_login_rsp_s {
	struct bfi_mhdr_s mh;	/* common msg header */
	u8	lp_tag;
	u8	status;
	u8	lsrjt_rsn;	/* LS_RJT reason, on reject */
	u8	lsrjt_expl;	/* LS_RJT explanation, on reject */
	wwn_t	port_name;
	wwn_t	node_name;
	u16	bb_credit;
	u8	f_port;
	u8	npiv_en;
	u32	lp_pid:24;	/* assigned port id */
	u32	auth_req:8;
	mac_t	lp_mac;
	mac_t	fcf_mac;
	u8	ext_status;
	u8	brcd_switch;	/* attached peer is brcd switch */
};

struct bfi_lps_logout_req_s {
	struct bfi_mhdr_s mh;	/* common msg header */
	u8	lp_tag;
	u8	rsvd[3];
	wwn_t	port_name;
};

struct bfi_lps_logout_rsp_s {
	struct bfi_mhdr_s mh;	/* common msg header */
	u8	lp_tag;
	u8	status;
	u8	rsvd[2];
};

/* Clear-virtual-link event for the tagged logical port. */
struct bfi_lps_cvl_event_s {
	struct bfi_mhdr_s mh;	/* common msg header */
	u8	lp_tag;
	u8	rsvd[3];
};

union bfi_lps_h2i_msg_u {
	struct bfi_mhdr_s	*msg;
	struct bfi_lps_login_req_s	*login_req;
	struct bfi_lps_logout_req_s	*logout_req;
};

union bfi_lps_i2h_msg_u {
	struct bfi_msg_s	*msg;
	struct bfi_lps_login_rsp_s	*login_rsp;
	struct bfi_lps_logout_rsp_s	*logout_rsp;
	struct bfi_lps_cvl_event_s	*cvl_event;
};
414
/* Remote port (rport) create/delete/speed interface. */
enum bfi_rport_h2i_msgs {
	BFI_RPORT_H2I_CREATE_REQ = 1,
	BFI_RPORT_H2I_DELETE_REQ = 2,
	BFI_RPORT_H2I_SET_SPEED_REQ  = 3,
};

enum bfi_rport_i2h_msgs {
	BFI_RPORT_I2H_CREATE_RSP = BFA_I2HM(1),
	BFI_RPORT_I2H_DELETE_RSP = BFA_I2HM(2),
	BFI_RPORT_I2H_QOS_SCN    = BFA_I2HM(3),
};

struct bfi_rport_create_req_s {
	struct bfi_mhdr_s mh;	/* common msg header */
	u16	bfa_handle;	/* host rport handle */
	u16	max_frmsz;	/* max rcv pdu size */
	u32	pid:24,		/* remote port ID */
		lp_tag:8;	/* local port tag */
	u32	local_pid:24,	/* local port ID */
		cisc:8;
	u8	fc_class;	/* supported FC classes */
	u8	vf_en;		/* virtual fabric enable */
	u16	vf_id;		/* virtual fabric ID */
};

struct bfi_rport_create_rsp_s {
	struct bfi_mhdr_s mh;	/* common msg header */
	u8	status;		/* rport creation status */
	u8	rsvd[3];
	u16	bfa_handle;	/* host rport handle */
	u16	fw_handle;	/* firmware rport handle */
	struct bfa_rport_qos_attr_s qos_attr;	/* QoS Attributes */
};

/* NOTE(review): prefix is bfa_, unlike the bfi_ siblings in this interface. */
struct bfa_rport_speed_req_s {
	struct bfi_mhdr_s mh;	/* common msg header */
	u16	fw_handle;	/* firmware rport handle */
	u8	speed;		/* rport's speed via RPSC */
	u8	rsvd;
};

struct bfi_rport_delete_req_s {
	struct bfi_mhdr_s mh;	/* common msg header */
	u16	fw_handle;	/* firmware rport handle */
	u16	rsvd;
};

struct bfi_rport_delete_rsp_s {
	struct bfi_mhdr_s mh;	/* common msg header */
	u16	bfa_handle;	/* host rport handle */
	u8	status;		/* rport deletion status */
	u8	rsvd;
};

/* QoS attribute change notification for one rport. */
struct bfi_rport_qos_scn_s {
	struct bfi_mhdr_s mh;	/* common msg header */
	u16	bfa_handle;	/* host rport handle */
	u16	rsvd;
	struct bfa_rport_qos_attr_s old_qos_attr;	/* Old QoS Attributes */
	struct bfa_rport_qos_attr_s new_qos_attr;	/* New QoS Attributes */
};

union bfi_rport_h2i_msg_u {
	struct bfi_msg_s	*msg;
	struct bfi_rport_create_req_s	*create_req;
	struct bfi_rport_delete_req_s	*delete_req;
	struct bfa_rport_speed_req_s	*speed_req;
};

union bfi_rport_i2h_msg_u {
	struct bfi_msg_s	*msg;
	struct bfi_rport_create_rsp_s	*create_rsp;
	struct bfi_rport_delete_rsp_s	*delete_rsp;
	struct bfi_rport_qos_scn_s	*qos_scn_evt;
};
490
/*
 * Initiator mode I-T nexus interface defines.
 */

enum bfi_itnim_h2i {
	BFI_ITNIM_H2I_CREATE_REQ = 1,	/* i-t nexus creation */
	BFI_ITNIM_H2I_DELETE_REQ = 2,	/* i-t nexus deletion */
};

enum bfi_itnim_i2h {
	BFI_ITNIM_I2H_CREATE_RSP = BFA_I2HM(1),
	BFI_ITNIM_I2H_DELETE_RSP = BFA_I2HM(2),
	BFI_ITNIM_I2H_SLER_EVENT = BFA_I2HM(3),	/* sequence-level error event */
};

struct bfi_itnim_create_req_s {
	struct bfi_mhdr_s mh;	/* common msg header		*/
	u16	fw_handle;	/* f/w handle for itnim		*/
	u8	class;		/* FC class for IO		*/
	u8	seq_rec;	/* sequence recovery support	*/
	u8	msg_no;		/* seq id of the msg		*/
};

struct bfi_itnim_create_rsp_s {
	struct bfi_mhdr_s mh;	/* common msg header		*/
	u16	bfa_handle;	/* bfa handle for itnim		*/
	u8	status;		/* fcp request status		*/
	u8	seq_id;		/* seq id of the msg		*/
};

struct bfi_itnim_delete_req_s {
	struct bfi_mhdr_s mh;	/* common msg header		*/
	u16	fw_handle;	/* f/w itnim handle		*/
	u8	seq_id;		/* seq id of the msg		*/
	u8	rsvd;
};

struct bfi_itnim_delete_rsp_s {
	struct bfi_mhdr_s mh;	/* common msg header		*/
	u16	bfa_handle;	/* bfa handle for itnim		*/
	u8	status;		/* fcp request status		*/
	u8	seq_id;		/* seq id of the msg		*/
};

struct bfi_itnim_sler_event_s {
	struct bfi_mhdr_s mh;	/* common msg header		*/
	u16	bfa_handle;	/* bfa handle for itnim		*/
	u16	rsvd;
};

union bfi_itnim_h2i_msg_u {
	struct bfi_itnim_create_req_s *create_req;
	struct bfi_itnim_delete_req_s *delete_req;
	struct bfi_msg_s	*msg;
};

union bfi_itnim_i2h_msg_u {
	struct bfi_itnim_create_rsp_s *create_rsp;
	struct bfi_itnim_delete_rsp_s *delete_rsp;
	struct bfi_itnim_sler_event_s *sler_event;
	struct bfi_msg_s	*msg;
};
553
/*
 * Initiator mode IO interface defines.
 */

enum bfi_ioim_h2i {
	BFI_IOIM_H2I_IOABORT_REQ = 1,	/* IO abort request	*/
	BFI_IOIM_H2I_IOCLEANUP_REQ = 2,	/* IO cleanup request	*/
};

enum bfi_ioim_i2h {
	BFI_IOIM_I2H_IO_RSP = BFA_I2HM(1),	/* non-fp IO response	*/
	BFI_IOIM_I2H_IOABORT_RSP = BFA_I2HM(2),	/* ABORT rsp		*/
};

/**
 * IO command DIF info
 */
struct bfi_ioim_dif_s {
	u32	dif_info[4];
};

/**
 * FCP IO messages overview
 *
 * @note
 *	- Max CDB length supported is 64 bytes.
 *	- SCSI Linked commands and SCSI bi-directional Commands not
 *	  supported.
 *
 */
struct bfi_ioim_req_s {
	struct bfi_mhdr_s mh;	/* Common msg header		*/
	u16	io_tag;		/* I/O tag			*/
	u16	rport_hdl;	/* itnim/rport firmware handle	*/
	struct fcp_cmnd_s cmnd;	/* IO request info		*/

	/**
	 * SG elements array within the IO request must be double word
	 * aligned. This alignment is required to optimize SGM setup for the
	 * IO.
	 */
	struct bfi_sge_s sges[BFI_SGE_INLINE_MAX];
	u8	io_timeout;
	u8	dif_en;		/* DIF (data integrity) enabled for this IO */
	u8	rsvd_a[2];
	struct bfi_ioim_dif_s dif;
};
600
/**
 * This table shows various IO status codes from firmware and their
 * meaning. Host driver can use these status codes to further process
 * IO completions.
 *
 * BFI_IOIM_STS_OK		: IO completed with error free SCSI &
 *				  transport status.
 *				  io-tag can be reused.
 *
 * BFA_IOIM_STS_SCSI_ERR	: IO completed with scsi error.
 *				  - io-tag can be reused.
 *
 * BFI_IOIM_STS_HOST_ABORTED	: IO was aborted successfully due to
 *				  host request.
 *				  - io-tag cannot be reused yet.
 *
 * BFI_IOIM_STS_ABORTED		: IO was aborted successfully
 *				  internally by f/w.
 *				  - io-tag cannot be reused yet.
 *
 * BFI_IOIM_STS_TIMEDOUT	: IO timedout and ABTS/RRQ is happening
 *				  in the firmware and
 *				  - io-tag cannot be reused yet.
 *
 * BFI_IOIM_STS_SQER_NEEDED	: Firmware could not recover the IO
 *				  with sequence level error
 *				  logic and hence host needs to retry
 *				  this IO with a different IO tag
 *				  - io-tag cannot be used yet.
 *
 * BFI_IOIM_STS_NEXUS_ABORT	: Second Level Error Recovery from host
 *				  is required because 2 consecutive ABTS
 *				  timedout and host needs logout and
 *				  re-login with the target
 *				  - io-tag cannot be used yet.
 *
 * BFI_IOIM_STS_UNDERRUN	: IO completed with SCSI status good,
 *				  but the data transferred is less than
 *				  the fcp data length in the command.
 *				  ex. SCSI INQUIRY where transferred
 *				  data length and residue count in FCP
 *				  response accounts for total fcp-dl
 *				  - io-tag can be reused.
 *
 * BFI_IOIM_STS_OVERRUN		: IO completed with SCSI status good,
 *				  but the data transferred is more than
 *				  fcp data length in the command. ex.
 *				  TAPE IOs where blocks can of unequal
 *				  lengths.
 *				  - io-tag can be reused.
 *
 * BFI_IOIM_STS_RES_FREE	: Firmware has completed using io-tag
 *				  during abort process
 *				  - io-tag can be reused.
 *
 * BFI_IOIM_STS_PROTO_ERR	: Firmware detected a protocol error.
 *				  ex target sent more data than
 *				  requested, or there was data frame
 *				  loss and other reasons
 *				  - io-tag cannot be used yet.
 *
 * BFI_IOIM_STS_DIF_ERR		: Firmware detected DIF error. ex: DIF
 *				  CRC err or Ref Tag err or App tag err.
 *				  - io-tag can be reused.
 *
 * BFA_IOIM_STS_TSK_MGT_ABORT	: IO was aborted because of Task
 *				  Management command from the host
 *				  - io-tag can be reused.
 *
 * BFI_IOIM_STS_UTAG		: Firmware does not know about this
 *				  io_tag.
 *				  - io-tag can be reused.
 *
 * NOTE(review): several statuses documented above (SCSI_ERR, NEXUS_ABORT,
 * UNDERRUN, OVERRUN, DIF_ERR, TSK_MGT_ABORT) have no enumerator below —
 * presumably reported through other fields; confirm against the handlers.
 */
enum bfi_ioim_status {
	BFI_IOIM_STS_OK = 0,
	BFI_IOIM_STS_HOST_ABORTED = 1,
	BFI_IOIM_STS_ABORTED = 2,
	BFI_IOIM_STS_TIMEDOUT = 3,
	BFI_IOIM_STS_RES_FREE = 4,
	BFI_IOIM_STS_SQER_NEEDED = 5,
	BFI_IOIM_STS_PROTO_ERR = 6,
	BFI_IOIM_STS_UTAG = 7,
	BFI_IOIM_STS_PATHTOV = 8,
};
685
#define BFI_IOIM_SNSLEN	(256)	/* max SCSI sense bytes per IO */
/**
 * I/O response message
 */
struct bfi_ioim_rsp_s {
	struct bfi_mhdr_s mh;	/* common msg header		*/
	u16	io_tag;		/* completed IO tag		*/
	u16	bfa_rport_hndl;	/* related rport handle		*/
	u8	io_status;	/* IO completion status		*/
	u8	reuse_io_tag;	/* IO tag can be reused		*/
	u16	abort_tag;	/* host abort request tag	*/
	u8	scsi_status;	/* scsi status from target	*/
	u8	sns_len;	/* scsi sense length		*/
	u8	resid_flags;	/* IO residue flags		*/
	u8	rsvd_a;
	u32	residue;	/* IO residual length in bytes	*/
	u32	rsvd_b[3];
};

struct bfi_ioim_abort_req_s {
	struct bfi_mhdr_s mh;	/* Common msg header	*/
	u16	io_tag;		/* I/O tag		*/
	u16	abort_tag;	/* unique request tag	*/
};
710
/*
 * Initiator mode task management command interface defines.
 */

enum bfi_tskim_h2i {
	BFI_TSKIM_H2I_TM_REQ = 1,	/* task-mgmt command		*/
	BFI_TSKIM_H2I_ABORT_REQ = 2,	/* abort task-mgmt command	*/
};

enum bfi_tskim_i2h {
	BFI_TSKIM_I2H_TM_RSP = BFA_I2HM(1),
};

struct bfi_tskim_req_s {
	struct bfi_mhdr_s mh;	/* Common msg header		*/
	u16	tsk_tag;	/* task management tag		*/
	u16	itn_fhdl;	/* itn firmware handle		*/
	lun_t	lun;		/* LU number			*/
	u8	tm_flags;	/* see enum fcp_tm_cmnd		*/
	u8	t_secs;		/* Timeout value in seconds	*/
	u8	rsvd[2];
};

struct bfi_tskim_abortreq_s {
	struct bfi_mhdr_s mh;	/* Common msg header		*/
	u16	tsk_tag;	/* task management tag		*/
	u16	rsvd;
};

enum bfi_tskim_status {
	/*
	 * Following are FCP-4 spec defined status codes,
	 * **DO NOT CHANGE THEM **
	 */
	BFI_TSKIM_STS_OK	= 0,
	BFI_TSKIM_STS_NOT_SUPP	= 4,
	BFI_TSKIM_STS_FAILED	= 5,

	/**
	 * Defined by BFA
	 */
	BFI_TSKIM_STS_TIMEOUT	= 10,	/* TM request timedout	*/
	BFI_TSKIM_STS_ABORTED	= 11,	/* Aborted on host request */
};

struct bfi_tskim_rsp_s {
	struct bfi_mhdr_s mh;	/* Common msg header		*/
	u16	tsk_tag;	/* task mgmt cmnd tag		*/
	u8	tsk_status;	/* @ref bfi_tskim_status	*/
	u8	rsvd;
};
762
763#pragma pack()
764
765#endif /* __BFI_MS_H__ */
diff --git a/drivers/scsi/bfa/fab.c b/drivers/scsi/bfa/fab.c
deleted file mode 100644
index 7e3a4d5d7bb4..000000000000
--- a/drivers/scsi/bfa/fab.c
+++ /dev/null
@@ -1,62 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <bfa_svc.h>
20#include "fcs_lport.h"
21#include "fcs_rport.h"
22#include "lport_priv.h"
23
24/**
25 * fab.c port fab implementation.
26 */
27
28/**
29 * bfa_fcs_port_fab_public port fab public functions
30 */
31
/**
 * Called by port to initialize fabric services of the base port.
 * Brings up the name server (NS), state change notification (SCN) and
 * management server (MS) sub-modules, in that order.
 */
void
bfa_fcs_port_fab_init(struct bfa_fcs_port_s *port)
{
	bfa_fcs_port_ns_init(port);
	bfa_fcs_port_scn_init(port);
	bfa_fcs_port_ms_init(port);
}
42
/**
 * Called by port to notify transition to online state.
 * Only NS and SCN are driven online here; see the note in
 * bfa_fcs_port_fab_offline() about the MS asymmetry.
 */
void
bfa_fcs_port_fab_online(struct bfa_fcs_port_s *port)
{
	bfa_fcs_port_ns_online(port);
	bfa_fcs_port_scn_online(port);
}
52
/**
 * Called by port to notify transition to offline state.
 * NOTE(review): MS is taken offline here but is not brought online in
 * bfa_fcs_port_fab_online() — presumably ms_online is triggered elsewhere
 * (e.g. after NS registration); confirm before relying on symmetry.
 */
void
bfa_fcs_port_fab_offline(struct bfa_fcs_port_s *port)
{
	bfa_fcs_port_ns_offline(port);
	bfa_fcs_port_scn_offline(port);
	bfa_fcs_port_ms_offline(port);
}
diff --git a/drivers/scsi/bfa/fabric.c b/drivers/scsi/bfa/fabric.c
deleted file mode 100644
index ddd4ba9317e6..000000000000
--- a/drivers/scsi/bfa/fabric.c
+++ /dev/null
@@ -1,1323 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * fabric.c Fabric module implementation.
20 */
21
22#include "fcs_fabric.h"
23#include "fcs_lport.h"
24#include "fcs_vport.h"
25#include "fcs_trcmod.h"
26#include "fcs_fcxp.h"
27#include "fcs_auth.h"
28#include "fcs.h"
29#include "fcbuild.h"
30#include <log/bfa_log_fcs.h>
31#include <aen/bfa_aen_port.h>
32#include <bfa_svc.h>
33
34BFA_TRC_FILE(FCS, FABRIC);
35
#define BFA_FCS_FABRIC_RETRY_DELAY	(2000)	/* Milliseconds */
#define BFA_FCS_FABRIC_CLEANUP_DELAY	(10000)	/* Milliseconds */

/*
 * Derive the fabric's operating port type from the configured topology:
 * point-to-point reports NPORT, anything else reports NLPORT.
 */
#define bfa_fcs_fabric_set_opertype(__fabric) do {		\
	if (bfa_fcport_get_topology((__fabric)->fcs->bfa)	\
	    == BFA_PPORT_TOPOLOGY_P2P)				\
		(__fabric)->oper_type = BFA_PPORT_TYPE_NPORT;	\
	else							\
		(__fabric)->oper_type = BFA_PPORT_TYPE_NLPORT;	\
} while (0)
46
47/*
48 * forward declarations
49 */
50static void bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric);
51static void bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric);
52static void bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric);
53static void bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric);
54static void bfa_fcs_fabric_delay(void *cbarg);
55static void bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric);
56static void bfa_fcs_fabric_delete_comp(void *cbarg);
57static void bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric,
58 struct fchs_s *fchs, u16 len);
59static void bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric,
60 struct fchs_s *fchs, u16 len);
61static void bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric);
62static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg,
63 struct bfa_fcxp_s *fcxp,
64 void *cbarg, bfa_status_t status,
65 u32 rsp_len,
66 u32 resid_len,
67 struct fchs_s *rspfchs);
68/**
69 * fcs_fabric_sm fabric state machine functions
70 */
71
/**
 * Fabric state machine events
 */
enum bfa_fcs_fabric_event {
	BFA_FCS_FABRIC_SM_CREATE = 1,	/* fabric create from driver	*/
	BFA_FCS_FABRIC_SM_DELETE = 2,	/* fabric delete from driver	*/
	BFA_FCS_FABRIC_SM_LINK_DOWN = 3,	/* link down from port	*/
	BFA_FCS_FABRIC_SM_LINK_UP = 4,	/* link up from port		*/
	BFA_FCS_FABRIC_SM_CONT_OP = 5,	/* continue op from flogi/auth	*/
	BFA_FCS_FABRIC_SM_RETRY_OP = 6,	/* retry op from flogi/auth	*/
	BFA_FCS_FABRIC_SM_NO_FABRIC = 7,	/* no fabric from flogi/auth */
	BFA_FCS_FABRIC_SM_PERF_EVFP = 8,	/* perform EVFP from
						 * flogi/auth */
	BFA_FCS_FABRIC_SM_ISOLATE = 9,	/* isolate from EVFP processing	*/
	BFA_FCS_FABRIC_SM_NO_TAGGING = 10,/* no VFT tagging from EVFP	*/
	BFA_FCS_FABRIC_SM_DELAYED = 11,	/* timeout delay event		*/
	BFA_FCS_FABRIC_SM_AUTH_FAILED = 12,	/* authentication failed */
	BFA_FCS_FABRIC_SM_AUTH_SUCCESS = 13,	/* authentication successful
						 */
	BFA_FCS_FABRIC_SM_DELCOMP = 14,	/* all vports deleted event	*/
	BFA_FCS_FABRIC_SM_LOOPBACK = 15,	/* Received our own FLOGI */
	BFA_FCS_FABRIC_SM_START = 16,	/* fabric start from driver	*/
};
96
97static void bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
98 enum bfa_fcs_fabric_event event);
99static void bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
100 enum bfa_fcs_fabric_event event);
101static void bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric,
102 enum bfa_fcs_fabric_event event);
103static void bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
104 enum bfa_fcs_fabric_event event);
105static void bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric,
106 enum bfa_fcs_fabric_event event);
107static void bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
108 enum bfa_fcs_fabric_event event);
109static void bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
110 enum bfa_fcs_fabric_event event);
111static void bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
112 enum bfa_fcs_fabric_event event);
113static void bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
114 enum bfa_fcs_fabric_event event);
115static void bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
116 enum bfa_fcs_fabric_event event);
117static void bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric,
118 enum bfa_fcs_fabric_event event);
119static void bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric,
120 enum bfa_fcs_fabric_event event);
121static void bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
122 enum bfa_fcs_fabric_event event);
123static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
124 enum bfa_fcs_fabric_event event);
125/**
126 * Beginning state before fabric creation.
127 */
128static void
129bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
130 enum bfa_fcs_fabric_event event)
131{
132 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
133 bfa_trc(fabric->fcs, event);
134
135 switch (event) {
136 case BFA_FCS_FABRIC_SM_CREATE:
137 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created);
138 bfa_fcs_fabric_init(fabric);
139 bfa_fcs_lport_init(&fabric->bport, &fabric->bport.port_cfg);
140 break;
141
142 case BFA_FCS_FABRIC_SM_LINK_UP:
143 case BFA_FCS_FABRIC_SM_LINK_DOWN:
144 break;
145
146 default:
147 bfa_sm_fault(fabric->fcs, event);
148 }
149}
150
151/**
152 * Beginning state before fabric creation.
153 */
154static void
155bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
156 enum bfa_fcs_fabric_event event)
157{
158 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
159 bfa_trc(fabric->fcs, event);
160
161 switch (event) {
162 case BFA_FCS_FABRIC_SM_START:
163 if (bfa_fcport_is_linkup(fabric->fcs->bfa)) {
164 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
165 bfa_fcs_fabric_login(fabric);
166 } else
167 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
168 break;
169
170 case BFA_FCS_FABRIC_SM_LINK_UP:
171 case BFA_FCS_FABRIC_SM_LINK_DOWN:
172 break;
173
174 case BFA_FCS_FABRIC_SM_DELETE:
175 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
176 bfa_fcs_modexit_comp(fabric->fcs);
177 break;
178
179 default:
180 bfa_sm_fault(fabric->fcs, event);
181 }
182}
183
/**
 * Link is down, awaiting LINK UP event from port. This is also the
 * first state at fabric creation.
 */
static void
bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric,
			   enum bfa_fcs_fabric_event event)
{
	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
	bfa_trc(fabric->fcs, event);

	switch (event) {
	case BFA_FCS_FABRIC_SM_LINK_UP:
		/* link restored: start fabric login (FLOGI) */
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
		bfa_fcs_fabric_login(fabric);
		break;

	case BFA_FCS_FABRIC_SM_RETRY_OP:
		/* retry has no meaning while the link is down; ignore
		 * (presumably a late event from a prior login attempt) */
		break;

	case BFA_FCS_FABRIC_SM_DELETE:
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
		bfa_fcs_fabric_delete(fabric);
		break;

	default:
		bfa_sm_fault(fabric->fcs, event);
	}
}
213
/**
 * FLOGI is in progress, awaiting FLOGI reply.
 */
static void
bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
			enum bfa_fcs_fabric_event event)
{
	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
	bfa_trc(fabric->fcs, event);

	switch (event) {
	case BFA_FCS_FABRIC_SM_CONT_OP:
		/* FLOGI accepted: apply the negotiated BB credit and
		 * proceed to authentication (if required) or online */
		bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit);
		fabric->fab_type = BFA_FCS_FABRIC_SWITCHED;

		if (fabric->auth_reqd && fabric->is_auth) {
			bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth);
			bfa_trc(fabric->fcs, event);
		} else {
			bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online);
			bfa_fcs_fabric_notify_online(fabric);
		}
		break;

	case BFA_FCS_FABRIC_SM_RETRY_OP:
		/* FLOGI failed retryably: back off before trying again */
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi_retry);
		bfa_timer_start(fabric->fcs->bfa, &fabric->delay_timer,
				bfa_fcs_fabric_delay, fabric,
				BFA_FCS_FABRIC_RETRY_DELAY);
		break;

	case BFA_FCS_FABRIC_SM_LOOPBACK:
		/* we received our own FLOGI — link is looped back */
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_loopback);
		bfa_lps_discard(fabric->lps);
		bfa_fcs_fabric_set_opertype(fabric);
		break;

	case BFA_FCS_FABRIC_SM_NO_FABRIC:
		/* no switch present: operate point-to-point (N2N) */
		fabric->fab_type = BFA_FCS_FABRIC_N2N;
		bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit);
		bfa_fcs_fabric_notify_online(fabric);
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_nofabric);
		break;

	case BFA_FCS_FABRIC_SM_LINK_DOWN:
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
		bfa_lps_discard(fabric->lps);
		break;

	case BFA_FCS_FABRIC_SM_DELETE:
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
		bfa_lps_discard(fabric->lps);
		bfa_fcs_fabric_delete(fabric);
		break;

	default:
		bfa_sm_fault(fabric->fcs, event);
	}
}
274
275
/**
 * FLOGI retry delay timer is running; waiting to re-issue FLOGI.
 */
static void
bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric,
			      enum bfa_fcs_fabric_event event)
{
	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
	bfa_trc(fabric->fcs, event);

	switch (event) {
	case BFA_FCS_FABRIC_SM_DELAYED:
		/* Retry timer fired: re-send FLOGI. */
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
		bfa_fcs_fabric_login(fabric);
		break;

	case BFA_FCS_FABRIC_SM_LINK_DOWN:
		/* Cancel the pending retry; FLOGI restarts on next link up. */
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
		bfa_timer_stop(&fabric->delay_timer);
		break;

	case BFA_FCS_FABRIC_SM_DELETE:
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
		bfa_timer_stop(&fabric->delay_timer);
		bfa_fcs_fabric_delete(fabric);
		break;

	default:
		bfa_sm_fault(fabric->fcs, event);
	}
}
304
/**
 * Authentication is in progress, awaiting authentication results.
 */
static void
bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
		       enum bfa_fcs_fabric_event event)
{
	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
	bfa_trc(fabric->fcs, event);

	switch (event) {
	case BFA_FCS_FABRIC_SM_AUTH_FAILED:
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed);
		bfa_lps_discard(fabric->lps);
		break;

	case BFA_FCS_FABRIC_SM_AUTH_SUCCESS:
		/* Authenticated: bring the base port and vports online. */
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online);
		bfa_fcs_fabric_notify_online(fabric);
		break;

	case BFA_FCS_FABRIC_SM_PERF_EVFP:
		/* Switch wants virtual-fabric parameter exchange. */
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_evfp);
		break;

	case BFA_FCS_FABRIC_SM_LINK_DOWN:
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
		bfa_lps_discard(fabric->lps);
		break;

	case BFA_FCS_FABRIC_SM_DELETE:
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
		bfa_fcs_fabric_delete(fabric);
		break;

	default:
		bfa_sm_fault(fabric->fcs, event);
	}
}
344
/**
 * Authentication failed. Fabric stays offline until the link bounces
 * or the fabric is deleted.
 */
static void
bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
			      enum bfa_fcs_fabric_event event)
{
	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
	bfa_trc(fabric->fcs, event);

	switch (event) {
	case BFA_FCS_FABRIC_SM_LINK_DOWN:
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
		bfa_fcs_fabric_notify_offline(fabric);
		break;

	case BFA_FCS_FABRIC_SM_DELETE:
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
		bfa_fcs_fabric_delete(fabric);
		break;

	default:
		bfa_sm_fault(fabric->fcs, event);
	}
}
370
/**
 * Port is in loopback mode (our own FLOGI was received back).
 */
static void
bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
			   enum bfa_fcs_fabric_event event)
{
	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
	bfa_trc(fabric->fcs, event);

	switch (event) {
	case BFA_FCS_FABRIC_SM_LINK_DOWN:
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
		bfa_fcs_fabric_notify_offline(fabric);
		break;

	case BFA_FCS_FABRIC_SM_DELETE:
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
		bfa_fcs_fabric_delete(fabric);
		break;

	default:
		bfa_sm_fault(fabric->fcs, event);
	}
}
396
/**
 * There is no attached fabric - private loop or NPort-to-NPort topology.
 */
static void
bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
			   enum bfa_fcs_fabric_event event)
{
	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
	bfa_trc(fabric->fcs, event);

	switch (event) {
	case BFA_FCS_FABRIC_SM_LINK_DOWN:
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
		bfa_lps_discard(fabric->lps);
		bfa_fcs_fabric_notify_offline(fabric);
		break;

	case BFA_FCS_FABRIC_SM_DELETE:
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
		bfa_fcs_fabric_delete(fabric);
		break;

	case BFA_FCS_FABRIC_SM_NO_FABRIC:
		/* Peer re-confirmed N2N: refresh TX BB credit, stay here. */
		bfa_trc(fabric->fcs, fabric->bb_credit);
		bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit);
		break;

	default:
		bfa_sm_fault(fabric->fcs, event);
	}
}
428
/**
 * Fabric is online - normal operating state.
 */
static void
bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
			 enum bfa_fcs_fabric_event event)
{
	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
	bfa_trc(fabric->fcs, event);

	switch (event) {
	case BFA_FCS_FABRIC_SM_LINK_DOWN:
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
		bfa_lps_discard(fabric->lps);
		bfa_fcs_fabric_notify_offline(fabric);
		break;

	case BFA_FCS_FABRIC_SM_DELETE:
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
		bfa_fcs_fabric_delete(fabric);
		break;

	case BFA_FCS_FABRIC_SM_AUTH_FAILED:
		/* Re-authentication failed while online: force offline. */
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed);
		bfa_lps_discard(fabric->lps);
		break;

	case BFA_FCS_FABRIC_SM_AUTH_SUCCESS:
		/* Already online; nothing more to do. */
		break;

	default:
		bfa_sm_fault(fabric->fcs, event);
	}
}
463
/**
 * Exchanging virtual fabric parameters.
 */
static void
bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric,
		       enum bfa_fcs_fabric_event event)
{
	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
	bfa_trc(fabric->fcs, event);

	switch (event) {
	case BFA_FCS_FABRIC_SM_CONT_OP:
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_evfp_done);
		break;

	case BFA_FCS_FABRIC_SM_ISOLATE:
		/* VF_ID mismatch with the switch: isolate the port. */
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_isolated);
		break;

	default:
		bfa_sm_fault(fabric->fcs, event);
	}
}
487
/**
 * EVFP exchange complete and VFT tagging is enabled.
 * Terminal state: all events are traced and ignored.
 */
static void
bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric,
			    enum bfa_fcs_fabric_event event)
{
	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
	bfa_trc(fabric->fcs, event);
}
498
/**
 * Port is isolated after EVFP exchange due to VF_ID mismatch (N and F).
 * Logs the mismatch (our vf_id vs. the switch's) on every event; no
 * state transition occurs from here.
 */
static void
bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
			   enum bfa_fcs_fabric_event event)
{
	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
	bfa_trc(fabric->fcs, event);

	bfa_log(fabric->fcs->logm, BFA_LOG_FCS_FABRIC_ISOLATED,
		fabric->bport.port_cfg.pwwn, fabric->fcs->port_vfid,
		fabric->event_arg.swp_vfid);
}
513
/**
 * Fabric is being deleted, awaiting vport delete completions.
 */
static void
bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
			   enum bfa_fcs_fabric_event event)
{
	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
	bfa_trc(fabric->fcs, event);

	switch (event) {
	case BFA_FCS_FABRIC_SM_DELCOMP:
		/* Last vport/base-port delete done: finish module exit. */
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
		bfa_fcs_modexit_comp(fabric->fcs);
		break;

	case BFA_FCS_FABRIC_SM_LINK_UP:
		/* Ignore link up while tearing down. */
		break;

	case BFA_FCS_FABRIC_SM_LINK_DOWN:
		bfa_fcs_fabric_notify_offline(fabric);
		break;

	default:
		bfa_sm_fault(fabric->fcs, event);
	}
}
541
542
543
544/**
545 * fcs_fabric_private fabric private functions
546 */
547
/**
 * Initialize the base port configuration: role and the node/port WWNs
 * read from the IOC.
 */
static void
bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric)
{
	struct bfa_port_cfg_s *port_cfg = &fabric->bport.port_cfg;

	port_cfg->roles = BFA_PORT_ROLE_FCP_IM;
	port_cfg->nwwn = bfa_ioc_get_nwwn(&fabric->fcs->bfa->ioc);
	port_cfg->pwwn = bfa_ioc_get_pwwn(&fabric->fcs->bfa->ioc);
}
557
/**
 * Port Symbolic Name Creation for base port.
 *
 * Builds "model | driver-version | host-name | os-name[ | os-patch]"
 * into port_cfg->sym_name and guarantees NUL termination.
 *
 * NOTE(review): sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR) as the strncat
 * bound includes the literal's NUL, and strncat always appends its own
 * terminator beyond the bound -- assumes sym_name is sized with slack
 * for all components; the final forced NUL at BFA_SYMNAME_MAXLEN-1 is
 * the backstop. Confirm the buffer size against the *_SZ macros.
 */
void
bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
{
	struct bfa_port_cfg_s *port_cfg = &fabric->bport.port_cfg;
	char model[BFA_ADAPTER_MODEL_NAME_LEN] = {0};
	struct bfa_fcs_driver_info_s *driver_info = &fabric->fcs->driver_info;

	bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);

	/*
	 * Model name/number
	 */
	strncpy((char *)&port_cfg->sym_name, model,
		BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
	strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
		sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));

	/*
	 * Driver Version
	 */
	strncat((char *)&port_cfg->sym_name, (char *)driver_info->version,
		BFA_FCS_PORT_SYMBNAME_VERSION_SZ);
	strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
		sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));

	/*
	 * Host machine name
	 */
	strncat((char *)&port_cfg->sym_name,
		(char *)driver_info->host_machine_name,
		BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ);
	strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
		sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));

	/*
	 * Host OS Info :
	 * If OS Patch Info is not there, do not truncate any bytes from the
	 * OS name string and instead copy the entire OS info string (64 bytes).
	 */
	if (driver_info->host_os_patch[0] == '\0') {
		strncat((char *)&port_cfg->sym_name,
			(char *)driver_info->host_os_name, BFA_FCS_OS_STR_LEN);
		strncat((char *)&port_cfg->sym_name,
			BFA_FCS_PORT_SYMBNAME_SEPARATOR,
			sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
	} else {
		strncat((char *)&port_cfg->sym_name,
			(char *)driver_info->host_os_name,
			BFA_FCS_PORT_SYMBNAME_OSINFO_SZ);
		strncat((char *)&port_cfg->sym_name,
			BFA_FCS_PORT_SYMBNAME_SEPARATOR,
			sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));

		/*
		 * Append host OS Patch Info
		 */
		strncat((char *)&port_cfg->sym_name,
			(char *)driver_info->host_os_patch,
			BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ);
	}

	/*
	 * null terminate
	 */
	port_cfg->sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
}
627
/**
 * bfa lps login completion callback.
 *
 * Translates the FLOGI completion status into fabric state-machine
 * events. Error statuses bump the matching stats counter and fire
 * RETRY_OP; on success the peer parameters (BB credit, fabric name,
 * PID, NPIV/auth capability) are captured before CONT_OP/NO_FABRIC
 * is sent.
 */
void
bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status)
{
	struct bfa_fcs_fabric_s *fabric = uarg;

	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
	bfa_trc(fabric->fcs, status);

	switch (status) {
	case BFA_STATUS_OK:
		fabric->stats.flogi_accepts++;
		break;

	case BFA_STATUS_INVALID_MAC:
		/*
		 * Only for CNA
		 */
		fabric->stats.flogi_acc_err++;
		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);

		return;

	case BFA_STATUS_EPROTOCOL:
		/* Classify the protocol error for statistics, then retry. */
		switch (bfa_lps_get_extstatus(fabric->lps)) {
		case BFA_EPROTO_BAD_ACCEPT:
			fabric->stats.flogi_acc_err++;
			break;

		case BFA_EPROTO_UNKNOWN_RSP:
			fabric->stats.flogi_unknown_rsp++;
			break;

		default:
			break;
		}
		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);

		return;

	case BFA_STATUS_FABRIC_RJT:
		fabric->stats.flogi_rejects++;
		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
		return;

	default:
		fabric->stats.flogi_rsp_err++;
		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
		return;
	}

	/* FLOGI accepted: record peer BB credit. */
	fabric->bb_credit = bfa_lps_get_peer_bbcredit(fabric->lps);
	bfa_trc(fabric->fcs, fabric->bb_credit);

	/* Non-Brocade fabrics report the fabric name here. */
	if (!bfa_lps_is_brcd_fabric(fabric->lps))
		fabric->fabric_name = bfa_lps_get_peer_nwwn(fabric->lps);

	/*
	 * Check port type. It should be 1 = F-port.
	 */
	if (bfa_lps_is_fport(fabric->lps)) {
		fabric->bport.pid = bfa_lps_get_pid(fabric->lps);
		fabric->is_npiv = bfa_lps_is_npiv_en(fabric->lps);
		fabric->is_auth = bfa_lps_is_authreq(fabric->lps);
		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_CONT_OP);
	} else {
		/*
		 * Nport-2-Nport direct attached
		 */
		fabric->bport.port_topo.pn2n.rem_port_wwn =
			bfa_lps_get_peer_pwwn(fabric->lps);
		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC);
	}

	bfa_trc(fabric->fcs, fabric->bport.pid);
	bfa_trc(fabric->fcs, fabric->is_npiv);
	bfa_trc(fabric->fcs, fabric->is_auth);
}
708
709/**
710 * Allocate and send FLOGI.
711 */
712static void
713bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric)
714{
715 struct bfa_s *bfa = fabric->fcs->bfa;
716 struct bfa_port_cfg_s *pcfg = &fabric->bport.port_cfg;
717 u8 alpa = 0;
718
719 if (bfa_fcport_get_topology(bfa) == BFA_PPORT_TOPOLOGY_LOOP)
720 alpa = bfa_fcport_get_myalpa(bfa);
721
722 bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_fcport_get_maxfrsize(bfa),
723 pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd);
724
725 fabric->stats.flogi_sent++;
726}
727
728static void
729bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric)
730{
731 struct bfa_fcs_vport_s *vport;
732 struct list_head *qe, *qen;
733
734 bfa_trc(fabric->fcs, fabric->fabric_name);
735
736 bfa_fcs_fabric_set_opertype(fabric);
737 fabric->stats.fabric_onlines++;
738
739 /**
740 * notify online event to base and then virtual ports
741 */
742 bfa_fcs_port_online(&fabric->bport);
743
744 list_for_each_safe(qe, qen, &fabric->vport_q) {
745 vport = (struct bfa_fcs_vport_s *)qe;
746 bfa_fcs_vport_online(vport);
747 }
748}
749
750static void
751bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric)
752{
753 struct bfa_fcs_vport_s *vport;
754 struct list_head *qe, *qen;
755
756 bfa_trc(fabric->fcs, fabric->fabric_name);
757 fabric->stats.fabric_offlines++;
758
759 /**
760 * notify offline event first to vports and then base port.
761 */
762 list_for_each_safe(qe, qen, &fabric->vport_q) {
763 vport = (struct bfa_fcs_vport_s *)qe;
764 bfa_fcs_vport_offline(vport);
765 }
766
767 bfa_fcs_port_offline(&fabric->bport);
768
769 fabric->fabric_name = 0;
770 fabric->fabric_ip_addr[0] = 0;
771}
772
773static void
774bfa_fcs_fabric_delay(void *cbarg)
775{
776 struct bfa_fcs_fabric_s *fabric = cbarg;
777
778 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED);
779}
780
781/**
782 * Delete all vports and wait for vport delete completions.
783 */
784static void
785bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric)
786{
787 struct bfa_fcs_vport_s *vport;
788 struct list_head *qe, *qen;
789
790 list_for_each_safe(qe, qen, &fabric->vport_q) {
791 vport = (struct bfa_fcs_vport_s *)qe;
792 bfa_fcs_vport_fcs_delete(vport);
793 }
794
795 bfa_fcs_port_delete(&fabric->bport);
796 bfa_wc_wait(&fabric->wc);
797}
798
799static void
800bfa_fcs_fabric_delete_comp(void *cbarg)
801{
802 struct bfa_fcs_fabric_s *fabric = cbarg;
803
804 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP);
805}
806
807
808
809/**
810 * fcs_fabric_public fabric public functions
811 */
812
/**
 * Attach time initialization.
 *
 * Zeroes the fabric instance, allocates its LPS, sets up the delete
 * wait-counter (primed for the base port), and attaches the base lport.
 */
void
bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs)
{
	struct bfa_fcs_fabric_s *fabric;

	fabric = &fcs->fabric;
	bfa_os_memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s));

	/**
	 * Initialize base fabric.
	 */
	fabric->fcs = fcs;
	INIT_LIST_HEAD(&fabric->vport_q);
	INIT_LIST_HEAD(&fabric->vf_q);
	fabric->lps = bfa_lps_alloc(fcs->bfa);
	bfa_assert(fabric->lps);

	/**
	 * Initialize fabric delete completion handler. Fabric deletion is complete
	 * when the last vport delete is complete.
	 */
	bfa_wc_init(&fabric->wc, bfa_fcs_fabric_delete_comp, fabric);
	bfa_wc_up(&fabric->wc);	/* For the base port */

	bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
	bfa_fcs_lport_attach(&fabric->bport, fabric->fcs, FC_VF_ID_NULL, NULL);
}
843
844void
845bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs)
846{
847 bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_CREATE);
848 bfa_trc(fcs, 0);
849}
850
/**
 * Module cleanup.
 *
 * NOTE(review): the LPS is freed *before* the DELETE event is sent;
 * assumes the deleting path no longer touches fabric->lps -- confirm.
 */
void
bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs)
{
	struct bfa_fcs_fabric_s *fabric;

	bfa_trc(fcs, 0);

	/**
	 * Cleanup base fabric.
	 */
	fabric = &fcs->fabric;
	bfa_lps_delete(fabric->lps);
	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELETE);
}
868
869/**
870 * Fabric module start -- kick starts FCS actions
871 */
872void
873bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs)
874{
875 struct bfa_fcs_fabric_s *fabric;
876
877 bfa_trc(fcs, 0);
878 fabric = &fcs->fabric;
879 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START);
880}
881
/**
 * Suspend fabric activity as part of driver suspend.
 * Intentionally a no-op stub.
 */
void
bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs)
{
}
889
/* Return BFA_TRUE when the fabric SM is in the loopback state. */
bfa_boolean_t
bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric)
{
	return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_loopback);
}
895
/* Return BFA_TRUE when the fabric SM is in the auth-failed state. */
bfa_boolean_t
bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric)
{
	return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_auth_failed);
}
901
/* Return the fabric's current operating port type. */
enum bfa_pport_type
bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric)
{
	return fabric->oper_type;
}
907
/**
 * Link up notification from BFA physical port module.
 */
void
bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric)
{
	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_UP);
}
917
/**
 * Link down notification from BFA physical port module.
 */
void
bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric)
{
	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN);
}
927
/**
 * A child vport is being created in the fabric.
 *
 * Call from vport module at vport creation. A list of base port and vports
 * belonging to a fabric is maintained to propagate link events.
 *
 * param[in] fabric - Fabric instance. This can be a base fabric or vf.
 * param[in] vport  - Vport being created.
 *
 * @return None (always succeeds)
 */
void
bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
			struct bfa_fcs_vport_s *vport)
{
	/**
	 * - add vport to fabric's vport_q
	 */
	bfa_trc(fabric->fcs, fabric->vf_id);

	list_add_tail(&vport->qe, &fabric->vport_q);
	fabric->num_vports++;
	/* One more delete completion to wait for at fabric teardown. */
	bfa_wc_up(&fabric->wc);
}
952
/**
 * A child vport is being deleted from fabric.
 *
 * Unlinks the vport, drops the count, and releases its slot in the
 * fabric delete wait-counter.
 */
void
bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric,
			struct bfa_fcs_vport_s *vport)
{
	list_del(&vport->qe);
	fabric->num_vports--;
	bfa_wc_down(&fabric->wc);
}
966
/**
 * Base port is deleted: release its slot in the delete wait-counter.
 */
void
bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric)
{
	bfa_wc_down(&fabric->wc);
}
975
/**
 * Check if fabric is online.
 *
 * param[in] fabric - Fabric instance. This can be a base fabric or vf.
 *
 * @return TRUE/FALSE
 */
int
bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric)
{
	return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online);
}
988
/**
 * Add a virtual fabric (VF) instance.
 *
 * Currently only places the VF in the uninit state; port_cfg and
 * vf_drv are accepted for interface compatibility but unused here.
 */
bfa_status_t
bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf, struct bfa_fcs_s *fcs,
		     struct bfa_port_cfg_s *port_cfg,
		     struct bfad_vf_s *vf_drv)
{
	bfa_sm_set_state(vf, bfa_fcs_fabric_sm_uninit);
	return BFA_STATUS_OK;
}
998
999/**
1000 * Lookup for a vport withing a fabric given its pwwn
1001 */
1002struct bfa_fcs_vport_s *
1003bfa_fcs_fabric_vport_lookup(struct bfa_fcs_fabric_s *fabric, wwn_t pwwn)
1004{
1005 struct bfa_fcs_vport_s *vport;
1006 struct list_head *qe;
1007
1008 list_for_each(qe, &fabric->vport_q) {
1009 vport = (struct bfa_fcs_vport_s *)qe;
1010 if (bfa_fcs_port_get_pwwn(&vport->lport) == pwwn)
1011 return vport;
1012 }
1013
1014 return NULL;
1015}
1016
/**
 * In a given fabric, return the number of lports.
 *
 * param[in] fabric - Fabric instance. This can be a base fabric or vf.
 *
 * @return : 1 or more.
 */
u16
bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric)
{
	return fabric->num_vports;
}
1029
1030/*
1031 * Get OUI of the attached switch.
1032 *
1033 * Note : Use of this function should be avoided as much as possible.
1034 * This function should be used only if there is any requirement
1035 * to check for FOS version below 6.3.
1036 * To check if the attached fabric is a brocade fabric, use
1037 * bfa_lps_is_brcd_fabric() which works for FOS versions 6.3
1038 * or above only.
1039 */
1040
1041u16
1042bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric)
1043{
1044 wwn_t fab_nwwn;
1045 u8 *tmp;
1046 u16 oui;
1047
1048 fab_nwwn = bfa_lps_get_peer_nwwn(fabric->lps);
1049
1050 tmp = (uint8_t *)&fab_nwwn;
1051 oui = (tmp[3] << 8) | tmp[4];
1052
1053 return oui;
1054}
1055
/**
 * Unsolicited frame receive handling.
 *
 * Routes an incoming frame to: loopback detection, the base fabric
 * (FLOGI/EVFP), the auth module, the base port, or a vport matched by
 * destination PID; unmatched frames fall through to the base port.
 */
void
bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
		       u16 len)
{
	u32 pid = fchs->d_id;
	struct bfa_fcs_vport_s *vport;
	struct list_head *qe;
	struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
	struct fc_logi_s *flogi = (struct fc_logi_s *) els_cmd;

	bfa_trc(fabric->fcs, len);
	bfa_trc(fabric->fcs, pid);

	/**
	 * Look for our own FLOGI frames being looped back. This means an
	 * external loopback cable is in place. Our own FLOGI frames are
	 * sometimes looped back when switch port gets temporarily bypassed.
	 */
	/* NOTE(review): this check uses bfa_os_ntoh3b while the one below
	 * uses bfa_os_hton3b on the same constant -- presumably the two
	 * 3-byte swaps are symmetric; confirm. */
	if ((pid == bfa_os_ntoh3b(FC_FABRIC_PORT))
	    && (els_cmd->els_code == FC_ELS_FLOGI)
	    && (flogi->port_name == bfa_fcs_port_get_pwwn(&fabric->bport))) {
		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LOOPBACK);
		return;
	}

	/**
	 * FLOGI/EVFP exchanges should be consumed by base fabric.
	 */
	if (fchs->d_id == bfa_os_hton3b(FC_FABRIC_PORT)) {
		bfa_trc(fabric->fcs, pid);
		bfa_fcs_fabric_process_uf(fabric, fchs, len);
		return;
	}

	if (fabric->bport.pid == pid) {
		/**
		 * All authentication frames should be routed to auth
		 */
		bfa_trc(fabric->fcs, els_cmd->els_code);
		if (els_cmd->els_code == FC_ELS_AUTH) {
			bfa_trc(fabric->fcs, els_cmd->els_code);
			fabric->auth.response = (u8 *) els_cmd;
			return;
		}

		bfa_trc(fabric->fcs, *(u8 *) ((u8 *) fchs));
		bfa_fcs_port_uf_recv(&fabric->bport, fchs, len);
		return;
	}

	/**
	 * look for a matching local port ID
	 */
	list_for_each(qe, &fabric->vport_q) {
		vport = (struct bfa_fcs_vport_s *)qe;
		if (vport->lport.pid == pid) {
			bfa_fcs_port_uf_recv(&vport->lport, fchs, len);
			return;
		}
	}
	/* No vport matched: hand the frame to the base port. */
	bfa_trc(fabric->fcs, els_cmd->els_code);
	bfa_fcs_port_uf_recv(&fabric->bport, fchs, len);
}
1122
1123/**
1124 * Unsolicited frames to be processed by fabric.
1125 */
1126static void
1127bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
1128 u16 len)
1129{
1130 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
1131
1132 bfa_trc(fabric->fcs, els_cmd->els_code);
1133
1134 switch (els_cmd->els_code) {
1135 case FC_ELS_FLOGI:
1136 bfa_fcs_fabric_process_flogi(fabric, fchs, len);
1137 break;
1138
1139 default:
1140 /*
1141 * need to generate a LS_RJT
1142 */
1143 break;
1144 }
1145}
1146
/**
 * Process incoming FLOGI.
 *
 * Accepts only N-port FLOGIs (point-to-point peer): records the peer's
 * BB credit, WWN and OX_ID, sends a FLOGI ACC, and drives the state
 * machine to the no-fabric (N2N) state.
 */
static void
bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric,
			     struct fchs_s *fchs, u16 len)
{
	struct fc_logi_s *flogi = (struct fc_logi_s *) (fchs + 1);
	struct bfa_fcs_port_s *bport = &fabric->bport;

	bfa_trc(fabric->fcs, fchs->s_id);

	fabric->stats.flogi_rcvd++;
	/*
	 * Check port type. It should be 0 = n-port.
	 */
	if (flogi->csp.port_type) {
		/*
		 * @todo: may need to send a LS_RJT
		 */
		bfa_trc(fabric->fcs, flogi->port_name);
		fabric->stats.flogi_rejected++;
		return;
	}

	fabric->bb_credit = bfa_os_ntohs(flogi->csp.bbcred);
	bport->port_topo.pn2n.rem_port_wwn = flogi->port_name;
	/* Save the originator's exchange ID for the ACC we send back. */
	bport->port_topo.pn2n.reply_oxid = fchs->ox_id;

	/*
	 * Send a Flogi Acc
	 */
	bfa_fcs_fabric_send_flogi_acc(fabric);
	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC);
}
1182
/**
 * Build and send a FLOGI accept to the directly-attached N-port peer,
 * echoing the saved OX_ID and advertising our frame size / RX credits.
 */
static void
bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
{
	struct bfa_port_cfg_s *pcfg = &fabric->bport.port_cfg;
	struct bfa_fcs_port_n2n_s *n2n_port = &fabric->bport.port_topo.pn2n;
	struct bfa_s *bfa = fabric->fcs->bfa;
	struct bfa_fcxp_s *fcxp;
	u16 reqlen;
	struct fchs_s fchs;

	fcxp = bfa_fcs_fcxp_alloc(fabric->fcs);
	/**
	 * Do not expect this failure -- expect remote node to retry
	 */
	if (!fcxp)
		return;

	reqlen = fc_flogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
				    bfa_os_hton3b(FC_FABRIC_PORT),
				    n2n_port->reply_oxid, pcfg->pwwn,
				    pcfg->nwwn, bfa_fcport_get_maxfrsize(bfa),
				    bfa_fcport_get_rx_bbcredit(bfa));

	bfa_fcxp_send(fcxp, NULL, fabric->vf_id, bfa_lps_get_tag(fabric->lps),
		      BFA_FALSE, FC_CLASS_3, reqlen, &fchs,
		      bfa_fcs_fabric_flogiacc_comp, fabric,
		      FC_MAX_PDUSZ, 0);	/* Timeout 0 indicates no
					 * response expected
					 */
}
1213
/**
 * Flogi Acc completion callback. The ACC expects no response, so only
 * the send status is traced.
 */
static void
bfa_fcs_fabric_flogiacc_comp(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
			     bfa_status_t status, u32 rsp_len,
			     u32 resid_len, struct fchs_s *rspfchs)
{
	struct bfa_fcs_fabric_s *fabric = cbarg;

	bfa_trc(fabric->fcs, status);
}
1226
1227/*
1228 *
1229 * @param[in] fabric - fabric
1230 * @param[in] result - 1
1231 *
1232 * @return - none
1233 */
1234void
1235bfa_fcs_auth_finished(struct bfa_fcs_fabric_s *fabric, enum auth_status status)
1236{
1237 bfa_trc(fabric->fcs, status);
1238
1239 if (status == FC_AUTH_STATE_SUCCESS)
1240 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_AUTH_SUCCESS);
1241 else
1242 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_AUTH_FAILED);
1243}
1244
/**
 * Send AEN notification.
 *
 * Logs the port event with the port and fabric WWNs.
 * NOTE(review): aen_data is populated but never passed anywhere in
 * this function -- the stores are dead as written; presumably an AEN
 * post call was intended. Confirm against the AEN module.
 */
static void
bfa_fcs_fabric_aen_post(struct bfa_fcs_port_s *port,
			enum bfa_port_aen_event event)
{
	union bfa_aen_data_u aen_data;
	struct bfa_log_mod_s *logmod = port->fcs->logm;
	wwn_t pwwn = bfa_fcs_port_get_pwwn(port);
	wwn_t fwwn = bfa_fcs_port_get_fabric_name(port);
	char pwwn_ptr[BFA_STRING_32];
	char fwwn_ptr[BFA_STRING_32];

	wwn2str(pwwn_ptr, pwwn);
	wwn2str(fwwn_ptr, fwwn);

	bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, event),
		pwwn_ptr, fwwn_ptr);

	aen_data.port.pwwn = pwwn;
	aen_data.port.fwwn = fwwn;
}
1268
/*
 * Record the fabric name learned from the switch.
 *
 * @param[in] fabric - fabric
 * @param[in] fabric_name - new fabric name
 *
 * @return - none
 *
 * A change from a previously-known (non-zero) name raises a
 * fabric-name-change AEN; the first assignment does not.
 */
void
bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
			       wwn_t fabric_name)
{
	bfa_trc(fabric->fcs, fabric_name);

	if (fabric->fabric_name == 0) {
		/*
		 * With BRCD switches, we don't get Fabric Name in FLOGI.
		 * Don't generate a fabric name change event in this case.
		 */
		fabric->fabric_name = fabric_name;
	} else {
		fabric->fabric_name = fabric_name;
		/*
		 * Generate a Event
		 */
		bfa_fcs_fabric_aen_post(&fabric->bport,
					BFA_PORT_AEN_FABRIC_NAME_CHANGE);
	}

}
1298
/**
 * Copy the base port's symbolic name into a caller-supplied buffer.
 *
 * @param[in]  fcs          - FCS instance
 * @param[out] node_symname - caller-allocated buffer of at least
 *                            BFA_SYMNAME_MAXLEN bytes
 *
 * @return - none
 */
void
bfa_fcs_get_sym_name(const struct bfa_fcs_s *fcs, char *node_symname)
{
	bfa_os_memcpy(node_symname,
		      fcs->fabric.bport.port_cfg.sym_name.symname,
		      BFA_SYMNAME_MAXLEN);
}
1314
/**
 * FLOGO completion callback. Not used by FCS; intentionally empty.
 */
void
bfa_cb_lps_flogo_comp(void *bfad, void *uarg)
{
}
1322
1323
diff --git a/drivers/scsi/bfa/fcbuild.h b/drivers/scsi/bfa/fcbuild.h
deleted file mode 100644
index 981d98d542b9..000000000000
--- a/drivers/scsi/bfa/fcbuild.h
+++ /dev/null
@@ -1,279 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17/*
18 * fcbuild.h - FC link service frame building and parsing routines
19 */
20
21#ifndef __FCBUILD_H__
22#define __FCBUILD_H__
23
24#include <bfa_os_inc.h>
25#include <protocol/fc.h>
26#include <protocol/fcp.h>
27#include <protocol/ct.h>
28#include <defs/bfa_defs_port.h>
29#include <defs/bfa_defs_pport.h>
30
31/*
32 * Utility Macros/functions
33 */
34
35#define fcif_sof_set(_ifhdr, _sof) ((_ifhdr)->sof = FC_ ## _sof)
36#define fcif_eof_set(_ifhdr, _eof) ((_ifhdr)->eof = FC_ ## _eof)
37
38#define wwn_is_equal(_wwn1, _wwn2) \
39 (memcmp(&(_wwn1), &(_wwn2), sizeof(wwn_t)) == 0)
40
41#define fc_roundup(_l, _s) (((_l) + ((_s) - 1)) & ~((_s) - 1))
42
43/*
44 * Given the fc response length, this routine will return
45 * the length of the actual payload bytes following the CT header.
46 *
47 * Assumes the input response length does not include the crc, eof, etc.
48 */
49static inline u32
50fc_get_ctresp_pyld_len(u32 resp_len)
51{
52 return resp_len - sizeof(struct ct_hdr_s);
53}
54
55/*
56 * Convert bfa speed to rpsc speed value.
57 */
58static inline enum bfa_pport_speed
59fc_rpsc_operspeed_to_bfa_speed(enum fc_rpsc_op_speed_s speed)
60{
61 switch (speed) {
62
63 case RPSC_OP_SPEED_1G:
64 return BFA_PPORT_SPEED_1GBPS;
65
66 case RPSC_OP_SPEED_2G:
67 return BFA_PPORT_SPEED_2GBPS;
68
69 case RPSC_OP_SPEED_4G:
70 return BFA_PPORT_SPEED_4GBPS;
71
72 case RPSC_OP_SPEED_8G:
73 return BFA_PPORT_SPEED_8GBPS;
74
75 case RPSC_OP_SPEED_10G:
76 return BFA_PPORT_SPEED_10GBPS;
77
78 default:
79 return BFA_PPORT_SPEED_UNKNOWN;
80 }
81}
82
83/*
84 * Convert RPSC speed to bfa speed value.
85 */
86static inline enum fc_rpsc_op_speed_s
87fc_bfa_speed_to_rpsc_operspeed(enum bfa_pport_speed op_speed)
88{
89 switch (op_speed) {
90
91 case BFA_PPORT_SPEED_1GBPS:
92 return RPSC_OP_SPEED_1G;
93
94 case BFA_PPORT_SPEED_2GBPS:
95 return RPSC_OP_SPEED_2G;
96
97 case BFA_PPORT_SPEED_4GBPS:
98 return RPSC_OP_SPEED_4G;
99
100 case BFA_PPORT_SPEED_8GBPS:
101 return RPSC_OP_SPEED_8G;
102
103 case BFA_PPORT_SPEED_10GBPS:
104 return RPSC_OP_SPEED_10G;
105
106 default:
107 return RPSC_OP_SPEED_NOT_EST;
108 }
109}
110enum fc_parse_status {
111 FC_PARSE_OK = 0,
112 FC_PARSE_FAILURE = 1,
113 FC_PARSE_BUSY = 2,
114 FC_PARSE_LEN_INVAL,
115 FC_PARSE_ACC_INVAL,
116 FC_PARSE_PWWN_NOT_EQUAL,
117 FC_PARSE_NWWN_NOT_EQUAL,
118 FC_PARSE_RXSZ_INVAL,
119 FC_PARSE_NOT_FCP,
120 FC_PARSE_OPAFLAG_INVAL,
121 FC_PARSE_RPAFLAG_INVAL,
122 FC_PARSE_OPA_INVAL,
123 FC_PARSE_RPA_INVAL,
124
125};
126
127struct fc_templates_s {
128 struct fchs_s fc_els_req;
129 struct fchs_s fc_bls_req;
130 struct fc_logi_s plogi;
131 struct fc_rrq_s rrq;
132};
133
134void fcbuild_init(void);
135
136u16 fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi,
137 u32 s_id, u16 ox_id, wwn_t port_name,
138 wwn_t node_name, u16 pdu_size, u8 set_npiv,
139 u8 set_auth, u16 local_bb_credits);
140u16 fc_fdisc_build(struct fchs_s *buf, struct fc_logi_s *flogi,
141 u32 s_id, u16 ox_id, wwn_t port_name,
142 wwn_t node_name, u16 pdu_size);
143u16 fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi,
144 u32 s_id, u16 ox_id, wwn_t port_name,
145 wwn_t node_name, u16 pdu_size,
146 u16 local_bb_credits);
147u16 fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id,
148 u32 s_id, u16 ox_id, wwn_t port_name,
149 wwn_t node_name, u16 pdu_size);
150enum fc_parse_status fc_plogi_parse(struct fchs_s *fchs);
151u16 fc_abts_build(struct fchs_s *buf, u32 d_id, u32 s_id,
152 u16 ox_id);
153enum fc_parse_status fc_abts_rsp_parse(struct fchs_s *buf, int len);
154u16 fc_rrq_build(struct fchs_s *buf, struct fc_rrq_s *rrq, u32 d_id,
155 u32 s_id, u16 ox_id, u16 rrq_oxid);
156enum fc_parse_status fc_rrq_rsp_parse(struct fchs_s *buf, int len);
157u16 fc_rspnid_build(struct fchs_s *fchs, void *pld, u32 s_id,
158 u16 ox_id, u8 *name);
159u16 fc_rftid_build(struct fchs_s *fchs, void *pld, u32 s_id,
160 u16 ox_id, enum bfa_port_role role);
161u16 fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id,
162 u16 ox_id, u8 *fc4_bitmap,
163 u32 bitmap_size);
164u16 fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
165 u16 ox_id, u8 fc4_type, u8 fc4_ftrs);
166u16 fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id,
167 u16 ox_id, wwn_t port_name);
168u16 fc_gpnid_build(struct fchs_s *fchs, void *pld, u32 s_id,
169 u16 ox_id, u32 port_id);
170u16 fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr,
171 u8 set_br_reg, u32 s_id, u16 ox_id);
172u16 fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
173 u32 s_id, u16 ox_id,
174 wwn_t port_name, wwn_t node_name, u16 pdu_size);
175
176u16 fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc,
177 u32 d_id, u32 s_id, u16 ox_id,
178 wwn_t port_name, wwn_t node_name);
179enum fc_parse_status fc_adisc_parse(struct fchs_s *fchs, void *pld,
180 u32 host_dap,
181 wwn_t node_name, wwn_t port_name);
182enum fc_parse_status fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len,
183 wwn_t port_name, wwn_t node_name);
184u16 fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc,
185 u32 d_id, u32 s_id, u16 ox_id,
186 wwn_t port_name, wwn_t node_name);
187u16 fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt,
188 u32 d_id, u32 s_id, u16 ox_id,
189 u8 reason_code, u8 reason_code_expl);
190u16 fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd,
191 u32 d_id, u32 s_id, u16 ox_id);
192u16 fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id,
193 u32 s_id, u16 ox_id);
194enum fc_parse_status fc_prli_rsp_parse(struct fc_prli_s *prli, int len);
195
196u16 fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
197 u32 s_id, u16 ox_id,
198 enum bfa_port_role role);
199u16 fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid,
200 u32 d_id, u32 s_id, u16 ox_id,
201 u32 data_format);
202u16 fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc,
203 u32 d_id, u32 s_id, u16 ox_id,
204 u32 data_format,
205 struct fc_rnid_common_id_data_s *common_id_data,
206 struct fc_rnid_general_topology_data_s *
207 gen_topo_data);
208u16 fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rps2c,
209 u32 d_id, u32 s_id,
210 u32 *pid_list, u16 npids);
211u16 fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc,
212 u32 d_id, u32 s_id, u16 ox_id);
213u16 fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
214 u32 d_id, u32 s_id, u16 ox_id,
215 struct fc_rpsc_speed_info_s *oper_speed);
216u16 fc_gid_ft_build(struct fchs_s *fchs, void *pld, u32 s_id,
217 u8 fc4_type);
218u16 fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
219 u32 port_id, wwn_t port_name);
220u16 fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
221 u32 port_id, wwn_t node_name);
222u16 fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
223 u32 port_id, u32 cos);
224u16 fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
225 u32 port_id, u8 port_type);
226u16 fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id,
227 u32 port_id);
228u16 fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo,
229 u32 d_id, u32 s_id, u16 ox_id,
230 wwn_t port_name);
231u16 fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
232 u32 s_id, u16 ox_id);
233u16 fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id,
234 u16 cmd_code);
235u16 fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id,
236 wwn_t wwn);
237u16 fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id,
238 wwn_t wwn);
239void fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask);
240void fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
241 u16 ox_id);
242enum fc_parse_status fc_els_rsp_parse(struct fchs_s *fchs, int len);
243enum fc_parse_status fc_plogi_rsp_parse(struct fchs_s *fchs, int len,
244 wwn_t port_name);
245enum fc_parse_status fc_prli_parse(struct fc_prli_s *prli);
246enum fc_parse_status fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name,
247 wwn_t port_name);
248u16 fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc,
249 u32 d_id, u32 s_id, u16 ox_id,
250 u16 rx_id);
251int fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code);
252u16 fc_tprlo_acc_build(struct fchs_s *fchs,
253 struct fc_tprlo_acc_s *tprlo_acc,
254 u32 d_id, u32 s_id, u16 ox_id,
255 int num_pages);
256u16 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc,
257 u32 d_id, u32 s_id, u16 ox_id,
258 int num_pages);
259u16 fc_logo_rsp_parse(struct fchs_s *fchs, int len);
260u16 fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
261 u16 ox_id, wwn_t port_name, wwn_t node_name,
262 u16 pdu_size);
263u16 fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name);
264u16 fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
265 u16 ox_id, int num_pages);
266u16 fc_prlo_rsp_parse(struct fchs_s *fchs, int len);
267u16 fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
268 u16 ox_id, int num_pages,
269 enum fc_tprlo_type tprlo_type, u32 tpr_id);
270u16 fc_tprlo_rsp_parse(struct fchs_s *fchs, int len);
271u16 fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
272 u16 ox_id, u32 reason_code,
273 u32 reason_expl);
274u16 fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
275 u16 ox_id, u32 port_id);
276u16 fc_ct_rsp_parse(struct ct_hdr_s *cthdr);
277u16 fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn,
278 u32 s_id, u16 ox_id);
279#endif
diff --git a/drivers/scsi/bfa/fcptm.c b/drivers/scsi/bfa/fcptm.c
deleted file mode 100644
index 8c8b08c72e7a..000000000000
--- a/drivers/scsi/bfa/fcptm.c
+++ /dev/null
@@ -1,68 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * This file contains dummy FCPTM routines to aid in Initiator Mode only
20 * compilation of OS driver.
21 *
22 */
23
24#include "bfa_os_inc.h"
25#include "fcs_rport.h"
26#include "fcs_fcptm.h"
27#include "fcs/bfa_fcs_rport.h"
28
29struct bfa_fcs_tin_s *
30bfa_fcs_tin_create(struct bfa_fcs_rport_s *rport)
31{
32 return NULL;
33}
34
35void
36bfa_fcs_tin_delete(struct bfa_fcs_tin_s *tin)
37{
38}
39
40void
41bfa_fcs_tin_rport_offline(struct bfa_fcs_tin_s *tin)
42{
43}
44
45void
46bfa_fcs_tin_rport_online(struct bfa_fcs_tin_s *tin)
47{
48}
49
50void
51bfa_fcs_tin_rx_prli(struct bfa_fcs_tin_s *tin, struct fchs_s *fchs, u16 len)
52{
53}
54
55void
56bfa_fcs_fcptm_uf_recv(struct bfa_fcs_tin_s *tin, struct fchs_s *fchs, u16 len)
57{
58}
59
60void
61bfa_fcs_tin_pause(struct bfa_fcs_tin_s *tin)
62{
63}
64
65void
66bfa_fcs_tin_resume(struct bfa_fcs_tin_s *tin)
67{
68}
diff --git a/drivers/scsi/bfa/fcs.h b/drivers/scsi/bfa/fcs.h
deleted file mode 100644
index 8d08230e6295..000000000000
--- a/drivers/scsi/bfa/fcs.h
+++ /dev/null
@@ -1,30 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * fcs.h FCS module functions
20 */
21
22
23#ifndef __FCS_H__
24#define __FCS_H__
25
26#define __fcs_min_cfg(__fcs) ((__fcs)->min_cfg)
27
28void bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs);
29
30#endif /* __FCS_H__ */
diff --git a/drivers/scsi/bfa/fcs_auth.h b/drivers/scsi/bfa/fcs_auth.h
deleted file mode 100644
index 65d155fea3d7..000000000000
--- a/drivers/scsi/bfa/fcs_auth.h
+++ /dev/null
@@ -1,37 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * fcs_uf.h FCS unsolicited frame receive
20 */
21
22
23#ifndef __FCS_AUTH_H__
24#define __FCS_AUTH_H__
25
26#include <fcs/bfa_fcs.h>
27#include <fcs/bfa_fcs_vport.h>
28#include <fcs/bfa_fcs_lport.h>
29
30/*
31 * fcs friend functions: only between fcs modules
32 */
33void bfa_fcs_auth_uf_recv(struct bfa_fcs_fabric_s *fabric, int len);
34void bfa_fcs_auth_start(struct bfa_fcs_fabric_s *fabric);
35void bfa_fcs_auth_stop(struct bfa_fcs_fabric_s *fabric);
36
37#endif /* __FCS_UF_H__ */
diff --git a/drivers/scsi/bfa/fcs_fabric.h b/drivers/scsi/bfa/fcs_fabric.h
deleted file mode 100644
index 432ab8ab8c3c..000000000000
--- a/drivers/scsi/bfa/fcs_fabric.h
+++ /dev/null
@@ -1,68 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * fcs_lport.h FCS logical port interfaces
20 */
21
22#ifndef __FCS_FABRIC_H__
23#define __FCS_FABRIC_H__
24
25#include <fcs/bfa_fcs.h>
26#include <fcs/bfa_fcs_vport.h>
27#include <fcs/bfa_fcs_lport.h>
28
29#define BFA_FCS_BRCD_SWITCH_OUI 0x051e
30
31/*
32* fcs friend functions: only between fcs modules
33 */
34void bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs);
35void bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs);
36void bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs);
37void bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs);
38void bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric);
39void bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric);
40void bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
41 struct bfa_fcs_vport_s *vport);
42void bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric,
43 struct bfa_fcs_vport_s *vport);
44int bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric);
45struct bfa_fcs_vport_s *bfa_fcs_fabric_vport_lookup(
46 struct bfa_fcs_fabric_s *fabric, wwn_t pwwn);
47void bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs);
48void bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric,
49 struct fchs_s *fchs, u16 len);
50u16 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric);
51bfa_boolean_t bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric);
52bfa_boolean_t bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric);
53enum bfa_pport_type bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric);
54void bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric);
55void bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric);
56
57bfa_status_t bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf,
58 struct bfa_fcs_s *fcs, struct bfa_port_cfg_s *port_cfg,
59 struct bfad_vf_s *vf_drv);
60void bfa_fcs_auth_finished(struct bfa_fcs_fabric_s *fabric,
61 enum auth_status status);
62
63void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
64 wwn_t fabric_name);
65u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric);
66void bfa_fcs_get_sym_name(const struct bfa_fcs_s *fcs, char *node_symname);
67
68#endif /* __FCS_FABRIC_H__ */
diff --git a/drivers/scsi/bfa/fcs_fcpim.h b/drivers/scsi/bfa/fcs_fcpim.h
deleted file mode 100644
index 11e6e7bce9f6..000000000000
--- a/drivers/scsi/bfa/fcs_fcpim.h
+++ /dev/null
@@ -1,39 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __FCS_FCPIM_H__
18#define __FCS_FCPIM_H__
19
20#include <defs/bfa_defs_port.h>
21#include <fcs/bfa_fcs_lport.h>
22#include <fcs/bfa_fcs_rport.h>
23
24/*
25 * Following routines are from FCPIM and will be called by rport.
26 */
27struct bfa_fcs_itnim_s *bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport);
28void bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim);
29void bfa_fcs_itnim_rport_offline(struct bfa_fcs_itnim_s *itnim);
30void bfa_fcs_itnim_rport_online(struct bfa_fcs_itnim_s *itnim);
31bfa_status_t bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim);
32
33void bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim);
34void bfa_fcs_itnim_pause(struct bfa_fcs_itnim_s *itnim);
35void bfa_fcs_itnim_resume(struct bfa_fcs_itnim_s *itnim);
36
37void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, struct fchs_s *fchs,
38 u16 len);
39#endif /* __FCS_FCPIM_H__ */
diff --git a/drivers/scsi/bfa/fcs_fcptm.h b/drivers/scsi/bfa/fcs_fcptm.h
deleted file mode 100644
index ffff0829fd31..000000000000
--- a/drivers/scsi/bfa/fcs_fcptm.h
+++ /dev/null
@@ -1,45 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __FCS_FCPTM_H__
19#define __FCS_FCPTM_H__
20
21#include <defs/bfa_defs_port.h>
22#include <fcs/bfa_fcs_lport.h>
23#include <fcs/bfa_fcs_rport.h>
24
25/*
26 * Following routines are from FCPTM and will be called by rport.
27 */
28struct bfa_fcs_tin_s *bfa_fcs_tin_create(struct bfa_fcs_rport_s *rport);
29void bfa_fcs_tin_rport_offline(struct bfa_fcs_tin_s *tin);
30void bfa_fcs_tin_rport_online(struct bfa_fcs_tin_s *tin);
31void bfa_fcs_tin_delete(struct bfa_fcs_tin_s *tin);
32void bfa_fcs_tin_rx_prli(struct bfa_fcs_tin_s *tin, struct fchs_s *fchs,
33 u16 len);
34void bfa_fcs_tin_pause(struct bfa_fcs_tin_s *tin);
35void bfa_fcs_tin_resume(struct bfa_fcs_tin_s *tin);
36
37/*
38 * Modudle init/cleanup routines.
39 */
40void bfa_fcs_fcptm_modinit(struct bfa_fcs_s *fcs);
41void bfa_fcs_fcptm_modexit(struct bfa_fcs_s *fcs);
42void bfa_fcs_fcptm_uf_recv(struct bfa_fcs_tin_s *tin, struct fchs_s *fchs,
43 u16 len);
44
45#endif /* __FCS_FCPTM_H__ */
diff --git a/drivers/scsi/bfa/fcs_fcxp.h b/drivers/scsi/bfa/fcs_fcxp.h
deleted file mode 100644
index 8277fe9c2b70..000000000000
--- a/drivers/scsi/bfa/fcs_fcxp.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * fcs_fcxp.h FCXP helper macros for FCS
20 */
21
22
23#ifndef __FCS_FCXP_H__
24#define __FCS_FCXP_H__
25
26#define bfa_fcs_fcxp_alloc(__fcs) \
27 bfa_fcxp_alloc(NULL, (__fcs)->bfa, 0, 0, NULL, NULL, NULL, NULL)
28
29#endif /* __FCS_FCXP_H__ */
diff --git a/drivers/scsi/bfa/fcs_lport.h b/drivers/scsi/bfa/fcs_lport.h
deleted file mode 100644
index a6508c8ab184..000000000000
--- a/drivers/scsi/bfa/fcs_lport.h
+++ /dev/null
@@ -1,118 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * fcs_lport.h FCS logical port interfaces
20 */
21
22#ifndef __FCS_LPORT_H__
23#define __FCS_LPORT_H__
24
25#define __VPORT_H__
26#include <defs/bfa_defs_port.h>
27#include <bfa_svc.h>
28#include <fcs/bfa_fcs_lport.h>
29#include <fcs/bfa_fcs_rport.h>
30#include <fcs/bfa_fcs_vport.h>
31#include <fcs_fabric.h>
32#include <fcs_ms.h>
33#include <cs/bfa_q.h>
34#include <fcbuild.h>
35
36/*
37 * PID used in P2P/N2N ( In Big Endian)
38 */
39#define N2N_LOCAL_PID 0x010000
40#define N2N_REMOTE_PID 0x020000
41
42/*
43 * Misc Timeouts
44 */
45/*
46 * To be used when spawning a timer before retrying a failed command. Milli
47 * Secs.
48 */
49#define BFA_FCS_RETRY_TIMEOUT 2000
50
51/*
52 * Check for Port/Vport Mode/Role
53 */
54#define BFA_FCS_VPORT_IS_INITIATOR_MODE(port) \
55 (port->port_cfg.roles & BFA_PORT_ROLE_FCP_IM)
56
57#define BFA_FCS_VPORT_IS_TARGET_MODE(port) \
58 (port->port_cfg.roles & BFA_PORT_ROLE_FCP_TM)
59
60#define BFA_FCS_VPORT_IS_IPFC_MODE(port) \
61 (port->port_cfg.roles & BFA_PORT_ROLE_FCP_IPFC)
62
63/*
64 * Is this a Well Known Address
65 */
66#define BFA_FCS_PID_IS_WKA(pid) ((bfa_os_ntoh3b(pid) > 0xFFF000) ? 1 : 0)
67
68/*
69 * Pointer to elements within Port
70 */
71#define BFA_FCS_GET_HAL_FROM_PORT(port) (port->fcs->bfa)
72#define BFA_FCS_GET_NS_FROM_PORT(port) (&port->port_topo.pfab.ns)
73#define BFA_FCS_GET_SCN_FROM_PORT(port) (&port->port_topo.pfab.scn)
74#define BFA_FCS_GET_MS_FROM_PORT(port) (&port->port_topo.pfab.ms)
75#define BFA_FCS_GET_FDMI_FROM_PORT(port) (&port->port_topo.pfab.ms.fdmi)
76
77/*
78 * handler for unsolicied frames
79 */
80void bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs,
81 u16 len);
82
83/*
84 * Following routines will be called by Fabric to indicate port
85 * online/offline to vport.
86 */
87void bfa_fcs_lport_attach(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs,
88 uint16_t vf_id, struct bfa_fcs_vport_s *vport);
89void bfa_fcs_lport_init(struct bfa_fcs_port_s *lport,
90 struct bfa_port_cfg_s *port_cfg);
91void bfa_fcs_port_online(struct bfa_fcs_port_s *port);
92void bfa_fcs_port_offline(struct bfa_fcs_port_s *port);
93void bfa_fcs_port_delete(struct bfa_fcs_port_s *port);
94bfa_boolean_t bfa_fcs_port_is_online(struct bfa_fcs_port_s *port);
95
96/*
97 * Lookup rport based on PID
98 */
99struct bfa_fcs_rport_s *bfa_fcs_port_get_rport_by_pid(
100 struct bfa_fcs_port_s *port, u32 pid);
101
102/*
103 * Lookup rport based on PWWN
104 */
105struct bfa_fcs_rport_s *bfa_fcs_port_get_rport_by_pwwn(
106 struct bfa_fcs_port_s *port, wwn_t pwwn);
107struct bfa_fcs_rport_s *bfa_fcs_port_get_rport_by_nwwn(
108 struct bfa_fcs_port_s *port, wwn_t nwwn);
109void bfa_fcs_port_add_rport(struct bfa_fcs_port_s *port,
110 struct bfa_fcs_rport_s *rport);
111void bfa_fcs_port_del_rport(struct bfa_fcs_port_s *port,
112 struct bfa_fcs_rport_s *rport);
113
114void bfa_fcs_port_modinit(struct bfa_fcs_s *fcs);
115void bfa_fcs_port_modexit(struct bfa_fcs_s *fcs);
116void bfa_fcs_port_lip(struct bfa_fcs_port_s *port);
117
118#endif /* __FCS_LPORT_H__ */
diff --git a/drivers/scsi/bfa/fcs_ms.h b/drivers/scsi/bfa/fcs_ms.h
deleted file mode 100644
index b6a8c12876f4..000000000000
--- a/drivers/scsi/bfa/fcs_ms.h
+++ /dev/null
@@ -1,35 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * fcs_ms.h FCS ms interfaces
20 */
21#ifndef __FCS_MS_H__
22#define __FCS_MS_H__
23
24/* MS FCS routines */
25void bfa_fcs_port_ms_init(struct bfa_fcs_port_s *port);
26void bfa_fcs_port_ms_offline(struct bfa_fcs_port_s *port);
27void bfa_fcs_port_ms_online(struct bfa_fcs_port_s *port);
28void bfa_fcs_port_ms_fabric_rscn(struct bfa_fcs_port_s *port);
29
30/* FDMI FCS routines */
31void bfa_fcs_port_fdmi_init(struct bfa_fcs_port_ms_s *ms);
32void bfa_fcs_port_fdmi_offline(struct bfa_fcs_port_ms_s *ms);
33void bfa_fcs_port_fdmi_online(struct bfa_fcs_port_ms_s *ms);
34
35#endif
diff --git a/drivers/scsi/bfa/fcs_port.h b/drivers/scsi/bfa/fcs_port.h
deleted file mode 100644
index 408c06a7d164..000000000000
--- a/drivers/scsi/bfa/fcs_port.h
+++ /dev/null
@@ -1,31 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * fcs_pport.h FCS physical port interfaces
20 */
21
22
23#ifndef __FCS_PPORT_H__
24#define __FCS_PPORT_H__
25
26/*
27 * fcs friend functions: only between fcs modules
28 */
29void bfa_fcs_pport_attach(struct bfa_fcs_s *fcs);
30
31#endif /* __FCS_PPORT_H__ */
diff --git a/drivers/scsi/bfa/fcs_rport.h b/drivers/scsi/bfa/fcs_rport.h
deleted file mode 100644
index e634fb7a69b8..000000000000
--- a/drivers/scsi/bfa/fcs_rport.h
+++ /dev/null
@@ -1,61 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * fcs_rport.h FCS rport interfaces and defines
20 */
21
22#ifndef __FCS_RPORT_H__
23#define __FCS_RPORT_H__
24
25#include <fcs/bfa_fcs_rport.h>
26
27#define BFA_FCS_RPORT_MAX_RETRIES (5)
28
29void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs,
30 u16 len);
31void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport);
32
33struct bfa_fcs_rport_s *bfa_fcs_rport_create(struct bfa_fcs_port_s *port,
34 u32 pid);
35void bfa_fcs_rport_delete(struct bfa_fcs_rport_s *rport);
36void bfa_fcs_rport_online(struct bfa_fcs_rport_s *rport);
37void bfa_fcs_rport_offline(struct bfa_fcs_rport_s *rport);
38void bfa_fcs_rport_start(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs,
39 struct fc_logi_s *plogi_rsp);
40void bfa_fcs_rport_plogi_create(struct bfa_fcs_port_s *port,
41 struct fchs_s *rx_fchs,
42 struct fc_logi_s *plogi);
43void bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs,
44 struct fc_logi_s *plogi);
45void bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport);
46void bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, uint16_t ox_id);
47void bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport);
48void bfa_fcs_rport_itntm_ack(struct bfa_fcs_rport_s *rport);
49void bfa_fcs_rport_tin_ack(struct bfa_fcs_rport_s *rport);
50void bfa_fcs_rport_fcptm_offline_done(struct bfa_fcs_rport_s *rport);
51int bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport);
52struct bfa_fcs_rport_s *bfa_fcs_rport_create_by_wwn(struct bfa_fcs_port_s *port,
53 wwn_t wwn);
54
55
56/* Rport Features */
57void bfa_fcs_rpf_init(struct bfa_fcs_rport_s *rport);
58void bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport);
59void bfa_fcs_rpf_rport_offline(struct bfa_fcs_rport_s *rport);
60
61#endif /* __FCS_RPORT_H__ */
diff --git a/drivers/scsi/bfa/fcs_trcmod.h b/drivers/scsi/bfa/fcs_trcmod.h
deleted file mode 100644
index 41b5ae8d7644..000000000000
--- a/drivers/scsi/bfa/fcs_trcmod.h
+++ /dev/null
@@ -1,56 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * fcs_trcmod.h BFA FCS trace modules
20 */
21
22#ifndef __FCS_TRCMOD_H__
23#define __FCS_TRCMOD_H__
24
25#include <cs/bfa_trc.h>
26
27/*
28 * !!! Only append to the enums defined here to avoid any versioning
29 * !!! needed between trace utility and driver version
30 */
31enum {
32 BFA_TRC_FCS_FABRIC = 1,
33 BFA_TRC_FCS_VFAPI = 2,
34 BFA_TRC_FCS_PORT = 3,
35 BFA_TRC_FCS_VPORT = 4,
36 BFA_TRC_FCS_VP_API = 5,
37 BFA_TRC_FCS_VPS = 6,
38 BFA_TRC_FCS_RPORT = 7,
39 BFA_TRC_FCS_FCPIM = 8,
40 BFA_TRC_FCS_FCPTM = 9,
41 BFA_TRC_FCS_NS = 10,
42 BFA_TRC_FCS_SCN = 11,
43 BFA_TRC_FCS_LOOP = 12,
44 BFA_TRC_FCS_UF = 13,
45 BFA_TRC_FCS_PPORT = 14,
46 BFA_TRC_FCS_FCPIP = 15,
47 BFA_TRC_FCS_PORT_API = 16,
48 BFA_TRC_FCS_RPORT_API = 17,
49 BFA_TRC_FCS_AUTH = 18,
50 BFA_TRC_FCS_N2N = 19,
51 BFA_TRC_FCS_MS = 20,
52 BFA_TRC_FCS_FDMI = 21,
53 BFA_TRC_FCS_RPORT_FTRS = 22,
54};
55
56#endif /* __FCS_TRCMOD_H__ */
diff --git a/drivers/scsi/bfa/fcs_uf.h b/drivers/scsi/bfa/fcs_uf.h
deleted file mode 100644
index f591072214fe..000000000000
--- a/drivers/scsi/bfa/fcs_uf.h
+++ /dev/null
@@ -1,31 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * fcs_uf.h FCS unsolicited frame receive
20 */
21
22
23#ifndef __FCS_UF_H__
24#define __FCS_UF_H__
25
26/*
27 * fcs friend functions: only between fcs modules
28 */
29void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs);
30
31#endif /* __FCS_UF_H__ */
diff --git a/drivers/scsi/bfa/fcs_vport.h b/drivers/scsi/bfa/fcs_vport.h
deleted file mode 100644
index bb647a4a5dde..000000000000
--- a/drivers/scsi/bfa/fcs_vport.h
+++ /dev/null
@@ -1,32 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __FCS_VPORT_H__
19#define __FCS_VPORT_H__
20
21#include <fcs/bfa_fcs_lport.h>
22#include <fcs/bfa_fcs_vport.h>
23#include <defs/bfa_defs_pci.h>
24
25void bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport);
26void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport);
27void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport);
28void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport);
29void bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport);
30
31#endif /* __FCS_VPORT_H__ */
32
diff --git a/drivers/scsi/bfa/fdmi.c b/drivers/scsi/bfa/fdmi.c
deleted file mode 100644
index 2b50eabf4b1e..000000000000
--- a/drivers/scsi/bfa/fdmi.c
+++ /dev/null
@@ -1,1230 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * port_api.c BFA FCS port
20 */
21
22
23#include <bfa.h>
24#include <bfa_svc.h>
25#include "fcs_lport.h"
26#include "fcs_rport.h"
27#include "lport_priv.h"
28#include "fcs_trcmod.h"
29#include "fcs_fcxp.h"
30#include <fcs/bfa_fcs_fdmi.h>
31
32BFA_TRC_FILE(FCS, FDMI);
33
34#define BFA_FCS_FDMI_CMD_MAX_RETRIES 2
35
36/*
37 * forward declarations
38 */
39static void bfa_fcs_port_fdmi_send_rhba(void *fdmi_cbarg,
40 struct bfa_fcxp_s *fcxp_alloced);
41static void bfa_fcs_port_fdmi_send_rprt(void *fdmi_cbarg,
42 struct bfa_fcxp_s *fcxp_alloced);
43static void bfa_fcs_port_fdmi_send_rpa(void *fdmi_cbarg,
44 struct bfa_fcxp_s *fcxp_alloced);
45static void bfa_fcs_port_fdmi_rhba_response(void *fcsarg,
46 struct bfa_fcxp_s *fcxp,
47 void *cbarg,
48 bfa_status_t req_status,
49 u32 rsp_len,
50 u32 resid_len,
51 struct fchs_s *rsp_fchs);
52static void bfa_fcs_port_fdmi_rprt_response(void *fcsarg,
53 struct bfa_fcxp_s *fcxp,
54 void *cbarg,
55 bfa_status_t req_status,
56 u32 rsp_len,
57 u32 resid_len,
58 struct fchs_s *rsp_fchs);
59static void bfa_fcs_port_fdmi_rpa_response(void *fcsarg,
60 struct bfa_fcxp_s *fcxp,
61 void *cbarg,
62 bfa_status_t req_status,
63 u32 rsp_len,
64 u32 resid_len,
65 struct fchs_s *rsp_fchs);
66static void bfa_fcs_port_fdmi_timeout(void *arg);
67static u16 bfa_fcs_port_fdmi_build_rhba_pyld(
68 struct bfa_fcs_port_fdmi_s *fdmi, u8 *pyld);
69static u16 bfa_fcs_port_fdmi_build_rprt_pyld(
70 struct bfa_fcs_port_fdmi_s *fdmi, u8 *pyld);
71static u16 bfa_fcs_port_fdmi_build_rpa_pyld(
72 struct bfa_fcs_port_fdmi_s *fdmi, u8 *pyld);
73static u16 bfa_fcs_port_fdmi_build_portattr_block(
74 struct bfa_fcs_port_fdmi_s *fdmi, u8 *pyld);
75static void bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_port_fdmi_s *fdmi,
76 struct bfa_fcs_fdmi_hba_attr_s *hba_attr);
77static void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_port_fdmi_s *fdmi,
78 struct bfa_fcs_fdmi_port_attr_s *port_attr);
79/**
80 * fcs_fdmi_sm FCS FDMI state machine
81 */
82
/**
 * FDMI State Machine events.
 *
 * Internal event codes only; the numbering gap at 3 is intentional and
 * kept as-is so trace output stays comparable across versions.
 */
enum port_fdmi_event {
	FDMISM_EVENT_PORT_ONLINE = 1,	/* local port came online */
	FDMISM_EVENT_PORT_OFFLINE = 2,	/* local port went offline */
	FDMISM_EVENT_RSP_OK = 4,	/* management server accepted request */
	FDMISM_EVENT_RSP_ERROR = 5,	/* request failed or was rejected */
	FDMISM_EVENT_TIMEOUT = 6,	/* retry timer expired */
	FDMISM_EVENT_RHBA_SENT = 7,	/* RHBA request handed to FCXP */
	FDMISM_EVENT_RPRT_SENT = 8,	/* RPRT request handed to FCXP */
	FDMISM_EVENT_RPA_SENT = 9,	/* RPA request handed to FCXP */
};
96
97static void bfa_fcs_port_fdmi_sm_offline(struct bfa_fcs_port_fdmi_s *fdmi,
98 enum port_fdmi_event event);
99static void bfa_fcs_port_fdmi_sm_sending_rhba(struct bfa_fcs_port_fdmi_s *fdmi,
100 enum port_fdmi_event event);
101static void bfa_fcs_port_fdmi_sm_rhba(struct bfa_fcs_port_fdmi_s *fdmi,
102 enum port_fdmi_event event);
103static void bfa_fcs_port_fdmi_sm_rhba_retry(struct bfa_fcs_port_fdmi_s *fdmi,
104 enum port_fdmi_event event);
105static void bfa_fcs_port_fdmi_sm_sending_rprt(struct bfa_fcs_port_fdmi_s *fdmi,
106 enum port_fdmi_event event);
107static void bfa_fcs_port_fdmi_sm_rprt(struct bfa_fcs_port_fdmi_s *fdmi,
108 enum port_fdmi_event event);
109static void bfa_fcs_port_fdmi_sm_rprt_retry(struct bfa_fcs_port_fdmi_s *fdmi,
110 enum port_fdmi_event event);
111static void bfa_fcs_port_fdmi_sm_sending_rpa(struct bfa_fcs_port_fdmi_s *fdmi,
112 enum port_fdmi_event event);
113static void bfa_fcs_port_fdmi_sm_rpa(struct bfa_fcs_port_fdmi_s *fdmi,
114 enum port_fdmi_event event);
115static void bfa_fcs_port_fdmi_sm_rpa_retry(struct bfa_fcs_port_fdmi_s *fdmi,
116 enum port_fdmi_event event);
117static void bfa_fcs_port_fdmi_sm_online(struct bfa_fcs_port_fdmi_s *fdmi,
118 enum port_fdmi_event event);
119static void bfa_fcs_port_fdmi_sm_disabled(struct bfa_fcs_port_fdmi_s *fdmi,
120 enum port_fdmi_event event);
121
/**
 * Start in offline state - awaiting MS to send start.
 *
 * On PORT_ONLINE: vports register themselves via RPRT, while the base
 * port starts with RHBA (whose payload also registers the base port).
 */
static void
bfa_fcs_port_fdmi_sm_offline(struct bfa_fcs_port_fdmi_s *fdmi,
			enum port_fdmi_event event)
{
	struct bfa_fcs_port_s *port = fdmi->ms->port;

	bfa_trc(port->fcs, port->port_cfg.pwwn);
	bfa_trc(port->fcs, event);

	fdmi->retry_cnt = 0;	/* every (re)entry resets the retry budget */

	switch (event) {
	case FDMISM_EVENT_PORT_ONLINE:
		if (port->vport) {
			/*
			 * For Vports, register a new port.
			 */
			bfa_sm_set_state(fdmi,
				bfa_fcs_port_fdmi_sm_sending_rprt);
			bfa_fcs_port_fdmi_send_rprt(fdmi, NULL);
		} else {
			/*
			 * For a base port, we should first register the HBA
			 * attribute. The HBA attribute also contains the base
			 * port registration.
			 */
			bfa_sm_set_state(fdmi,
				bfa_fcs_port_fdmi_sm_sending_rhba);
			bfa_fcs_port_fdmi_send_rhba(fdmi, NULL);
		}
		break;

	case FDMISM_EVENT_PORT_OFFLINE:
		/* already offline; nothing to do */
		break;

	default:
		bfa_sm_fault(port->fcs, event);
	}
}
164
165static void
166bfa_fcs_port_fdmi_sm_sending_rhba(struct bfa_fcs_port_fdmi_s *fdmi,
167 enum port_fdmi_event event)
168{
169 struct bfa_fcs_port_s *port = fdmi->ms->port;
170
171 bfa_trc(port->fcs, port->port_cfg.pwwn);
172 bfa_trc(port->fcs, event);
173
174 switch (event) {
175 case FDMISM_EVENT_RHBA_SENT:
176 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_rhba);
177 break;
178
179 case FDMISM_EVENT_PORT_OFFLINE:
180 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
181 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port),
182 &fdmi->fcxp_wqe);
183 break;
184
185 default:
186 bfa_sm_fault(port->fcs, event);
187 }
188}
189
/**
 * RHBA sent; waiting for the management server response.  Errors are
 * retried up to BFA_FCS_FDMI_CMD_MAX_RETRIES via a delayed timer.
 */
static void
bfa_fcs_port_fdmi_sm_rhba(struct bfa_fcs_port_fdmi_s *fdmi,
			enum port_fdmi_event event)
{
	struct bfa_fcs_port_s *port = fdmi->ms->port;

	bfa_trc(port->fcs, port->port_cfg.pwwn);
	bfa_trc(port->fcs, event);

	switch (event) {
	case FDMISM_EVENT_RSP_ERROR:
		/*
		 * if max retries have not been reached, start timer for a
		 * delayed retry
		 */
		if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) {
			bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_rhba_retry);
			bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port),
					&fdmi->timer, bfa_fcs_port_fdmi_timeout,
					fdmi, BFA_FCS_RETRY_TIMEOUT);
		} else {
			/*
			 * set state to offline
			 */
			bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
		}
		break;

	case FDMISM_EVENT_RSP_OK:
		/*
		 * Initiate Register Port Attributes
		 */
		bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_sending_rpa);
		fdmi->retry_cnt = 0;
		bfa_fcs_port_fdmi_send_rpa(fdmi, NULL);
		break;

	case FDMISM_EVENT_PORT_OFFLINE:
		/* discard the in-flight fcxp before dropping offline */
		bfa_fcxp_discard(fdmi->fcxp);
		bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
		break;

	default:
		bfa_sm_fault(port->fcs, event);
	}
}
236
237static void
238bfa_fcs_port_fdmi_sm_rhba_retry(struct bfa_fcs_port_fdmi_s *fdmi,
239 enum port_fdmi_event event)
240{
241 struct bfa_fcs_port_s *port = fdmi->ms->port;
242
243 bfa_trc(port->fcs, port->port_cfg.pwwn);
244 bfa_trc(port->fcs, event);
245
246 switch (event) {
247 case FDMISM_EVENT_TIMEOUT:
248 /*
249 * Retry Timer Expired. Re-send
250 */
251 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_sending_rhba);
252 bfa_fcs_port_fdmi_send_rhba(fdmi, NULL);
253 break;
254
255 case FDMISM_EVENT_PORT_OFFLINE:
256 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
257 bfa_timer_stop(&fdmi->timer);
258 break;
259
260 default:
261 bfa_sm_fault(port->fcs, event);
262 }
263}
264
265/*
266* RPRT : Register Port
267 */
268static void
269bfa_fcs_port_fdmi_sm_sending_rprt(struct bfa_fcs_port_fdmi_s *fdmi,
270 enum port_fdmi_event event)
271{
272 struct bfa_fcs_port_s *port = fdmi->ms->port;
273
274 bfa_trc(port->fcs, port->port_cfg.pwwn);
275 bfa_trc(port->fcs, event);
276
277 switch (event) {
278 case FDMISM_EVENT_RPRT_SENT:
279 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_rprt);
280 break;
281
282 case FDMISM_EVENT_PORT_OFFLINE:
283 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
284 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port),
285 &fdmi->fcxp_wqe);
286 break;
287
288 default:
289 bfa_sm_fault(port->fcs, event);
290 }
291}
292
/**
 * RPRT sent; waiting for the management server response.  On success a
 * vport goes straight to online (no separate RPA step on this path).
 */
static void
bfa_fcs_port_fdmi_sm_rprt(struct bfa_fcs_port_fdmi_s *fdmi,
			enum port_fdmi_event event)
{
	struct bfa_fcs_port_s *port = fdmi->ms->port;

	bfa_trc(port->fcs, port->port_cfg.pwwn);
	bfa_trc(port->fcs, event);

	switch (event) {
	case FDMISM_EVENT_RSP_ERROR:
		/*
		 * if max retries have not been reached, start timer for a
		 * delayed retry
		 */
		if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) {
			bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_rprt_retry);
			bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port),
					&fdmi->timer, bfa_fcs_port_fdmi_timeout,
					fdmi, BFA_FCS_RETRY_TIMEOUT);

		} else {
			/*
			 * set state to offline
			 */
			bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
			fdmi->retry_cnt = 0;
		}
		break;

	case FDMISM_EVENT_RSP_OK:
		fdmi->retry_cnt = 0;
		bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_online);
		break;

	case FDMISM_EVENT_PORT_OFFLINE:
		/* discard the in-flight fcxp before dropping offline */
		bfa_fcxp_discard(fdmi->fcxp);
		bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
		break;

	default:
		bfa_sm_fault(port->fcs, event);
	}
}
337
/**
 * RPRT failed; a delayed-retry timer is running.
 */
static void
bfa_fcs_port_fdmi_sm_rprt_retry(struct bfa_fcs_port_fdmi_s *fdmi,
			enum port_fdmi_event event)
{
	struct bfa_fcs_port_s *port = fdmi->ms->port;

	bfa_trc(port->fcs, port->port_cfg.pwwn);
	bfa_trc(port->fcs, event);

	switch (event) {
	case FDMISM_EVENT_TIMEOUT:
		/*
		 * Retry Timer Expired. Re-send
		 */
		bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_sending_rprt);
		bfa_fcs_port_fdmi_send_rprt(fdmi, NULL);
		break;

	case FDMISM_EVENT_PORT_OFFLINE:
		/* stop the pending retry timer before going offline */
		bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
		bfa_timer_stop(&fdmi->timer);
		break;

	default:
		bfa_sm_fault(port->fcs, event);
	}
}
365
366/*
367 * Register Port Attributes
368 */
369static void
370bfa_fcs_port_fdmi_sm_sending_rpa(struct bfa_fcs_port_fdmi_s *fdmi,
371 enum port_fdmi_event event)
372{
373 struct bfa_fcs_port_s *port = fdmi->ms->port;
374
375 bfa_trc(port->fcs, port->port_cfg.pwwn);
376 bfa_trc(port->fcs, event);
377
378 switch (event) {
379 case FDMISM_EVENT_RPA_SENT:
380 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_rpa);
381 break;
382
383 case FDMISM_EVENT_PORT_OFFLINE:
384 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
385 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port),
386 &fdmi->fcxp_wqe);
387 break;
388
389 default:
390 bfa_sm_fault(port->fcs, event);
391 }
392}
393
/**
 * RPA sent; waiting for the management server response.
 */
static void
bfa_fcs_port_fdmi_sm_rpa(struct bfa_fcs_port_fdmi_s *fdmi,
			enum port_fdmi_event event)
{
	struct bfa_fcs_port_s *port = fdmi->ms->port;

	bfa_trc(port->fcs, port->port_cfg.pwwn);
	bfa_trc(port->fcs, event);

	switch (event) {
	case FDMISM_EVENT_RSP_ERROR:
		/*
		 * if max retries have not been reached, start timer for a
		 * delayed retry
		 */
		if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) {
			bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_rpa_retry);
			bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port),
					&fdmi->timer, bfa_fcs_port_fdmi_timeout,
					fdmi, BFA_FCS_RETRY_TIMEOUT);
		} else {
			/*
			 * set state to offline
			 */
			bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
			fdmi->retry_cnt = 0;
		}
		break;

	case FDMISM_EVENT_RSP_OK:
		/* registration complete */
		bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_online);
		fdmi->retry_cnt = 0;
		break;

	case FDMISM_EVENT_PORT_OFFLINE:
		/* discard the in-flight fcxp before dropping offline */
		bfa_fcxp_discard(fdmi->fcxp);
		bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
		break;

	default:
		bfa_sm_fault(port->fcs, event);
	}
}
437
/**
 * RPA failed; a delayed-retry timer is running.
 */
static void
bfa_fcs_port_fdmi_sm_rpa_retry(struct bfa_fcs_port_fdmi_s *fdmi,
			enum port_fdmi_event event)
{
	struct bfa_fcs_port_s *port = fdmi->ms->port;

	bfa_trc(port->fcs, port->port_cfg.pwwn);
	bfa_trc(port->fcs, event);

	switch (event) {
	case FDMISM_EVENT_TIMEOUT:
		/*
		 * Retry Timer Expired. Re-send
		 */
		bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_sending_rpa);
		bfa_fcs_port_fdmi_send_rpa(fdmi, NULL);
		break;

	case FDMISM_EVENT_PORT_OFFLINE:
		/* stop the pending retry timer before going offline */
		bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
		bfa_timer_stop(&fdmi->timer);
		break;

	default:
		bfa_sm_fault(port->fcs, event);
	}
}
465
466static void
467bfa_fcs_port_fdmi_sm_online(struct bfa_fcs_port_fdmi_s *fdmi,
468 enum port_fdmi_event event)
469{
470 struct bfa_fcs_port_s *port = fdmi->ms->port;
471
472 bfa_trc(port->fcs, port->port_cfg.pwwn);
473 bfa_trc(port->fcs, event);
474
475 switch (event) {
476 case FDMISM_EVENT_PORT_OFFLINE:
477 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
478 break;
479
480 default:
481 bfa_sm_fault(port->fcs, event);
482 }
483}
484
/**
 * FDMI is disabled state.
 *
 * Terminal no-op state: all events are traced and ignored.
 */
static void
bfa_fcs_port_fdmi_sm_disabled(struct bfa_fcs_port_fdmi_s *fdmi,
		enum port_fdmi_event event)
{
	struct bfa_fcs_port_s *port = fdmi->ms->port;

	bfa_trc(port->fcs, port->port_cfg.pwwn);
	bfa_trc(port->fcs, event);

	/* No op State. It can only be enabled at Driver Init. */
}
499
500/**
501* RHBA : Register HBA Attributes.
502 */
503static void
504bfa_fcs_port_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
505{
506 struct bfa_fcs_port_fdmi_s *fdmi = fdmi_cbarg;
507 struct bfa_fcs_port_s *port = fdmi->ms->port;
508 struct fchs_s fchs;
509 int len, attr_len;
510 struct bfa_fcxp_s *fcxp;
511 u8 *pyld;
512
513 bfa_trc(port->fcs, port->port_cfg.pwwn);
514
515 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
516 if (!fcxp) {
517 bfa_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
518 bfa_fcs_port_fdmi_send_rhba, fdmi);
519 return;
520 }
521 fdmi->fcxp = fcxp;
522
523 pyld = bfa_fcxp_get_reqbuf(fcxp);
524 bfa_os_memset(pyld, 0, FC_MAX_PDUSZ);
525
526 len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_port_get_fcid(port),
527 FDMI_RHBA);
528
529 attr_len = bfa_fcs_port_fdmi_build_rhba_pyld(fdmi,
530 (u8 *) ((struct ct_hdr_s *) pyld + 1));
531
532 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
533 FC_CLASS_3, (len + attr_len), &fchs,
534 bfa_fcs_port_fdmi_rhba_response, (void *)fdmi,
535 FC_MAX_PDUSZ, FC_FCCT_TOV);
536
537 bfa_sm_send_event(fdmi, FDMISM_EVENT_RHBA_SENT);
538}
539
540static u16
541bfa_fcs_port_fdmi_build_rhba_pyld(struct bfa_fcs_port_fdmi_s *fdmi,
542 u8 *pyld)
543{
544 struct bfa_fcs_port_s *port = fdmi->ms->port;
545 struct bfa_fcs_fdmi_hba_attr_s hba_attr; /* @todo */
546 struct bfa_fcs_fdmi_hba_attr_s *fcs_hba_attr = &hba_attr; /* @todo */
547 struct fdmi_rhba_s *rhba = (struct fdmi_rhba_s *) pyld;
548 struct fdmi_attr_s *attr;
549 u8 *curr_ptr;
550 u16 len, count;
551
552 /*
553 * get hba attributes
554 */
555 bfa_fcs_fdmi_get_hbaattr(fdmi, fcs_hba_attr);
556
557 rhba->hba_id = bfa_fcs_port_get_pwwn(port);
558 rhba->port_list.num_ports = bfa_os_htonl(1);
559 rhba->port_list.port_entry = bfa_fcs_port_get_pwwn(port);
560
561 len = sizeof(rhba->hba_id) + sizeof(rhba->port_list);
562
563 count = 0;
564 len += sizeof(rhba->hba_attr_blk.attr_count);
565
566 /*
567 * fill out the invididual entries of the HBA attrib Block
568 */
569 curr_ptr = (u8 *) &rhba->hba_attr_blk.hba_attr;
570
571 /*
572 * Node Name
573 */
574 attr = (struct fdmi_attr_s *) curr_ptr;
575 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_NODENAME);
576 attr->len = sizeof(wwn_t);
577 memcpy(attr->value, &bfa_fcs_port_get_nwwn(port), attr->len);
578 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
579 len += attr->len;
580 count++;
581 attr->len =
582 bfa_os_htons(attr->len + sizeof(attr->type) +
583 sizeof(attr->len));
584
585 /*
586 * Manufacturer
587 */
588 attr = (struct fdmi_attr_s *) curr_ptr;
589 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MANUFACTURER);
590 attr->len = (u16) strlen(fcs_hba_attr->manufacturer);
591 memcpy(attr->value, fcs_hba_attr->manufacturer, attr->len);
592 /* variable fields need to be 4 byte aligned */
593 attr->len = fc_roundup(attr->len, sizeof(u32));
594 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
595 len += attr->len;
596 count++;
597 attr->len =
598 bfa_os_htons(attr->len + sizeof(attr->type) +
599 sizeof(attr->len));
600
601 /*
602 * Serial Number
603 */
604 attr = (struct fdmi_attr_s *) curr_ptr;
605 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_SERIALNUM);
606 attr->len = (u16) strlen(fcs_hba_attr->serial_num);
607 memcpy(attr->value, fcs_hba_attr->serial_num, attr->len);
608 /* variable fields need to be 4 byte aligned */
609 attr->len = fc_roundup(attr->len, sizeof(u32));
610 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
611 len += attr->len;
612 count++;
613 attr->len =
614 bfa_os_htons(attr->len + sizeof(attr->type) +
615 sizeof(attr->len));
616
617 /*
618 * Model
619 */
620 attr = (struct fdmi_attr_s *) curr_ptr;
621 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MODEL);
622 attr->len = (u16) strlen(fcs_hba_attr->model);
623 memcpy(attr->value, fcs_hba_attr->model, attr->len);
624 /* variable fields need to be 4 byte aligned */
625 attr->len = fc_roundup(attr->len, sizeof(u32));
626 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
627 len += attr->len;
628 count++;
629 attr->len =
630 bfa_os_htons(attr->len + sizeof(attr->type) +
631 sizeof(attr->len));
632
633 /*
634 * Model Desc
635 */
636 attr = (struct fdmi_attr_s *) curr_ptr;
637 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MODEL_DESC);
638 attr->len = (u16) strlen(fcs_hba_attr->model_desc);
639 memcpy(attr->value, fcs_hba_attr->model_desc, attr->len);
640 /* variable fields need to be 4 byte aligned */
641 attr->len = fc_roundup(attr->len, sizeof(u32));
642 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
643 len += attr->len;
644 count++;
645 attr->len =
646 bfa_os_htons(attr->len + sizeof(attr->type) +
647 sizeof(attr->len));
648
649 /*
650 * H/W Version
651 */
652 if (fcs_hba_attr->hw_version[0] != '\0') {
653 attr = (struct fdmi_attr_s *) curr_ptr;
654 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_HW_VERSION);
655 attr->len = (u16) strlen(fcs_hba_attr->hw_version);
656 memcpy(attr->value, fcs_hba_attr->hw_version, attr->len);
657 /* variable fields need to be 4 byte aligned */
658 attr->len = fc_roundup(attr->len, sizeof(u32));
659 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
660 len += attr->len;
661 count++;
662 attr->len =
663 bfa_os_htons(attr->len + sizeof(attr->type) +
664 sizeof(attr->len));
665 }
666
667 /*
668 * Driver Version
669 */
670 attr = (struct fdmi_attr_s *) curr_ptr;
671 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_DRIVER_VERSION);
672 attr->len = (u16) strlen(fcs_hba_attr->driver_version);
673 memcpy(attr->value, fcs_hba_attr->driver_version, attr->len);
674 /* variable fields need to be 4 byte aligned */
675 attr->len = fc_roundup(attr->len, sizeof(u32));
676 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
677 len += attr->len;;
678 count++;
679 attr->len =
680 bfa_os_htons(attr->len + sizeof(attr->type) +
681 sizeof(attr->len));
682
683 /*
684 * Option Rom Version
685 */
686 if (fcs_hba_attr->option_rom_ver[0] != '\0') {
687 attr = (struct fdmi_attr_s *) curr_ptr;
688 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_ROM_VERSION);
689 attr->len = (u16) strlen(fcs_hba_attr->option_rom_ver);
690 memcpy(attr->value, fcs_hba_attr->option_rom_ver, attr->len);
691 /* variable fields need to be 4 byte aligned */
692 attr->len = fc_roundup(attr->len, sizeof(u32));
693 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
694 len += attr->len;
695 count++;
696 attr->len =
697 bfa_os_htons(attr->len + sizeof(attr->type) +
698 sizeof(attr->len));
699 }
700
701 /*
702 * f/w Version = driver version
703 */
704 attr = (struct fdmi_attr_s *) curr_ptr;
705 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_FW_VERSION);
706 attr->len = (u16) strlen(fcs_hba_attr->driver_version);
707 memcpy(attr->value, fcs_hba_attr->driver_version, attr->len);
708 /* variable fields need to be 4 byte aligned */
709 attr->len = fc_roundup(attr->len, sizeof(u32));
710 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
711 len += attr->len;
712 count++;
713 attr->len =
714 bfa_os_htons(attr->len + sizeof(attr->type) +
715 sizeof(attr->len));
716
717 /*
718 * OS Name
719 */
720 if (fcs_hba_attr->os_name[0] != '\0') {
721 attr = (struct fdmi_attr_s *) curr_ptr;
722 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_OS_NAME);
723 attr->len = (u16) strlen(fcs_hba_attr->os_name);
724 memcpy(attr->value, fcs_hba_attr->os_name, attr->len);
725 /* variable fields need to be 4 byte aligned */
726 attr->len = fc_roundup(attr->len, sizeof(u32));
727 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
728 len += attr->len;
729 count++;
730 attr->len =
731 bfa_os_htons(attr->len + sizeof(attr->type) +
732 sizeof(attr->len));
733 }
734
735 /*
736 * MAX_CT_PAYLOAD
737 */
738 attr = (struct fdmi_attr_s *) curr_ptr;
739 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MAX_CT);
740 attr->len = sizeof(fcs_hba_attr->max_ct_pyld);
741 memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, attr->len);
742 len += attr->len;
743 count++;
744 attr->len =
745 bfa_os_htons(attr->len + sizeof(attr->type) +
746 sizeof(attr->len));
747
748 /*
749 * Update size of payload
750 */
751 len += ((sizeof(attr->type) + sizeof(attr->len)) * count);
752
753 rhba->hba_attr_blk.attr_count = bfa_os_htonl(count);
754 return len;
755}
756
/**
 * RHBA response handler: maps transport status and the CT accept/reject
 * code onto RSP_OK / RSP_ERROR state machine events.
 */
static void
bfa_fcs_port_fdmi_rhba_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
				void *cbarg, bfa_status_t req_status,
				u32 rsp_len, u32 resid_len,
				struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_port_fdmi_s *fdmi = (struct bfa_fcs_port_fdmi_s *)cbarg;
	struct bfa_fcs_port_s *port = fdmi->ms->port;
	struct ct_hdr_s *cthdr = NULL;

	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		bfa_trc(port->fcs, req_status);
		bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
		return;
	}

	/* CT response code arrives in network order; convert in place */
	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);

	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
		bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
		return;
	}

	/* rejected: trace the CT reason/explanation codes for debugging */
	bfa_trc(port->fcs, cthdr->reason_code);
	bfa_trc(port->fcs, cthdr->exp_code);
	bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
}
790
/**
 * RPRT : Register Port
 *
 * Used on the vport path: registers this port under the base port's
 * HBA entry.  Allocates an fcxp (queueing a retry if none is free),
 * builds CT header + RPRT payload, sends, and signals RPRT_SENT.
 */
static void
bfa_fcs_port_fdmi_send_rprt(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_port_fdmi_s *fdmi = fdmi_cbarg;
	struct bfa_fcs_port_s *port = fdmi->ms->port;
	struct fchs_s fchs;
	u16 len, attr_len;
	struct bfa_fcxp_s *fcxp;
	u8 *pyld;

	bfa_trc(port->fcs, port->port_cfg.pwwn);

	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		/* no fcxp available: re-enter here once one is allocated */
		bfa_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
				    bfa_fcs_port_fdmi_send_rprt, fdmi);
		return;
	}
	fdmi->fcxp = fcxp;

	pyld = bfa_fcxp_get_reqbuf(fcxp);
	bfa_os_memset(pyld, 0, FC_MAX_PDUSZ);

	/* CT header first, then the RPRT payload right behind it */
	len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_port_get_fcid(port),
				   FDMI_RPRT);

	attr_len = bfa_fcs_port_fdmi_build_rprt_pyld(fdmi,
			(u8 *) ((struct ct_hdr_s *) pyld + 1));

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
		      FC_CLASS_3, len + attr_len, &fchs,
		      bfa_fcs_port_fdmi_rprt_response, (void *)fdmi,
		      FC_MAX_PDUSZ, FC_FCCT_TOV);

	bfa_sm_send_event(fdmi, FDMISM_EVENT_RPRT_SENT);
}
830
/**
 * This routine builds Port Attribute Block that used in RPA, RPRT commands.
 *
 * Bookkeeping mirrors the RHBA builder: @len accumulates value bytes
 * only, the per-attribute type/len header bytes are added once at the
 * end multiplied by @count, and attr->len is converted to network order
 * as the final step of each entry.  Returns the block length in bytes.
 */
static u16
bfa_fcs_port_fdmi_build_portattr_block(struct bfa_fcs_port_fdmi_s *fdmi,
			u8 *pyld)
{
	struct bfa_fcs_fdmi_port_attr_s fcs_port_attr;
	struct fdmi_port_attr_s *port_attrib = (struct fdmi_port_attr_s *) pyld;
	struct fdmi_attr_s *attr;
	u8 *curr_ptr;
	u16 len;
	u8 count = 0;

	/*
	 * get port attributes
	 */
	bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr);

	len = sizeof(port_attrib->attr_count);

	/*
	 * fill out the individual entries
	 */
	curr_ptr = (u8 *) &port_attrib->port_attr;

	/*
	 * FC4 Types
	 */
	attr = (struct fdmi_attr_s *) curr_ptr;
	attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_FC4_TYPES);
	attr->len = sizeof(fcs_port_attr.supp_fc4_types);
	memcpy(attr->value, fcs_port_attr.supp_fc4_types, attr->len);
	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
	len += attr->len;
	++count;
	attr->len =
		bfa_os_htons(attr->len + sizeof(attr->type) +
			     sizeof(attr->len));

	/*
	 * Supported Speed
	 */
	attr = (struct fdmi_attr_s *) curr_ptr;
	attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_SUPP_SPEED);
	attr->len = sizeof(fcs_port_attr.supp_speed);
	memcpy(attr->value, &fcs_port_attr.supp_speed, attr->len);
	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
	len += attr->len;
	++count;
	attr->len =
		bfa_os_htons(attr->len + sizeof(attr->type) +
			     sizeof(attr->len));

	/*
	 * current Port Speed
	 */
	attr = (struct fdmi_attr_s *) curr_ptr;
	attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_PORT_SPEED);
	attr->len = sizeof(fcs_port_attr.curr_speed);
	memcpy(attr->value, &fcs_port_attr.curr_speed, attr->len);
	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
	len += attr->len;
	++count;
	attr->len =
		bfa_os_htons(attr->len + sizeof(attr->type) +
			     sizeof(attr->len));

	/*
	 * max frame size
	 */
	attr = (struct fdmi_attr_s *) curr_ptr;
	attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_FRAME_SIZE);
	attr->len = sizeof(fcs_port_attr.max_frm_size);
	memcpy(attr->value, &fcs_port_attr.max_frm_size, attr->len);
	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
	len += attr->len;
	++count;
	attr->len =
		bfa_os_htons(attr->len + sizeof(attr->type) +
			     sizeof(attr->len));

	/*
	 * OS Device Name (optional attribute; skipped when empty)
	 */
	if (fcs_port_attr.os_device_name[0] != '\0') {
		attr = (struct fdmi_attr_s *) curr_ptr;
		attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_DEV_NAME);
		attr->len = (u16) strlen(fcs_port_attr.os_device_name);
		memcpy(attr->value, fcs_port_attr.os_device_name, attr->len);
		/* variable fields need to be 4 byte aligned */
		attr->len = fc_roundup(attr->len, sizeof(u32));
		curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
		len += attr->len;
		++count;
		attr->len =
			bfa_os_htons(attr->len + sizeof(attr->type) +
				     sizeof(attr->len));

	}
	/*
	 * Host Name (optional attribute; skipped when empty)
	 */
	if (fcs_port_attr.host_name[0] != '\0') {
		attr = (struct fdmi_attr_s *) curr_ptr;
		attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_HOST_NAME);
		attr->len = (u16) strlen(fcs_port_attr.host_name);
		memcpy(attr->value, fcs_port_attr.host_name, attr->len);
		/* variable fields need to be 4 byte aligned */
		attr->len = fc_roundup(attr->len, sizeof(u32));
		curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
		len += attr->len;
		++count;
		attr->len =
			bfa_os_htons(attr->len + sizeof(attr->type) +
				     sizeof(attr->len));

	}

	/*
	 * Update size of payload
	 */
	port_attrib->attr_count = bfa_os_htonl(count);
	len += ((sizeof(attr->type) + sizeof(attr->len)) * count);
	return len;
}
957
958static u16
959bfa_fcs_port_fdmi_build_rprt_pyld(struct bfa_fcs_port_fdmi_s *fdmi,
960 u8 *pyld)
961{
962 struct bfa_fcs_port_s *port = fdmi->ms->port;
963 struct fdmi_rprt_s *rprt = (struct fdmi_rprt_s *) pyld;
964 u16 len;
965
966 rprt->hba_id = bfa_fcs_port_get_pwwn(bfa_fcs_get_base_port(port->fcs));
967 rprt->port_name = bfa_fcs_port_get_pwwn(port);
968
969 len = bfa_fcs_port_fdmi_build_portattr_block(fdmi,
970 (u8 *) &rprt->port_attr_blk);
971
972 len += sizeof(rprt->hba_id) + sizeof(rprt->port_name);
973
974 return len;
975}
976
/**
 * RPRT response handler: maps transport status and the CT accept/reject
 * code onto RSP_OK / RSP_ERROR state machine events.
 */
static void
bfa_fcs_port_fdmi_rprt_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
				void *cbarg, bfa_status_t req_status,
				u32 rsp_len, u32 resid_len,
				struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_port_fdmi_s *fdmi = (struct bfa_fcs_port_fdmi_s *)cbarg;
	struct bfa_fcs_port_s *port = fdmi->ms->port;
	struct ct_hdr_s *cthdr = NULL;

	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		bfa_trc(port->fcs, req_status);
		bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
		return;
	}

	/* CT response code arrives in network order; convert in place */
	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);

	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
		bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
		return;
	}

	/* rejected: trace the CT reason/explanation codes for debugging */
	bfa_trc(port->fcs, cthdr->reason_code);
	bfa_trc(port->fcs, cthdr->exp_code);
	bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
}
1010
1011/**
1012* RPA : Register Port Attributes.
1013 */
1014static void
1015bfa_fcs_port_fdmi_send_rpa(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1016{
1017 struct bfa_fcs_port_fdmi_s *fdmi = fdmi_cbarg;
1018 struct bfa_fcs_port_s *port = fdmi->ms->port;
1019 struct fchs_s fchs;
1020 u16 len, attr_len;
1021 struct bfa_fcxp_s *fcxp;
1022 u8 *pyld;
1023
1024 bfa_trc(port->fcs, port->port_cfg.pwwn);
1025
1026 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
1027 if (!fcxp) {
1028 bfa_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
1029 bfa_fcs_port_fdmi_send_rpa, fdmi);
1030 return;
1031 }
1032 fdmi->fcxp = fcxp;
1033
1034 pyld = bfa_fcxp_get_reqbuf(fcxp);
1035 bfa_os_memset(pyld, 0, FC_MAX_PDUSZ);
1036
1037 len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_port_get_fcid(port),
1038 FDMI_RPA);
1039
1040 attr_len = bfa_fcs_port_fdmi_build_rpa_pyld(fdmi,
1041 (u8 *) ((struct ct_hdr_s *) pyld + 1));
1042
1043 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1044 FC_CLASS_3, len + attr_len, &fchs,
1045 bfa_fcs_port_fdmi_rpa_response, (void *)fdmi,
1046 FC_MAX_PDUSZ, FC_FCCT_TOV);
1047
1048 bfa_sm_send_event(fdmi, FDMISM_EVENT_RPA_SENT);
1049}
1050
1051static u16
1052bfa_fcs_port_fdmi_build_rpa_pyld(struct bfa_fcs_port_fdmi_s *fdmi,
1053 u8 *pyld)
1054{
1055 struct bfa_fcs_port_s *port = fdmi->ms->port;
1056 struct fdmi_rpa_s *rpa = (struct fdmi_rpa_s *) pyld;
1057 u16 len;
1058
1059 rpa->port_name = bfa_fcs_port_get_pwwn(port);
1060
1061 len = bfa_fcs_port_fdmi_build_portattr_block(fdmi,
1062 (u8 *) &rpa->port_attr_blk);
1063
1064 len += sizeof(rpa->port_name);
1065
1066 return len;
1067}
1068
/**
 * RPA response handler: maps transport status and the CT accept/reject
 * code onto RSP_OK / RSP_ERROR state machine events.
 */
static void
bfa_fcs_port_fdmi_rpa_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
			       void *cbarg, bfa_status_t req_status,
			       u32 rsp_len, u32 resid_len,
			       struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_port_fdmi_s *fdmi = (struct bfa_fcs_port_fdmi_s *)cbarg;
	struct bfa_fcs_port_s *port = fdmi->ms->port;
	struct ct_hdr_s *cthdr = NULL;

	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		bfa_trc(port->fcs, req_status);
		bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
		return;
	}

	/* CT response code arrives in network order; convert in place */
	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);

	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
		bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
		return;
	}

	/* rejected: trace the CT reason/explanation codes for debugging */
	bfa_trc(port->fcs, cthdr->reason_code);
	bfa_trc(port->fcs, cthdr->exp_code);
	bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
}
1102
1103static void
1104bfa_fcs_port_fdmi_timeout(void *arg)
1105{
1106 struct bfa_fcs_port_fdmi_s *fdmi = (struct bfa_fcs_port_fdmi_s *)arg;
1107
1108 bfa_sm_send_event(fdmi, FDMISM_EVENT_TIMEOUT);
1109}
1110
/**
 * Gather the HBA attributes reported to the fabric via FDMI.
 *
 * Zeroes @hba_attr and fills it from the IOC (manufacturer, serial number,
 * model, hardware/option-ROM/firmware versions) and from the driver info
 * block (driver version, OS name with optional patch level).
 */
static void
bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_port_fdmi_s *fdmi,
			struct bfa_fcs_fdmi_hba_attr_s *hba_attr)
{
	struct bfa_fcs_port_s *port = fdmi->ms->port;
	struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info;

	bfa_os_memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s));

	bfa_ioc_get_adapter_manufacturer(&port->fcs->bfa->ioc,
		hba_attr->manufacturer);
	bfa_ioc_get_adapter_serial_num(&port->fcs->bfa->ioc,
						hba_attr->serial_num);
	bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc, hba_attr->model);
	/*
	 * NOTE(review): model_desc is filled with the same model string;
	 * presumably no dedicated model-description getter exists — confirm.
	 */
	bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc, hba_attr->model_desc);
	bfa_ioc_get_pci_chip_rev(&port->fcs->bfa->ioc, hba_attr->hw_version);
	bfa_ioc_get_adapter_optrom_ver(&port->fcs->bfa->ioc,
		hba_attr->option_rom_ver);
	bfa_ioc_get_adapter_fw_ver(&port->fcs->bfa->ioc, hba_attr->fw_version);

	/*
	 * NOTE(review): strncpy() does not NUL-terminate when the source
	 * fills the destination; the preceding memset only guarantees
	 * termination if the copy is shorter than the field — verify the
	 * source strings are bounded below the destination sizes.
	 */
	strncpy(hba_attr->driver_version, (char *)driver_info->version,
		sizeof(hba_attr->driver_version));

	strncpy(hba_attr->os_name, driver_info->host_os_name,
		sizeof(hba_attr->os_name));

	/*
	 * If there is a patch level, append it to the os name along with a
	 * separator
	 */
	if (driver_info->host_os_patch[0] != '\0') {
		/*
		 * NOTE(review): strncat()'s bound is the max chars appended,
		 * not the remaining space in os_name — overflow is possible
		 * if os_name is near capacity; confirm sizes upstream.
		 */
		strncat(hba_attr->os_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
			sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
		strncat(hba_attr->os_name, driver_info->host_os_patch,
			sizeof(driver_info->host_os_patch));
	}

	/* advertise the maximum CT payload size in network byte order */
	hba_attr->max_ct_pyld = bfa_os_htonl(FC_MAX_PDUSZ);

}
1151
1152static void
1153bfa_fcs_fdmi_get_portattr(struct bfa_fcs_port_fdmi_s *fdmi,
1154 struct bfa_fcs_fdmi_port_attr_s *port_attr)
1155{
1156 struct bfa_fcs_port_s *port = fdmi->ms->port;
1157 struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info;
1158 struct bfa_pport_attr_s pport_attr;
1159
1160 bfa_os_memset(port_attr, 0, sizeof(struct bfa_fcs_fdmi_port_attr_s));
1161
1162 /*
1163 * get pport attributes from hal
1164 */
1165 bfa_fcport_get_attr(port->fcs->bfa, &pport_attr);
1166
1167 /*
1168 * get FC4 type Bitmask
1169 */
1170 fc_get_fc4type_bitmask(FC_TYPE_FCP, port_attr->supp_fc4_types);
1171
1172 /*
1173 * Supported Speeds
1174 */
1175 port_attr->supp_speed = bfa_os_htonl(BFA_FCS_FDMI_SUPORTED_SPEEDS);
1176
1177 /*
1178 * Current Speed
1179 */
1180 port_attr->curr_speed = bfa_os_htonl(pport_attr.speed);
1181
1182 /*
1183 * Max PDU Size.
1184 */
1185 port_attr->max_frm_size = bfa_os_htonl(FC_MAX_PDUSZ);
1186
1187 /*
1188 * OS device Name
1189 */
1190 strncpy(port_attr->os_device_name, (char *)driver_info->os_device_name,
1191 sizeof(port_attr->os_device_name));
1192
1193 /*
1194 * Host name
1195 */
1196 strncpy(port_attr->host_name, (char *)driver_info->host_machine_name,
1197 sizeof(port_attr->host_name));
1198
1199}
1200
1201
1202void
1203bfa_fcs_port_fdmi_init(struct bfa_fcs_port_ms_s *ms)
1204{
1205 struct bfa_fcs_port_fdmi_s *fdmi = &ms->fdmi;
1206
1207 fdmi->ms = ms;
1208 if (ms->port->fcs->fdmi_enabled)
1209 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
1210 else
1211 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_disabled);
1212}
1213
1214void
1215bfa_fcs_port_fdmi_offline(struct bfa_fcs_port_ms_s *ms)
1216{
1217 struct bfa_fcs_port_fdmi_s *fdmi = &ms->fdmi;
1218
1219 fdmi->ms = ms;
1220 bfa_sm_send_event(fdmi, FDMISM_EVENT_PORT_OFFLINE);
1221}
1222
1223void
1224bfa_fcs_port_fdmi_online(struct bfa_fcs_port_ms_s *ms)
1225{
1226 struct bfa_fcs_port_fdmi_s *fdmi = &ms->fdmi;
1227
1228 fdmi->ms = ms;
1229 bfa_sm_send_event(fdmi, FDMISM_EVENT_PORT_ONLINE);
1230}
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen.h b/drivers/scsi/bfa/include/aen/bfa_aen.h
deleted file mode 100644
index 6abbab005db6..000000000000
--- a/drivers/scsi/bfa/include/aen/bfa_aen.h
+++ /dev/null
@@ -1,96 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_AEN_H__
18#define __BFA_AEN_H__
19
20#include "defs/bfa_defs_aen.h"
21#include "defs/bfa_defs_status.h"
22#include "cs/bfa_debug.h"
23
24#define BFA_AEN_MAX_ENTRY 512
25
26extern int bfa_aen_max_cfg_entry;
27struct bfa_aen_s {
28 void *bfad;
29 int max_entry;
30 int write_index;
31 int read_index;
32 int bfad_num;
33 int seq_num;
34 void (*aen_cb_notify)(void *bfad);
35 void (*gettimeofday)(struct bfa_timeval_s *tv);
36 struct bfa_trc_mod_s *trcmod;
37 int app_ri[BFA_AEN_MAX_APP]; /* For multiclient support */
38 struct bfa_aen_entry_s list[BFA_AEN_MAX_ENTRY]; /* Must be the last */
39};
40
41
42/**
43 * Public APIs
44 */
45static inline void
46bfa_aen_set_max_cfg_entry(int max_entry)
47{
48 bfa_aen_max_cfg_entry = max_entry;
49}
50
51static inline int
52bfa_aen_get_max_cfg_entry(void)
53{
54 return bfa_aen_max_cfg_entry;
55}
56
57static inline int
58bfa_aen_get_meminfo(void)
59{
60 return sizeof(struct bfa_aen_entry_s) * bfa_aen_get_max_cfg_entry();
61}
62
63static inline int
64bfa_aen_get_wi(struct bfa_aen_s *aen)
65{
66 return aen->write_index;
67}
68
69static inline int
70bfa_aen_get_ri(struct bfa_aen_s *aen)
71{
72 return aen->read_index;
73}
74
75static inline int
76bfa_aen_fetch_count(struct bfa_aen_s *aen, enum bfa_aen_app app_id)
77{
78 bfa_assert((app_id < BFA_AEN_MAX_APP) && (app_id >= bfa_aen_app_bcu));
79 return ((aen->write_index + aen->max_entry) - aen->app_ri[app_id])
80 % aen->max_entry;
81}
82
83int bfa_aen_init(struct bfa_aen_s *aen, struct bfa_trc_mod_s *trcmod,
84 void *bfad, int bfad_num, void (*aen_cb_notify)(void *),
85 void (*gettimeofday)(struct bfa_timeval_s *));
86
87void bfa_aen_post(struct bfa_aen_s *aen, enum bfa_aen_category aen_category,
88 int aen_type, union bfa_aen_data_u *aen_data);
89
90bfa_status_t bfa_aen_fetch(struct bfa_aen_s *aen,
91 struct bfa_aen_entry_s *aen_entry,
92 int entry_req, enum bfa_aen_app app_id, int *entry_ret);
93
94int bfa_aen_get_inst(struct bfa_aen_s *aen);
95
96#endif /* __BFA_AEN_H__ */
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen_adapter.h b/drivers/scsi/bfa/include/aen/bfa_aen_adapter.h
deleted file mode 100644
index 260d3ea1cab3..000000000000
--- a/drivers/scsi/bfa/include/aen/bfa_aen_adapter.h
+++ /dev/null
@@ -1,31 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/* messages define for BFA_AEN_CAT_ADAPTER Module */
19#ifndef __bfa_aen_adapter_h__
20#define __bfa_aen_adapter_h__
21
22#include <cs/bfa_log.h>
23#include <defs/bfa_defs_aen.h>
24
25#define BFA_AEN_ADAPTER_ADD \
26 BFA_LOG_CREATE_ID(BFA_AEN_CAT_ADAPTER, BFA_ADAPTER_AEN_ADD)
27#define BFA_AEN_ADAPTER_REMOVE \
28 BFA_LOG_CREATE_ID(BFA_AEN_CAT_ADAPTER, BFA_ADAPTER_AEN_REMOVE)
29
30#endif
31
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen_audit.h b/drivers/scsi/bfa/include/aen/bfa_aen_audit.h
deleted file mode 100644
index 12cd7aab5d53..000000000000
--- a/drivers/scsi/bfa/include/aen/bfa_aen_audit.h
+++ /dev/null
@@ -1,31 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/* messages define for BFA_AEN_CAT_AUDIT Module */
19#ifndef __bfa_aen_audit_h__
20#define __bfa_aen_audit_h__
21
22#include <cs/bfa_log.h>
23#include <defs/bfa_defs_aen.h>
24
25#define BFA_AEN_AUDIT_AUTH_ENABLE \
26 BFA_LOG_CREATE_ID(BFA_AEN_CAT_AUDIT, BFA_AUDIT_AEN_AUTH_ENABLE)
27#define BFA_AEN_AUDIT_AUTH_DISABLE \
28 BFA_LOG_CREATE_ID(BFA_AEN_CAT_AUDIT, BFA_AUDIT_AEN_AUTH_DISABLE)
29
30#endif
31
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen_ethport.h b/drivers/scsi/bfa/include/aen/bfa_aen_ethport.h
deleted file mode 100644
index 507d0b58d149..000000000000
--- a/drivers/scsi/bfa/include/aen/bfa_aen_ethport.h
+++ /dev/null
@@ -1,35 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/* messages define for BFA_AEN_CAT_ETHPORT Module */
19#ifndef __bfa_aen_ethport_h__
20#define __bfa_aen_ethport_h__
21
22#include <cs/bfa_log.h>
23#include <defs/bfa_defs_aen.h>
24
25#define BFA_AEN_ETHPORT_LINKUP \
26 BFA_LOG_CREATE_ID(BFA_AEN_CAT_ETHPORT, BFA_ETHPORT_AEN_LINKUP)
27#define BFA_AEN_ETHPORT_LINKDOWN \
28 BFA_LOG_CREATE_ID(BFA_AEN_CAT_ETHPORT, BFA_ETHPORT_AEN_LINKDOWN)
29#define BFA_AEN_ETHPORT_ENABLE \
30 BFA_LOG_CREATE_ID(BFA_AEN_CAT_ETHPORT, BFA_ETHPORT_AEN_ENABLE)
31#define BFA_AEN_ETHPORT_DISABLE \
32 BFA_LOG_CREATE_ID(BFA_AEN_CAT_ETHPORT, BFA_ETHPORT_AEN_DISABLE)
33
34#endif
35
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen_ioc.h b/drivers/scsi/bfa/include/aen/bfa_aen_ioc.h
deleted file mode 100644
index 4daf96faa266..000000000000
--- a/drivers/scsi/bfa/include/aen/bfa_aen_ioc.h
+++ /dev/null
@@ -1,45 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/* messages define for BFA_AEN_CAT_IOC Module */
19#ifndef __bfa_aen_ioc_h__
20#define __bfa_aen_ioc_h__
21
22#include <cs/bfa_log.h>
23#include <defs/bfa_defs_aen.h>
24
25#define BFA_AEN_IOC_HBGOOD \
26 BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_HBGOOD)
27#define BFA_AEN_IOC_HBFAIL \
28 BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_HBFAIL)
29#define BFA_AEN_IOC_ENABLE \
30 BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_ENABLE)
31#define BFA_AEN_IOC_DISABLE \
32 BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_DISABLE)
33#define BFA_AEN_IOC_FWMISMATCH \
34 BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_FWMISMATCH)
35#define BFA_AEN_IOC_FWCFG_ERROR \
36 BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_FWCFG_ERROR)
37#define BFA_AEN_IOC_INVALID_VENDOR \
38 BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_INVALID_VENDOR)
39#define BFA_AEN_IOC_INVALID_NWWN \
40 BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_INVALID_NWWN)
41#define BFA_AEN_IOC_INVALID_PWWN \
42 BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_INVALID_PWWN)
43
44#endif
45
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen_itnim.h b/drivers/scsi/bfa/include/aen/bfa_aen_itnim.h
deleted file mode 100644
index a7d8ddcfef99..000000000000
--- a/drivers/scsi/bfa/include/aen/bfa_aen_itnim.h
+++ /dev/null
@@ -1,33 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/* messages define for BFA_AEN_CAT_ITNIM Module */
19#ifndef __bfa_aen_itnim_h__
20#define __bfa_aen_itnim_h__
21
22#include <cs/bfa_log.h>
23#include <defs/bfa_defs_aen.h>
24
25#define BFA_AEN_ITNIM_ONLINE \
26 BFA_LOG_CREATE_ID(BFA_AEN_CAT_ITNIM, BFA_ITNIM_AEN_ONLINE)
27#define BFA_AEN_ITNIM_OFFLINE \
28 BFA_LOG_CREATE_ID(BFA_AEN_CAT_ITNIM, BFA_ITNIM_AEN_OFFLINE)
29#define BFA_AEN_ITNIM_DISCONNECT \
30 BFA_LOG_CREATE_ID(BFA_AEN_CAT_ITNIM, BFA_ITNIM_AEN_DISCONNECT)
31
32#endif
33
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen_lport.h b/drivers/scsi/bfa/include/aen/bfa_aen_lport.h
deleted file mode 100644
index 5a8ebb65193f..000000000000
--- a/drivers/scsi/bfa/include/aen/bfa_aen_lport.h
+++ /dev/null
@@ -1,51 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/* messages define for BFA_AEN_CAT_LPORT Module */
19#ifndef __bfa_aen_lport_h__
20#define __bfa_aen_lport_h__
21
22#include <cs/bfa_log.h>
23#include <defs/bfa_defs_aen.h>
24
25#define BFA_AEN_LPORT_NEW \
26 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_NEW)
27#define BFA_AEN_LPORT_DELETE \
28 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_DELETE)
29#define BFA_AEN_LPORT_ONLINE \
30 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_ONLINE)
31#define BFA_AEN_LPORT_OFFLINE \
32 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_OFFLINE)
33#define BFA_AEN_LPORT_DISCONNECT \
34 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_DISCONNECT)
35#define BFA_AEN_LPORT_NEW_PROP \
36 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_NEW_PROP)
37#define BFA_AEN_LPORT_DELETE_PROP \
38 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_DELETE_PROP)
39#define BFA_AEN_LPORT_NEW_STANDARD \
40 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_NEW_STANDARD)
41#define BFA_AEN_LPORT_DELETE_STANDARD \
42 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_DELETE_STANDARD)
43#define BFA_AEN_LPORT_NPIV_DUP_WWN \
44 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_NPIV_DUP_WWN)
45#define BFA_AEN_LPORT_NPIV_FABRIC_MAX \
46 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_NPIV_FABRIC_MAX)
47#define BFA_AEN_LPORT_NPIV_UNKNOWN \
48 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_NPIV_UNKNOWN)
49
50#endif
51
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen_port.h b/drivers/scsi/bfa/include/aen/bfa_aen_port.h
deleted file mode 100644
index 9add905a622d..000000000000
--- a/drivers/scsi/bfa/include/aen/bfa_aen_port.h
+++ /dev/null
@@ -1,57 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/* messages define for BFA_AEN_CAT_PORT Module */
19#ifndef __bfa_aen_port_h__
20#define __bfa_aen_port_h__
21
22#include <cs/bfa_log.h>
23#include <defs/bfa_defs_aen.h>
24
25#define BFA_AEN_PORT_ONLINE \
26 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_ONLINE)
27#define BFA_AEN_PORT_OFFLINE \
28 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_OFFLINE)
29#define BFA_AEN_PORT_RLIR \
30 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_RLIR)
31#define BFA_AEN_PORT_SFP_INSERT \
32 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_SFP_INSERT)
33#define BFA_AEN_PORT_SFP_REMOVE \
34 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_SFP_REMOVE)
35#define BFA_AEN_PORT_SFP_POM \
36 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_SFP_POM)
37#define BFA_AEN_PORT_ENABLE \
38 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_ENABLE)
39#define BFA_AEN_PORT_DISABLE \
40 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_DISABLE)
41#define BFA_AEN_PORT_AUTH_ON \
42 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_AUTH_ON)
43#define BFA_AEN_PORT_AUTH_OFF \
44 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_AUTH_OFF)
45#define BFA_AEN_PORT_DISCONNECT \
46 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_DISCONNECT)
47#define BFA_AEN_PORT_QOS_NEG \
48 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_QOS_NEG)
49#define BFA_AEN_PORT_FABRIC_NAME_CHANGE \
50 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_FABRIC_NAME_CHANGE)
51#define BFA_AEN_PORT_SFP_ACCESS_ERROR \
52 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_SFP_ACCESS_ERROR)
53#define BFA_AEN_PORT_SFP_UNSUPPORT \
54 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_SFP_UNSUPPORT)
55
56#endif
57
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen_rport.h b/drivers/scsi/bfa/include/aen/bfa_aen_rport.h
deleted file mode 100644
index 7e4be1fd5e15..000000000000
--- a/drivers/scsi/bfa/include/aen/bfa_aen_rport.h
+++ /dev/null
@@ -1,37 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/* messages define for BFA_AEN_CAT_RPORT Module */
19#ifndef __bfa_aen_rport_h__
20#define __bfa_aen_rport_h__
21
22#include <cs/bfa_log.h>
23#include <defs/bfa_defs_aen.h>
24
25#define BFA_AEN_RPORT_ONLINE \
26 BFA_LOG_CREATE_ID(BFA_AEN_CAT_RPORT, BFA_RPORT_AEN_ONLINE)
27#define BFA_AEN_RPORT_OFFLINE \
28 BFA_LOG_CREATE_ID(BFA_AEN_CAT_RPORT, BFA_RPORT_AEN_OFFLINE)
29#define BFA_AEN_RPORT_DISCONNECT \
30 BFA_LOG_CREATE_ID(BFA_AEN_CAT_RPORT, BFA_RPORT_AEN_DISCONNECT)
31#define BFA_AEN_RPORT_QOS_PRIO \
32 BFA_LOG_CREATE_ID(BFA_AEN_CAT_RPORT, BFA_RPORT_AEN_QOS_PRIO)
33#define BFA_AEN_RPORT_QOS_FLOWID \
34 BFA_LOG_CREATE_ID(BFA_AEN_CAT_RPORT, BFA_RPORT_AEN_QOS_FLOWID)
35
36#endif
37
diff --git a/drivers/scsi/bfa/include/bfa.h b/drivers/scsi/bfa/include/bfa.h
deleted file mode 100644
index d52b32f5695c..000000000000
--- a/drivers/scsi/bfa/include/bfa.h
+++ /dev/null
@@ -1,203 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_H__
18#define __BFA_H__
19
20#include <bfa_os_inc.h>
21#include <cs/bfa_debug.h>
22#include <cs/bfa_q.h>
23#include <cs/bfa_trc.h>
24#include <cs/bfa_log.h>
25#include <cs/bfa_plog.h>
26#include <defs/bfa_defs_status.h>
27#include <defs/bfa_defs_ioc.h>
28#include <defs/bfa_defs_iocfc.h>
29#include <aen/bfa_aen.h>
30#include <bfi/bfi.h>
31
32struct bfa_s;
33#include <bfa_intr_priv.h>
34
35struct bfa_pcidev_s;
36
37/**
38 * PCI devices supported by the current BFA
39 */
40struct bfa_pciid_s {
41 u16 device_id;
42 u16 vendor_id;
43};
44
45extern char bfa_version[];
46
47/**
48 * BFA Power Mgmt Commands
49 */
50enum bfa_pm_cmd {
51 BFA_PM_CTL_D0 = 0,
52 BFA_PM_CTL_D1 = 1,
53 BFA_PM_CTL_D2 = 2,
54 BFA_PM_CTL_D3 = 3,
55};
56
57/**
58 * BFA memory resources
59 */
60enum bfa_mem_type {
61 BFA_MEM_TYPE_KVA = 1, /*! Kernel Virtual Memory *(non-dma-able) */
62 BFA_MEM_TYPE_DMA = 2, /*! DMA-able memory */
63 BFA_MEM_TYPE_MAX = BFA_MEM_TYPE_DMA,
64};
65
66struct bfa_mem_elem_s {
67 enum bfa_mem_type mem_type; /* see enum bfa_mem_type */
68 u32 mem_len; /* Total Length in Bytes */
69 u8 *kva; /* kernel virtual address */
70 u64 dma; /* dma address if DMA memory */
71 u8 *kva_curp; /* kva allocation cursor */
72 u64 dma_curp; /* dma allocation cursor */
73};
74
75struct bfa_meminfo_s {
76 struct bfa_mem_elem_s meminfo[BFA_MEM_TYPE_MAX];
77};
78#define bfa_meminfo_kva(_m) \
79 ((_m)->meminfo[BFA_MEM_TYPE_KVA - 1].kva_curp)
80#define bfa_meminfo_dma_virt(_m) \
81 ((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].kva_curp)
82#define bfa_meminfo_dma_phys(_m) \
83 ((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].dma_curp)
84
85/**
86 * Generic Scatter Gather Element used by driver
87 */
88struct bfa_sge_s {
89 u32 sg_len;
90 void *sg_addr;
91};
92
93#define bfa_sge_to_be(__sge) do { \
94 ((u32 *)(__sge))[0] = bfa_os_htonl(((u32 *)(__sge))[0]); \
95 ((u32 *)(__sge))[1] = bfa_os_htonl(((u32 *)(__sge))[1]); \
96 ((u32 *)(__sge))[2] = bfa_os_htonl(((u32 *)(__sge))[2]); \
97} while (0)
98
99
100/*
101 * bfa stats interfaces
102 */
103#define bfa_stats(_mod, _stats) ((_mod)->stats._stats++)
104
105#define bfa_ioc_get_stats(__bfa, __ioc_stats) \
106 bfa_ioc_fetch_stats(&(__bfa)->ioc, __ioc_stats)
107#define bfa_ioc_clear_stats(__bfa) \
108 bfa_ioc_clr_stats(&(__bfa)->ioc)
109#define bfa_get_nports(__bfa) \
110 bfa_ioc_get_nports(&(__bfa)->ioc)
111#define bfa_get_adapter_manufacturer(__bfa, __manufacturer) \
112 bfa_ioc_get_adapter_manufacturer(&(__bfa)->ioc, __manufacturer)
113#define bfa_get_adapter_model(__bfa, __model) \
114 bfa_ioc_get_adapter_model(&(__bfa)->ioc, __model)
115#define bfa_get_adapter_serial_num(__bfa, __serial_num) \
116 bfa_ioc_get_adapter_serial_num(&(__bfa)->ioc, __serial_num)
117#define bfa_get_adapter_fw_ver(__bfa, __fw_ver) \
118 bfa_ioc_get_adapter_fw_ver(&(__bfa)->ioc, __fw_ver)
119#define bfa_get_adapter_optrom_ver(__bfa, __optrom_ver) \
120 bfa_ioc_get_adapter_optrom_ver(&(__bfa)->ioc, __optrom_ver)
121#define bfa_get_pci_chip_rev(__bfa, __chip_rev) \
122 bfa_ioc_get_pci_chip_rev(&(__bfa)->ioc, __chip_rev)
123#define bfa_get_ioc_state(__bfa) \
124 bfa_ioc_get_state(&(__bfa)->ioc)
125#define bfa_get_type(__bfa) \
126 bfa_ioc_get_type(&(__bfa)->ioc)
127#define bfa_get_mac(__bfa) \
128 bfa_ioc_get_mac(&(__bfa)->ioc)
129#define bfa_get_mfg_mac(__bfa) \
130 bfa_ioc_get_mfg_mac(&(__bfa)->ioc)
131#define bfa_get_fw_clock_res(__bfa) \
132 ((__bfa)->iocfc.cfgrsp->fwcfg.fw_tick_res)
133
134/*
135 * bfa API functions
136 */
137void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids);
138void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg);
139void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg);
140void bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg,
141 struct bfa_meminfo_s *meminfo);
142void bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
143 struct bfa_meminfo_s *meminfo,
144 struct bfa_pcidev_s *pcidev);
145void bfa_init_trc(struct bfa_s *bfa, struct bfa_trc_mod_s *trcmod);
146void bfa_init_log(struct bfa_s *bfa, struct bfa_log_mod_s *logmod);
147void bfa_init_aen(struct bfa_s *bfa, struct bfa_aen_s *aen);
148void bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog);
149void bfa_detach(struct bfa_s *bfa);
150void bfa_init(struct bfa_s *bfa);
151void bfa_start(struct bfa_s *bfa);
152void bfa_stop(struct bfa_s *bfa);
153void bfa_attach_fcs(struct bfa_s *bfa);
154void bfa_cb_init(void *bfad, bfa_status_t status);
155void bfa_cb_stop(void *bfad, bfa_status_t status);
156void bfa_cb_updateq(void *bfad, bfa_status_t status);
157
158bfa_boolean_t bfa_intx(struct bfa_s *bfa);
159void bfa_isr_enable(struct bfa_s *bfa);
160void bfa_isr_disable(struct bfa_s *bfa);
161void bfa_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
162 u32 *num_vecs, u32 *max_vec_bit);
163#define bfa_msix(__bfa, __vec) ((__bfa)->msix.handler[__vec](__bfa, __vec))
164
165void bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q);
166void bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q);
167void bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q);
168
169typedef void (*bfa_cb_ioc_t) (void *cbarg, enum bfa_status status);
170void bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr);
171bfa_status_t bfa_iocfc_get_stats(struct bfa_s *bfa,
172 struct bfa_iocfc_stats_s *stats,
173 bfa_cb_ioc_t cbfn, void *cbarg);
174bfa_status_t bfa_iocfc_clear_stats(struct bfa_s *bfa,
175 bfa_cb_ioc_t cbfn, void *cbarg);
176void bfa_get_attr(struct bfa_s *bfa, struct bfa_ioc_attr_s *ioc_attr);
177
178void bfa_adapter_get_attr(struct bfa_s *bfa,
179 struct bfa_adapter_attr_s *ad_attr);
180u64 bfa_adapter_get_id(struct bfa_s *bfa);
181
182bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa,
183 struct bfa_iocfc_intr_attr_s *attr);
184
185void bfa_iocfc_enable(struct bfa_s *bfa);
186void bfa_iocfc_disable(struct bfa_s *bfa);
187void bfa_ioc_auto_recover(bfa_boolean_t auto_recover);
188void bfa_chip_reset(struct bfa_s *bfa);
189void bfa_cb_ioc_disable(void *bfad);
190void bfa_timer_tick(struct bfa_s *bfa);
191#define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \
192 bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout)
193
194/*
195 * BFA debug API functions
196 */
197bfa_status_t bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen);
198bfa_status_t bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen);
199void bfa_debug_fwsave_clear(struct bfa_s *bfa);
200
201#include "bfa_priv.h"
202
203#endif /* __BFA_H__ */
diff --git a/drivers/scsi/bfa/include/bfa_fcpim.h b/drivers/scsi/bfa/include/bfa_fcpim.h
deleted file mode 100644
index 4bc9453081df..000000000000
--- a/drivers/scsi/bfa/include/bfa_fcpim.h
+++ /dev/null
@@ -1,177 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_FCPIM_H__
19#define __BFA_FCPIM_H__
20
21#include <bfa.h>
22#include <bfa_svc.h>
23#include <bfi/bfi_fcpim.h>
24#include <defs/bfa_defs_fcpim.h>
25
26/*
27 * forward declarations
28 */
29struct bfa_itnim_s;
30struct bfa_ioim_s;
31struct bfa_tskim_s;
32struct bfad_ioim_s;
33struct bfad_tskim_s;
34
35/*
36 * bfa fcpim module API functions
37 */
38void bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov);
39u16 bfa_fcpim_path_tov_get(struct bfa_s *bfa);
40void bfa_fcpim_qdepth_set(struct bfa_s *bfa, u16 q_depth);
41u16 bfa_fcpim_qdepth_get(struct bfa_s *bfa);
42bfa_status_t bfa_fcpim_get_modstats(struct bfa_s *bfa,
43 struct bfa_fcpim_stats_s *modstats);
44bfa_status_t bfa_fcpim_clr_modstats(struct bfa_s *bfa);
45void bfa_fcpim_set_ioredirect(struct bfa_s *bfa, bfa_boolean_t state);
46void bfa_fcpim_update_ioredirect(struct bfa_s *bfa);
47void bfa_cb_ioredirect_state_change(void *hcb_bfad, bfa_boolean_t ioredirect);
48
49#define bfa_fcpim_ioredirect_enabled(__bfa) \
50 (((struct bfa_fcpim_mod_s *)(BFA_FCPIM_MOD(__bfa)))->ioredirect)
51
52#define bfa_fcpim_get_next_reqq(__bfa, __qid) \
53{ \
54 struct bfa_fcpim_mod_s *__fcpim = BFA_FCPIM_MOD(__bfa); \
55 __fcpim->reqq++; \
56 __fcpim->reqq &= (BFI_IOC_MAX_CQS - 1); \
57 *(__qid) = __fcpim->reqq; \
58}
59
60#define bfa_iocfc_map_msg_to_qid(__msg, __qid) \
61 *(__qid) = (u8)((__msg) & (BFI_IOC_MAX_CQS - 1));
62
63
64/*
65 * bfa itnim API functions
66 */
67struct bfa_itnim_s *bfa_itnim_create(struct bfa_s *bfa,
68 struct bfa_rport_s *rport, void *itnim);
69void bfa_itnim_delete(struct bfa_itnim_s *itnim);
70void bfa_itnim_online(struct bfa_itnim_s *itnim,
71 bfa_boolean_t seq_rec);
72void bfa_itnim_offline(struct bfa_itnim_s *itnim);
73void bfa_itnim_get_stats(struct bfa_itnim_s *itnim,
74 struct bfa_itnim_hal_stats_s *stats);
75void bfa_itnim_clear_stats(struct bfa_itnim_s *itnim);
76
77#define bfa_itnim_get_reqq(__ioim) (((struct bfa_ioim_s *)__ioim)->itnim->reqq)
78
79/**
80 * BFA completion callback for bfa_itnim_online().
81 *
82 * @param[in] itnim FCS or driver itnim instance
83 *
84 * return None
85 */
86void bfa_cb_itnim_online(void *itnim);
87
88/**
89 * BFA completion callback for bfa_itnim_offline().
90 *
91 * @param[in] itnim FCS or driver itnim instance
92 *
93 * return None
94 */
95void bfa_cb_itnim_offline(void *itnim);
96void bfa_cb_itnim_tov_begin(void *itnim);
97void bfa_cb_itnim_tov(void *itnim);
98
99/**
100 * BFA notification to FCS/driver for second level error recovery.
101 *
102 * Atleast one I/O request has timedout and target is unresponsive to
103 * repeated abort requests. Second level error recovery should be initiated
104 * by starting implicit logout and recovery procedures.
105 *
106 * @param[in] itnim FCS or driver itnim instance
107 *
108 * return None
109 */
110void bfa_cb_itnim_sler(void *itnim);
111
112/*
113 * bfa ioim API functions
114 */
115struct bfa_ioim_s *bfa_ioim_alloc(struct bfa_s *bfa,
116 struct bfad_ioim_s *dio,
117 struct bfa_itnim_s *itnim,
118 u16 nsgles);
119
120void bfa_ioim_free(struct bfa_ioim_s *ioim);
121void bfa_ioim_start(struct bfa_ioim_s *ioim);
122void bfa_ioim_abort(struct bfa_ioim_s *ioim);
123void bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim,
124 bfa_boolean_t iotov);
125
126
127/**
128 * I/O completion notification.
129 *
130 * @param[in] dio driver IO structure
131 * @param[in] io_status IO completion status
132 * @param[in] scsi_status SCSI status returned by target
133 * @param[in] sns_len SCSI sense length, 0 if none
134 * @param[in] sns_info SCSI sense data, if any
135 * @param[in] residue Residual length
136 *
137 * @return None
138 */
139void bfa_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio,
140 enum bfi_ioim_status io_status,
141 u8 scsi_status, int sns_len,
142 u8 *sns_info, s32 residue);
143
144/**
145 * I/O good completion notification.
146 *
147 * @param[in] dio driver IO structure
148 *
149 * @return None
150 */
151void bfa_cb_ioim_good_comp(void *bfad, struct bfad_ioim_s *dio);
152
153/**
154 * I/O abort completion notification
155 *
156 * @param[in] dio driver IO that was aborted
157 *
158 * @return None
159 */
160void bfa_cb_ioim_abort(void *bfad, struct bfad_ioim_s *dio);
161void bfa_cb_ioim_resfree(void *hcb_bfad);
162
163void bfa_cb_ioim_resfree(void *hcb_bfad);
164
165/*
166 * bfa tskim API functions
167 */
168struct bfa_tskim_s *bfa_tskim_alloc(struct bfa_s *bfa,
169 struct bfad_tskim_s *dtsk);
170void bfa_tskim_free(struct bfa_tskim_s *tskim);
171void bfa_tskim_start(struct bfa_tskim_s *tskim,
172 struct bfa_itnim_s *itnim, lun_t lun,
173 enum fcp_tm_cmnd tm, u8 t_secs);
174void bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
175 enum bfi_tskim_status tsk_status);
176
177#endif /* __BFA_FCPIM_H__ */
diff --git a/drivers/scsi/bfa/include/bfa_fcptm.h b/drivers/scsi/bfa/include/bfa_fcptm.h
deleted file mode 100644
index 5f5ffe0bb1bb..000000000000
--- a/drivers/scsi/bfa/include/bfa_fcptm.h
+++ /dev/null
@@ -1,47 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_FCPTM_H__
19#define __BFA_FCPTM_H__
20
21#include <bfa.h>
22#include <bfa_svc.h>
23#include <bfi/bfi_fcptm.h>
24
25/*
26 * forward declarations
27 */
28struct bfa_tin_s;
29struct bfa_iotm_s;
30struct bfa_tsktm_s;
31
32/*
33 * bfa fcptm module API functions
34 */
35void bfa_fcptm_path_tov_set(struct bfa_s *bfa, u16 path_tov);
36u16 bfa_fcptm_path_tov_get(struct bfa_s *bfa);
37void bfa_fcptm_qdepth_set(struct bfa_s *bfa, u16 q_depth);
38u16 bfa_fcptm_qdepth_get(struct bfa_s *bfa);
39
40/*
41 * bfa tin API functions
42 */
43void bfa_tin_get_stats(struct bfa_tin_s *tin, struct bfa_tin_stats_s *stats);
44void bfa_tin_clear_stats(struct bfa_tin_s *tin);
45
46#endif /* __BFA_FCPTM_H__ */
47
diff --git a/drivers/scsi/bfa/include/bfa_svc.h b/drivers/scsi/bfa/include/bfa_svc.h
deleted file mode 100644
index 7840943d73b0..000000000000
--- a/drivers/scsi/bfa/include/bfa_svc.h
+++ /dev/null
@@ -1,338 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_SVC_H__
18#define __BFA_SVC_H__
19
20/*
21 * forward declarations
22 */
23struct bfa_fcxp_s;
24
25#include <defs/bfa_defs_status.h>
26#include <defs/bfa_defs_pport.h>
27#include <defs/bfa_defs_rport.h>
28#include <defs/bfa_defs_qos.h>
29#include <defs/bfa_defs_fcport.h>
30#include <cs/bfa_sm.h>
31#include <bfa.h>
32
33/**
34 * BFA rport information.
35 */
36struct bfa_rport_info_s {
37 u16 max_frmsz; /* max rcv pdu size */
38 u32 pid:24, /* remote port ID */
39 lp_tag:8; /* tag */
40 u32 local_pid:24, /* local port ID */
41 cisc:8; /* CIRO supported */
42 u8 fc_class; /* supported FC classes. enum fc_cos */
43 u8 vf_en; /* virtual fabric enable */
44 u16 vf_id; /* virtual fabric ID */
45 enum bfa_pport_speed speed; /* Rport's current speed */
46};
47
48/**
49 * BFA rport data structure
50 */
51struct bfa_rport_s {
52 struct list_head qe; /* queue element */
53 bfa_sm_t sm; /* state machine */
54 struct bfa_s *bfa; /* backpointer to BFA */
55 void *rport_drv; /* fcs/driver rport object */
56 u16 fw_handle; /* firmware rport handle */
57 u16 rport_tag; /* BFA rport tag */
58 struct bfa_rport_info_s rport_info; /* rport info from fcs/driver */
59 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
60 struct bfa_cb_qe_s hcb_qe; /* BFA callback qelem */
61 struct bfa_rport_hal_stats_s stats; /* BFA rport statistics */
62 struct bfa_rport_qos_attr_s qos_attr;
63 union a {
64 bfa_status_t status; /* f/w status */
65 void *fw_msg; /* QoS scn event */
66 } event_arg;
67};
68#define BFA_RPORT_FC_COS(_rport) ((_rport)->rport_info.fc_class)
69
70/**
71 * Send completion callback.
72 */
73typedef void (*bfa_cb_fcxp_send_t) (void *bfad_fcxp, struct bfa_fcxp_s *fcxp,
74 void *cbarg, enum bfa_status req_status,
75 u32 rsp_len, u32 resid_len,
76 struct fchs_s *rsp_fchs);
77
78/**
79 * BFA fcxp allocation (asynchronous)
80 */
81typedef void (*bfa_fcxp_alloc_cbfn_t) (void *cbarg, struct bfa_fcxp_s *fcxp);
82
83struct bfa_fcxp_wqe_s {
84 struct list_head qe;
85 bfa_fcxp_alloc_cbfn_t alloc_cbfn;
86 void *alloc_cbarg;
87};
88
89typedef u64 (*bfa_fcxp_get_sgaddr_t) (void *bfad_fcxp, int sgeid);
90typedef u32 (*bfa_fcxp_get_sglen_t) (void *bfad_fcxp, int sgeid);
91
92#define BFA_UF_BUFSZ (2 * 1024 + 256)
93
94/**
95 * @todo private
96 */
97struct bfa_uf_buf_s {
98 u8 d[BFA_UF_BUFSZ];
99};
100
101
102struct bfa_uf_s {
103 struct list_head qe; /* queue element */
104 struct bfa_s *bfa; /* bfa instance */
105 u16 uf_tag; /* identifying tag fw msgs */
106 u16 vf_id;
107 u16 src_rport_handle;
108 u16 rsvd;
109 u8 *data_ptr;
110 u16 data_len; /* actual receive length */
111 u16 pb_len; /* posted buffer length */
112 void *buf_kva; /* buffer virtual address */
113 u64 buf_pa; /* buffer physical address */
114 struct bfa_cb_qe_s hcb_qe; /* comp: BFA comp qelem */
115 struct bfa_sge_s sges[BFI_SGE_INLINE_MAX];
116};
117
118typedef void (*bfa_cb_pport_t) (void *cbarg, enum bfa_status status);
119
120/**
121 * bfa lport login/logout service interface
122 */
123struct bfa_lps_s {
124 struct list_head qe; /* queue element */
125 struct bfa_s *bfa; /* parent bfa instance */
126 bfa_sm_t sm; /* finite state machine */
127 u8 lp_tag; /* lport tag */
128 u8 reqq; /* lport request queue */
129 u8 alpa; /* ALPA for loop topologies */
130 u32 lp_pid; /* lport port ID */
131 bfa_boolean_t fdisc; /* send FDISC instead of FLOGI */
132 bfa_boolean_t auth_en; /* enable authentication */
133 bfa_boolean_t auth_req; /* authentication required */
134 bfa_boolean_t npiv_en; /* NPIV is allowed by peer */
135 bfa_boolean_t fport; /* attached peer is F_PORT */
136 bfa_boolean_t brcd_switch;/* attached peer is brcd switch */
137 bfa_status_t status; /* login status */
138 u16 pdusz; /* max receive PDU size */
139 u16 pr_bbcred; /* BB_CREDIT from peer */
140 u8 lsrjt_rsn; /* LSRJT reason */
141 u8 lsrjt_expl; /* LSRJT explanation */
142 wwn_t pwwn; /* port wwn of lport */
143 wwn_t nwwn; /* node wwn of lport */
144 wwn_t pr_pwwn; /* port wwn of lport peer */
145 wwn_t pr_nwwn; /* node wwn of lport peer */
146 mac_t lp_mac; /* fpma/spma MAC for lport */
147 mac_t fcf_mac; /* FCF MAC of lport */
148 struct bfa_reqq_wait_s wqe; /* request wait queue element */
149 void *uarg; /* user callback arg */
150 struct bfa_cb_qe_s hcb_qe; /* comp: callback qelem */
151 struct bfi_lps_login_rsp_s *loginrsp;
152 bfa_eproto_status_t ext_status;
153};
154
155#define BFA_FCPORT(_bfa) (&((_bfa)->modules.port))
156
157/*
158 * bfa pport API functions
159 */
160bfa_status_t bfa_fcport_enable(struct bfa_s *bfa);
161bfa_status_t bfa_fcport_disable(struct bfa_s *bfa);
162bfa_status_t bfa_fcport_cfg_speed(struct bfa_s *bfa,
163 enum bfa_pport_speed speed);
164enum bfa_pport_speed bfa_fcport_get_speed(struct bfa_s *bfa);
165bfa_status_t bfa_fcport_cfg_topology(struct bfa_s *bfa,
166 enum bfa_pport_topology topo);
167enum bfa_pport_topology bfa_fcport_get_topology(struct bfa_s *bfa);
168bfa_status_t bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa);
169bfa_boolean_t bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa);
170u8 bfa_fcport_get_myalpa(struct bfa_s *bfa);
171bfa_status_t bfa_fcport_clr_hardalpa(struct bfa_s *bfa);
172bfa_status_t bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxsize);
173u16 bfa_fcport_get_maxfrsize(struct bfa_s *bfa);
174u32 bfa_fcport_mypid(struct bfa_s *bfa);
175u8 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa);
176bfa_status_t bfa_fcport_trunk_enable(struct bfa_s *bfa, u8 bitmap);
177bfa_status_t bfa_fcport_trunk_disable(struct bfa_s *bfa);
178bfa_boolean_t bfa_fcport_trunk_query(struct bfa_s *bfa, u32 *bitmap);
179void bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr);
180wwn_t bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node);
181void bfa_fcport_event_register(struct bfa_s *bfa,
182 void (*event_cbfn) (void *cbarg,
183 bfa_pport_event_t event), void *event_cbarg);
184bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa);
185void bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off);
186void bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off);
187bfa_status_t bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa,
188 enum bfa_pport_speed speed);
189enum bfa_pport_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa);
190
191void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit);
192void bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status);
193void bfa_fcport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon,
194 bfa_boolean_t link_e2e_beacon);
195void bfa_cb_pport_event(void *cbarg, bfa_pport_event_t event);
196void bfa_fcport_qos_get_attr(struct bfa_s *bfa,
197 struct bfa_qos_attr_s *qos_attr);
198void bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
199 struct bfa_qos_vc_attr_s *qos_vc_attr);
200bfa_status_t bfa_fcport_get_qos_stats(struct bfa_s *bfa,
201 union bfa_fcport_stats_u *stats,
202 bfa_cb_pport_t cbfn, void *cbarg);
203bfa_status_t bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn,
204 void *cbarg);
205bfa_status_t bfa_fcport_get_fcoe_stats(struct bfa_s *bfa,
206 union bfa_fcport_stats_u *stats,
207 bfa_cb_pport_t cbfn, void *cbarg);
208bfa_status_t bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn,
209 void *cbarg);
210
211bfa_boolean_t bfa_fcport_is_ratelim(struct bfa_s *bfa);
212bfa_boolean_t bfa_fcport_is_linkup(struct bfa_s *bfa);
213bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa,
214 union bfa_fcport_stats_u *stats,
215 bfa_cb_pport_t cbfn, void *cbarg);
216bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn,
217 void *cbarg);
218bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa);
219
220/*
221 * bfa rport API functions
222 */
223struct bfa_rport_s *bfa_rport_create(struct bfa_s *bfa, void *rport_drv);
224void bfa_rport_delete(struct bfa_rport_s *rport);
225void bfa_rport_online(struct bfa_rport_s *rport,
226 struct bfa_rport_info_s *rport_info);
227void bfa_rport_offline(struct bfa_rport_s *rport);
228void bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_pport_speed speed);
229void bfa_rport_get_stats(struct bfa_rport_s *rport,
230 struct bfa_rport_hal_stats_s *stats);
231void bfa_rport_clear_stats(struct bfa_rport_s *rport);
232void bfa_cb_rport_online(void *rport);
233void bfa_cb_rport_offline(void *rport);
234void bfa_cb_rport_qos_scn_flowid(void *rport,
235 struct bfa_rport_qos_attr_s old_qos_attr,
236 struct bfa_rport_qos_attr_s new_qos_attr);
237void bfa_cb_rport_qos_scn_prio(void *rport,
238 struct bfa_rport_qos_attr_s old_qos_attr,
239 struct bfa_rport_qos_attr_s new_qos_attr);
240void bfa_rport_get_qos_attr(struct bfa_rport_s *rport,
241 struct bfa_rport_qos_attr_s *qos_attr);
242
243/*
244 * bfa fcxp API functions
245 */
246struct bfa_fcxp_s *bfa_fcxp_alloc(void *bfad_fcxp, struct bfa_s *bfa,
247 int nreq_sgles, int nrsp_sgles,
248 bfa_fcxp_get_sgaddr_t get_req_sga,
249 bfa_fcxp_get_sglen_t get_req_sglen,
250 bfa_fcxp_get_sgaddr_t get_rsp_sga,
251 bfa_fcxp_get_sglen_t get_rsp_sglen);
252void bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
253 bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *cbarg);
254void bfa_fcxp_walloc_cancel(struct bfa_s *bfa,
255 struct bfa_fcxp_wqe_s *wqe);
256void bfa_fcxp_discard(struct bfa_fcxp_s *fcxp);
257
258void *bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp);
259void *bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp);
260
261void bfa_fcxp_free(struct bfa_fcxp_s *fcxp);
262
263void bfa_fcxp_send(struct bfa_fcxp_s *fcxp,
264 struct bfa_rport_s *rport, u16 vf_id, u8 lp_tag,
265 bfa_boolean_t cts, enum fc_cos cos,
266 u32 reqlen, struct fchs_s *fchs,
267 bfa_cb_fcxp_send_t cbfn,
268 void *cbarg,
269 u32 rsp_maxlen, u8 rsp_timeout);
270bfa_status_t bfa_fcxp_abort(struct bfa_fcxp_s *fcxp);
271u32 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp);
272u32 bfa_fcxp_get_maxrsp(struct bfa_s *bfa);
273
274static inline void *
275bfa_uf_get_frmbuf(struct bfa_uf_s *uf)
276{
277 return uf->data_ptr;
278}
279
280static inline u16
281bfa_uf_get_frmlen(struct bfa_uf_s *uf)
282{
283 return uf->data_len;
284}
285
286/**
287 * Callback prototype for unsolicited frame receive handler.
288 *
289 * @param[in] cbarg callback arg for receive handler
290 * @param[in] uf unsolicited frame descriptor
291 *
292 * @return None
293 */
294typedef void (*bfa_cb_uf_recv_t) (void *cbarg, struct bfa_uf_s *uf);
295
296/*
297 * bfa uf API functions
298 */
299void bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv,
300 void *cbarg);
301void bfa_uf_free(struct bfa_uf_s *uf);
302
303/**
304 * bfa lport service api
305 */
306
307u32 bfa_lps_get_max_vport(struct bfa_s *bfa);
308struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa);
309void bfa_lps_delete(struct bfa_lps_s *lps);
310void bfa_lps_discard(struct bfa_lps_s *lps);
311void bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
312 wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en);
313void bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
314 wwn_t nwwn);
315void bfa_lps_flogo(struct bfa_lps_s *lps);
316void bfa_lps_fdisclogo(struct bfa_lps_s *lps);
317u8 bfa_lps_get_tag(struct bfa_lps_s *lps);
318bfa_boolean_t bfa_lps_is_npiv_en(struct bfa_lps_s *lps);
319bfa_boolean_t bfa_lps_is_fport(struct bfa_lps_s *lps);
320bfa_boolean_t bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps);
321bfa_boolean_t bfa_lps_is_authreq(struct bfa_lps_s *lps);
322bfa_eproto_status_t bfa_lps_get_extstatus(struct bfa_lps_s *lps);
323u32 bfa_lps_get_pid(struct bfa_lps_s *lps);
324u8 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid);
325u16 bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps);
326wwn_t bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps);
327wwn_t bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps);
328u8 bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps);
329u8 bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps);
330mac_t bfa_lps_get_lp_mac(struct bfa_lps_s *lps);
331void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status);
332void bfa_cb_lps_flogo_comp(void *bfad, void *uarg);
333void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status);
334void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg);
335void bfa_cb_lps_cvl_event(void *bfad, void *uarg);
336
337#endif /* __BFA_SVC_H__ */
338
diff --git a/drivers/scsi/bfa/include/bfa_timer.h b/drivers/scsi/bfa/include/bfa_timer.h
deleted file mode 100644
index f71087448222..000000000000
--- a/drivers/scsi/bfa/include/bfa_timer.h
+++ /dev/null
@@ -1,53 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_TIMER_H__
18#define __BFA_TIMER_H__
19
20#include <bfa_os_inc.h>
21#include <cs/bfa_q.h>
22
23struct bfa_s;
24
25typedef void (*bfa_timer_cbfn_t)(void *);
26
27/**
28 * BFA timer data structure
29 */
30struct bfa_timer_s {
31 struct list_head qe;
32 bfa_timer_cbfn_t timercb;
33 void *arg;
34 int timeout; /**< in millisecs. */
35};
36
37/**
38 * Timer module structure
39 */
40struct bfa_timer_mod_s {
41 struct list_head timer_q;
42};
43
44#define BFA_TIMER_FREQ 200 /**< specified in millisecs */
45
46void bfa_timer_beat(struct bfa_timer_mod_s *mod);
47void bfa_timer_init(struct bfa_timer_mod_s *mod);
48void bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
49 bfa_timer_cbfn_t timercb, void *arg,
50 unsigned int timeout);
51void bfa_timer_stop(struct bfa_timer_s *timer);
52
53#endif /* __BFA_TIMER_H__ */
diff --git a/drivers/scsi/bfa/include/bfi/bfi.h b/drivers/scsi/bfa/include/bfi/bfi.h
deleted file mode 100644
index a550e80cabd2..000000000000
--- a/drivers/scsi/bfa/include/bfi/bfi.h
+++ /dev/null
@@ -1,174 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_H__
19#define __BFI_H__
20
21#include <bfa_os_inc.h>
22#include <defs/bfa_defs_status.h>
23
24#pragma pack(1)
25
26/**
27 * Msg header common to all msgs
28 */
29struct bfi_mhdr_s {
30 u8 msg_class; /* @ref bfi_mclass_t */
31 u8 msg_id; /* msg opcode with in the class */
32 union {
33 struct {
34 u8 rsvd;
35 u8 lpu_id; /* msg destination */
36 } h2i;
37 u16 i2htok; /* token in msgs to host */
38 } mtag;
39};
40
41#define bfi_h2i_set(_mh, _mc, _op, _lpuid) do { \
42 (_mh).msg_class = (_mc); \
43 (_mh).msg_id = (_op); \
44 (_mh).mtag.h2i.lpu_id = (_lpuid); \
45} while (0)
46
47#define bfi_i2h_set(_mh, _mc, _op, _i2htok) do { \
48 (_mh).msg_class = (_mc); \
49 (_mh).msg_id = (_op); \
50 (_mh).mtag.i2htok = (_i2htok); \
51} while (0)
52
53/*
54 * Message opcodes: 0-127 to firmware, 128-255 to host
55 */
56#define BFI_I2H_OPCODE_BASE 128
57#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE)
58
59/**
60 ****************************************************************************
61 *
62 * Scatter Gather Element and Page definition
63 *
64 ****************************************************************************
65 */
66
67#define BFI_SGE_INLINE 1
68#define BFI_SGE_INLINE_MAX (BFI_SGE_INLINE + 1)
69
70/**
71 * SG Flags
72 */
73enum {
74 BFI_SGE_DATA = 0, /* data address, not last */
75 BFI_SGE_DATA_CPL = 1, /* data addr, last in current page */
76 BFI_SGE_DATA_LAST = 3, /* data address, last */
77 BFI_SGE_LINK = 2, /* link address */
78 BFI_SGE_PGDLEN = 2, /* cumulative data length for page */
79};
80
81/**
82 * DMA addresses
83 */
84union bfi_addr_u {
85 struct {
86 u32 addr_lo;
87 u32 addr_hi;
88 } a32;
89};
90
91/**
92 * Scatter Gather Element
93 */
94struct bfi_sge_s {
95#ifdef __BIGENDIAN
96 u32 flags:2,
97 rsvd:2,
98 sg_len:28;
99#else
100 u32 sg_len:28,
101 rsvd:2,
102 flags:2;
103#endif
104 union bfi_addr_u sga;
105};
106
107/**
108 * Scatter Gather Page
109 */
110#define BFI_SGPG_DATA_SGES 7
111#define BFI_SGPG_SGES_MAX (BFI_SGPG_DATA_SGES + 1)
112#define BFI_SGPG_RSVD_WD_LEN 8
113struct bfi_sgpg_s {
114 struct bfi_sge_s sges[BFI_SGPG_SGES_MAX];
115 u32 rsvd[BFI_SGPG_RSVD_WD_LEN];
116};
117
118/*
119 * Large Message structure - 128 Bytes size Msgs
120 */
121#define BFI_LMSG_SZ 128
122#define BFI_LMSG_PL_WSZ \
123 ((BFI_LMSG_SZ - sizeof(struct bfi_mhdr_s)) / 4)
124
125struct bfi_msg_s {
126 struct bfi_mhdr_s mhdr;
127 u32 pl[BFI_LMSG_PL_WSZ];
128};
129
130/**
131 * Mailbox message structure
132 */
133#define BFI_MBMSG_SZ 7
134struct bfi_mbmsg_s {
135 struct bfi_mhdr_s mh;
136 u32 pl[BFI_MBMSG_SZ];
137};
138
139/**
140 * Message Classes
141 */
142enum bfi_mclass {
143 BFI_MC_IOC = 1, /* IO Controller (IOC) */
144 BFI_MC_DIAG = 2, /* Diagnostic Msgs */
145 BFI_MC_FLASH = 3, /* Flash message class */
146 BFI_MC_CEE = 4, /* CEE */
147 BFI_MC_FCPORT = 5, /* FC port */
148 BFI_MC_IOCFC = 6, /* FC - IO Controller (IOC) */
149 BFI_MC_LL = 7, /* Link Layer */
150 BFI_MC_UF = 8, /* Unsolicited frame receive */
151 BFI_MC_FCXP = 9, /* FC Transport */
152 BFI_MC_LPS = 10, /* lport fc login services */
153 BFI_MC_RPORT = 11, /* Remote port */
154 BFI_MC_ITNIM = 12, /* I-T nexus (Initiator mode) */
155 BFI_MC_IOIM_READ = 13, /* read IO (Initiator mode) */
156 BFI_MC_IOIM_WRITE = 14, /* write IO (Initiator mode) */
157 BFI_MC_IOIM_IO = 15, /* IO (Initiator mode) */
158 BFI_MC_IOIM = 16, /* IO (Initiator mode) */
159 BFI_MC_IOIM_IOCOM = 17, /* good IO completion */
160 BFI_MC_TSKIM = 18, /* Initiator Task management */
161 BFI_MC_SBOOT = 19, /* SAN boot services */
162 BFI_MC_IPFC = 20, /* IP over FC Msgs */
163 BFI_MC_PORT = 21, /* Physical port */
164 BFI_MC_MAX = 32
165};
166
167#define BFI_IOC_MAX_CQS 4
168#define BFI_IOC_MAX_CQS_ASIC 8
169#define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */
170
171#pragma pack()
172
173#endif /* __BFI_H__ */
174
diff --git a/drivers/scsi/bfa/include/bfi/bfi_boot.h b/drivers/scsi/bfa/include/bfi/bfi_boot.h
deleted file mode 100644
index 5955afe7d108..000000000000
--- a/drivers/scsi/bfa/include/bfi/bfi_boot.h
+++ /dev/null
@@ -1,34 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17/*
18 * bfi_boot.h
19 */
20
21#ifndef __BFI_BOOT_H__
22#define __BFI_BOOT_H__
23
24#define BFI_BOOT_TYPE_OFF 8
25#define BFI_BOOT_PARAM_OFF 12
26
27#define BFI_BOOT_TYPE_NORMAL 0 /* param is device id */
28#define BFI_BOOT_TYPE_FLASH 1
29#define BFI_BOOT_TYPE_MEMTEST 2
30
31#define BFI_BOOT_MEMTEST_RES_ADDR 0x900
32#define BFI_BOOT_MEMTEST_RES_SIG 0xA0A1A2A3
33
34#endif
diff --git a/drivers/scsi/bfa/include/bfi/bfi_cee.h b/drivers/scsi/bfa/include/bfi/bfi_cee.h
deleted file mode 100644
index 0970596583ea..000000000000
--- a/drivers/scsi/bfa/include/bfi/bfi_cee.h
+++ /dev/null
@@ -1,119 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17/**
18 * Copyright (c) 2006-2009 Brocade Communications Systems, Inc.
19 * All rights reserved.
20 *
21 * bfi_dcbx.h BFI Interface (Mailbox commands and related structures)
22 * between host driver and DCBX/LLDP firmware module.
23 *
24**/
25
26#ifndef __BFI_CEE_H__
27#define __BFI_CEE_H__
28
29#include <bfi/bfi.h>
30
31#pragma pack(1)
32
33
34enum bfi_cee_h2i_msgs_e {
35 BFI_CEE_H2I_GET_CFG_REQ = 1,
36 BFI_CEE_H2I_RESET_STATS = 2,
37 BFI_CEE_H2I_GET_STATS_REQ = 3,
38};
39
40
41enum bfi_cee_i2h_msgs_e {
42 BFI_CEE_I2H_GET_CFG_RSP = BFA_I2HM(1),
43 BFI_CEE_I2H_RESET_STATS_RSP = BFA_I2HM(2),
44 BFI_CEE_I2H_GET_STATS_RSP = BFA_I2HM(3),
45};
46
47
48/* Data structures */
49
50/*
51 * BFI_CEE_H2I_RESET_STATS
52 */
53struct bfi_lldp_reset_stats_s {
54 struct bfi_mhdr_s mh;
55};
56
57/*
58 * BFI_CEE_H2I_RESET_STATS
59 */
60struct bfi_cee_reset_stats_s {
61 struct bfi_mhdr_s mh;
62};
63
64/*
65 * BFI_CEE_H2I_GET_CFG_REQ
66 */
67struct bfi_cee_get_req_s {
68 struct bfi_mhdr_s mh;
69 union bfi_addr_u dma_addr;
70};
71
72
73/*
74 * BFI_CEE_I2H_GET_CFG_RSP
75 */
76struct bfi_cee_get_rsp_s {
77 struct bfi_mhdr_s mh;
78 u8 cmd_status;
79 u8 rsvd[3];
80};
81
82/*
83 * BFI_CEE_H2I_GET_STATS_REQ
84 */
85struct bfi_cee_stats_req_s {
86 struct bfi_mhdr_s mh;
87 union bfi_addr_u dma_addr;
88};
89
90
91/*
92 * BFI_CEE_I2H_GET_STATS_RSP
93 */
94struct bfi_cee_stats_rsp_s {
95 struct bfi_mhdr_s mh;
96 u8 cmd_status;
97 u8 rsvd[3];
98};
99
100
101
102union bfi_cee_h2i_msg_u {
103 struct bfi_mhdr_s mh;
104 struct bfi_cee_get_req_s get_req;
105 struct bfi_cee_stats_req_s stats_req;
106};
107
108
109union bfi_cee_i2h_msg_u {
110 struct bfi_mhdr_s mh;
111 struct bfi_cee_get_rsp_s get_rsp;
112 struct bfi_cee_stats_rsp_s stats_rsp;
113};
114
115#pragma pack()
116
117
118#endif /* __BFI_CEE_H__ */
119
diff --git a/drivers/scsi/bfa/include/bfi/bfi_ctreg.h b/drivers/scsi/bfa/include/bfi/bfi_ctreg.h
deleted file mode 100644
index c0ef5a93b797..000000000000
--- a/drivers/scsi/bfa/include/bfi/bfi_ctreg.h
+++ /dev/null
@@ -1,640 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/*
19 * bfi_ctreg.h catapult host block register definitions
20 *
21 * !!! Do not edit. Auto generated. !!!
22 */
23
24#ifndef __BFI_CTREG_H__
25#define __BFI_CTREG_H__
26
27
28#define HOSTFN0_LPU_MBOX0_0 0x00019200
29#define HOSTFN1_LPU_MBOX0_8 0x00019260
30#define LPU_HOSTFN0_MBOX0_0 0x00019280
31#define LPU_HOSTFN1_MBOX0_8 0x000192e0
32#define HOSTFN2_LPU_MBOX0_0 0x00019400
33#define HOSTFN3_LPU_MBOX0_8 0x00019460
34#define LPU_HOSTFN2_MBOX0_0 0x00019480
35#define LPU_HOSTFN3_MBOX0_8 0x000194e0
36#define HOSTFN0_INT_STATUS 0x00014000
37#define __HOSTFN0_HALT_OCCURRED 0x01000000
38#define __HOSTFN0_INT_STATUS_LVL_MK 0x00f00000
39#define __HOSTFN0_INT_STATUS_LVL_SH 20
40#define __HOSTFN0_INT_STATUS_LVL(_v) ((_v) << __HOSTFN0_INT_STATUS_LVL_SH)
41#define __HOSTFN0_INT_STATUS_P_MK 0x000f0000
42#define __HOSTFN0_INT_STATUS_P_SH 16
43#define __HOSTFN0_INT_STATUS_P(_v) ((_v) << __HOSTFN0_INT_STATUS_P_SH)
44#define __HOSTFN0_INT_STATUS_F 0x0000ffff
45#define HOSTFN0_INT_MSK 0x00014004
46#define HOST_PAGE_NUM_FN0 0x00014008
47#define __HOST_PAGE_NUM_FN 0x000001ff
48#define HOST_MSIX_ERR_INDEX_FN0 0x0001400c
49#define __MSIX_ERR_INDEX_FN 0x000001ff
50#define HOSTFN1_INT_STATUS 0x00014100
51#define __HOSTFN1_HALT_OCCURRED 0x01000000
52#define __HOSTFN1_INT_STATUS_LVL_MK 0x00f00000
53#define __HOSTFN1_INT_STATUS_LVL_SH 20
54#define __HOSTFN1_INT_STATUS_LVL(_v) ((_v) << __HOSTFN1_INT_STATUS_LVL_SH)
55#define __HOSTFN1_INT_STATUS_P_MK 0x000f0000
56#define __HOSTFN1_INT_STATUS_P_SH 16
57#define __HOSTFN1_INT_STATUS_P(_v) ((_v) << __HOSTFN1_INT_STATUS_P_SH)
58#define __HOSTFN1_INT_STATUS_F 0x0000ffff
59#define HOSTFN1_INT_MSK 0x00014104
60#define HOST_PAGE_NUM_FN1 0x00014108
61#define HOST_MSIX_ERR_INDEX_FN1 0x0001410c
62#define APP_PLL_425_CTL_REG 0x00014204
63#define __P_425_PLL_LOCK 0x80000000
64#define __APP_PLL_425_SRAM_USE_100MHZ 0x00100000
65#define __APP_PLL_425_RESET_TIMER_MK 0x000e0000
66#define __APP_PLL_425_RESET_TIMER_SH 17
67#define __APP_PLL_425_RESET_TIMER(_v) ((_v) << __APP_PLL_425_RESET_TIMER_SH)
68#define __APP_PLL_425_LOGIC_SOFT_RESET 0x00010000
69#define __APP_PLL_425_CNTLMT0_1_MK 0x0000c000
70#define __APP_PLL_425_CNTLMT0_1_SH 14
71#define __APP_PLL_425_CNTLMT0_1(_v) ((_v) << __APP_PLL_425_CNTLMT0_1_SH)
72#define __APP_PLL_425_JITLMT0_1_MK 0x00003000
73#define __APP_PLL_425_JITLMT0_1_SH 12
74#define __APP_PLL_425_JITLMT0_1(_v) ((_v) << __APP_PLL_425_JITLMT0_1_SH)
75#define __APP_PLL_425_HREF 0x00000800
76#define __APP_PLL_425_HDIV 0x00000400
77#define __APP_PLL_425_P0_1_MK 0x00000300
78#define __APP_PLL_425_P0_1_SH 8
79#define __APP_PLL_425_P0_1(_v) ((_v) << __APP_PLL_425_P0_1_SH)
80#define __APP_PLL_425_Z0_2_MK 0x000000e0
81#define __APP_PLL_425_Z0_2_SH 5
82#define __APP_PLL_425_Z0_2(_v) ((_v) << __APP_PLL_425_Z0_2_SH)
83#define __APP_PLL_425_RSEL200500 0x00000010
84#define __APP_PLL_425_ENARST 0x00000008
85#define __APP_PLL_425_BYPASS 0x00000004
86#define __APP_PLL_425_LRESETN 0x00000002
87#define __APP_PLL_425_ENABLE 0x00000001
88#define APP_PLL_312_CTL_REG 0x00014208
89#define __P_312_PLL_LOCK 0x80000000
90#define __ENABLE_MAC_AHB_1 0x00800000
91#define __ENABLE_MAC_AHB_0 0x00400000
92#define __ENABLE_MAC_1 0x00200000
93#define __ENABLE_MAC_0 0x00100000
94#define __APP_PLL_312_RESET_TIMER_MK 0x000e0000
95#define __APP_PLL_312_RESET_TIMER_SH 17
96#define __APP_PLL_312_RESET_TIMER(_v) ((_v) << __APP_PLL_312_RESET_TIMER_SH)
97#define __APP_PLL_312_LOGIC_SOFT_RESET 0x00010000
98#define __APP_PLL_312_CNTLMT0_1_MK 0x0000c000
99#define __APP_PLL_312_CNTLMT0_1_SH 14
100#define __APP_PLL_312_CNTLMT0_1(_v) ((_v) << __APP_PLL_312_CNTLMT0_1_SH)
101#define __APP_PLL_312_JITLMT0_1_MK 0x00003000
102#define __APP_PLL_312_JITLMT0_1_SH 12
103#define __APP_PLL_312_JITLMT0_1(_v) ((_v) << __APP_PLL_312_JITLMT0_1_SH)
104#define __APP_PLL_312_HREF 0x00000800
105#define __APP_PLL_312_HDIV 0x00000400
106#define __APP_PLL_312_P0_1_MK 0x00000300
107#define __APP_PLL_312_P0_1_SH 8
108#define __APP_PLL_312_P0_1(_v) ((_v) << __APP_PLL_312_P0_1_SH)
109#define __APP_PLL_312_Z0_2_MK 0x000000e0
110#define __APP_PLL_312_Z0_2_SH 5
111#define __APP_PLL_312_Z0_2(_v) ((_v) << __APP_PLL_312_Z0_2_SH)
112#define __APP_PLL_312_RSEL200500 0x00000010
113#define __APP_PLL_312_ENARST 0x00000008
114#define __APP_PLL_312_BYPASS 0x00000004
115#define __APP_PLL_312_LRESETN 0x00000002
116#define __APP_PLL_312_ENABLE 0x00000001
117#define MBIST_CTL_REG 0x00014220
118#define __EDRAM_BISTR_START 0x00000004
119#define __MBIST_RESET 0x00000002
120#define __MBIST_START 0x00000001
121#define MBIST_STAT_REG 0x00014224
122#define __EDRAM_BISTR_STATUS 0x00000008
123#define __EDRAM_BISTR_DONE 0x00000004
124#define __MEM_BIT_STATUS 0x00000002
125#define __MBIST_DONE 0x00000001
126#define HOST_SEM0_REG 0x00014230
127#define __HOST_SEMAPHORE 0x00000001
128#define HOST_SEM1_REG 0x00014234
129#define HOST_SEM2_REG 0x00014238
130#define HOST_SEM3_REG 0x0001423c
131#define HOST_SEM0_INFO_REG 0x00014240
132#define HOST_SEM1_INFO_REG 0x00014244
133#define HOST_SEM2_INFO_REG 0x00014248
134#define HOST_SEM3_INFO_REG 0x0001424c
135#define ETH_MAC_SER_REG 0x00014288
136#define __APP_EMS_CKBUFAMPIN 0x00000020
137#define __APP_EMS_REFCLKSEL 0x00000010
138#define __APP_EMS_CMLCKSEL 0x00000008
139#define __APP_EMS_REFCKBUFEN2 0x00000004
140#define __APP_EMS_REFCKBUFEN1 0x00000002
141#define __APP_EMS_CHANNEL_SEL 0x00000001
142#define HOSTFN2_INT_STATUS 0x00014300
143#define __HOSTFN2_HALT_OCCURRED 0x01000000
144#define __HOSTFN2_INT_STATUS_LVL_MK 0x00f00000
145#define __HOSTFN2_INT_STATUS_LVL_SH 20
146#define __HOSTFN2_INT_STATUS_LVL(_v) ((_v) << __HOSTFN2_INT_STATUS_LVL_SH)
147#define __HOSTFN2_INT_STATUS_P_MK 0x000f0000
148#define __HOSTFN2_INT_STATUS_P_SH 16
149#define __HOSTFN2_INT_STATUS_P(_v) ((_v) << __HOSTFN2_INT_STATUS_P_SH)
150#define __HOSTFN2_INT_STATUS_F 0x0000ffff
151#define HOSTFN2_INT_MSK 0x00014304
152#define HOST_PAGE_NUM_FN2 0x00014308
153#define HOST_MSIX_ERR_INDEX_FN2 0x0001430c
154#define HOSTFN3_INT_STATUS 0x00014400
155#define __HALT_OCCURRED 0x01000000
156#define __HOSTFN3_INT_STATUS_LVL_MK 0x00f00000
157#define __HOSTFN3_INT_STATUS_LVL_SH 20
158#define __HOSTFN3_INT_STATUS_LVL(_v) ((_v) << __HOSTFN3_INT_STATUS_LVL_SH)
159#define __HOSTFN3_INT_STATUS_P_MK 0x000f0000
160#define __HOSTFN3_INT_STATUS_P_SH 16
161#define __HOSTFN3_INT_STATUS_P(_v) ((_v) << __HOSTFN3_INT_STATUS_P_SH)
162#define __HOSTFN3_INT_STATUS_F 0x0000ffff
163#define HOSTFN3_INT_MSK 0x00014404
164#define HOST_PAGE_NUM_FN3 0x00014408
165#define HOST_MSIX_ERR_INDEX_FN3 0x0001440c
166#define FNC_ID_REG 0x00014600
167#define __FUNCTION_NUMBER 0x00000007
168#define FNC_PERS_REG 0x00014604
169#define __F3_FUNCTION_ACTIVE 0x80000000
170#define __F3_FUNCTION_MODE 0x40000000
171#define __F3_PORT_MAP_MK 0x30000000
172#define __F3_PORT_MAP_SH 28
173#define __F3_PORT_MAP(_v) ((_v) << __F3_PORT_MAP_SH)
174#define __F3_VM_MODE 0x08000000
175#define __F3_INTX_STATUS_MK 0x07000000
176#define __F3_INTX_STATUS_SH 24
177#define __F3_INTX_STATUS(_v) ((_v) << __F3_INTX_STATUS_SH)
178#define __F2_FUNCTION_ACTIVE 0x00800000
179#define __F2_FUNCTION_MODE 0x00400000
180#define __F2_PORT_MAP_MK 0x00300000
181#define __F2_PORT_MAP_SH 20
182#define __F2_PORT_MAP(_v) ((_v) << __F2_PORT_MAP_SH)
183#define __F2_VM_MODE 0x00080000
184#define __F2_INTX_STATUS_MK 0x00070000
185#define __F2_INTX_STATUS_SH 16
186#define __F2_INTX_STATUS(_v) ((_v) << __F2_INTX_STATUS_SH)
187#define __F1_FUNCTION_ACTIVE 0x00008000
188#define __F1_FUNCTION_MODE 0x00004000
189#define __F1_PORT_MAP_MK 0x00003000
190#define __F1_PORT_MAP_SH 12
191#define __F1_PORT_MAP(_v) ((_v) << __F1_PORT_MAP_SH)
192#define __F1_VM_MODE 0x00000800
193#define __F1_INTX_STATUS_MK 0x00000700
194#define __F1_INTX_STATUS_SH 8
195#define __F1_INTX_STATUS(_v) ((_v) << __F1_INTX_STATUS_SH)
196#define __F0_FUNCTION_ACTIVE 0x00000080
197#define __F0_FUNCTION_MODE 0x00000040
198#define __F0_PORT_MAP_MK 0x00000030
199#define __F0_PORT_MAP_SH 4
200#define __F0_PORT_MAP(_v) ((_v) << __F0_PORT_MAP_SH)
201#define __F0_VM_MODE 0x00000008
202#define __F0_INTX_STATUS 0x00000007
203enum {
204 __F0_INTX_STATUS_MSIX = 0x0,
205 __F0_INTX_STATUS_INTA = 0x1,
206 __F0_INTX_STATUS_INTB = 0x2,
207 __F0_INTX_STATUS_INTC = 0x3,
208 __F0_INTX_STATUS_INTD = 0x4,
209};
210#define OP_MODE 0x0001460c
211#define __APP_ETH_CLK_LOWSPEED 0x00000004
212#define __GLOBAL_CORECLK_HALFSPEED 0x00000002
213#define __GLOBAL_FCOE_MODE 0x00000001
214#define HOST_SEM4_REG 0x00014610
215#define HOST_SEM5_REG 0x00014614
216#define HOST_SEM6_REG 0x00014618
217#define HOST_SEM7_REG 0x0001461c
218#define HOST_SEM4_INFO_REG 0x00014620
219#define HOST_SEM5_INFO_REG 0x00014624
220#define HOST_SEM6_INFO_REG 0x00014628
221#define HOST_SEM7_INFO_REG 0x0001462c
222#define HOSTFN0_LPU0_MBOX0_CMD_STAT 0x00019000
223#define __HOSTFN0_LPU0_MBOX0_INFO_MK 0xfffffffe
224#define __HOSTFN0_LPU0_MBOX0_INFO_SH 1
225#define __HOSTFN0_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN0_LPU0_MBOX0_INFO_SH)
226#define __HOSTFN0_LPU0_MBOX0_CMD_STATUS 0x00000001
227#define HOSTFN0_LPU1_MBOX0_CMD_STAT 0x00019004
228#define __HOSTFN0_LPU1_MBOX0_INFO_MK 0xfffffffe
229#define __HOSTFN0_LPU1_MBOX0_INFO_SH 1
230#define __HOSTFN0_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN0_LPU1_MBOX0_INFO_SH)
231#define __HOSTFN0_LPU1_MBOX0_CMD_STATUS 0x00000001
232#define LPU0_HOSTFN0_MBOX0_CMD_STAT 0x00019008
233#define __LPU0_HOSTFN0_MBOX0_INFO_MK 0xfffffffe
234#define __LPU0_HOSTFN0_MBOX0_INFO_SH 1
235#define __LPU0_HOSTFN0_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN0_MBOX0_INFO_SH)
236#define __LPU0_HOSTFN0_MBOX0_CMD_STATUS 0x00000001
237#define LPU1_HOSTFN0_MBOX0_CMD_STAT 0x0001900c
238#define __LPU1_HOSTFN0_MBOX0_INFO_MK 0xfffffffe
239#define __LPU1_HOSTFN0_MBOX0_INFO_SH 1
240#define __LPU1_HOSTFN0_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN0_MBOX0_INFO_SH)
241#define __LPU1_HOSTFN0_MBOX0_CMD_STATUS 0x00000001
242#define HOSTFN1_LPU0_MBOX0_CMD_STAT 0x00019010
243#define __HOSTFN1_LPU0_MBOX0_INFO_MK 0xfffffffe
244#define __HOSTFN1_LPU0_MBOX0_INFO_SH 1
245#define __HOSTFN1_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN1_LPU0_MBOX0_INFO_SH)
246#define __HOSTFN1_LPU0_MBOX0_CMD_STATUS 0x00000001
247#define HOSTFN1_LPU1_MBOX0_CMD_STAT 0x00019014
248#define __HOSTFN1_LPU1_MBOX0_INFO_MK 0xfffffffe
249#define __HOSTFN1_LPU1_MBOX0_INFO_SH 1
250#define __HOSTFN1_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN1_LPU1_MBOX0_INFO_SH)
251#define __HOSTFN1_LPU1_MBOX0_CMD_STATUS 0x00000001
252#define LPU0_HOSTFN1_MBOX0_CMD_STAT 0x00019018
253#define __LPU0_HOSTFN1_MBOX0_INFO_MK 0xfffffffe
254#define __LPU0_HOSTFN1_MBOX0_INFO_SH 1
255#define __LPU0_HOSTFN1_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN1_MBOX0_INFO_SH)
256#define __LPU0_HOSTFN1_MBOX0_CMD_STATUS 0x00000001
257#define LPU1_HOSTFN1_MBOX0_CMD_STAT 0x0001901c
258#define __LPU1_HOSTFN1_MBOX0_INFO_MK 0xfffffffe
259#define __LPU1_HOSTFN1_MBOX0_INFO_SH 1
260#define __LPU1_HOSTFN1_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN1_MBOX0_INFO_SH)
261#define __LPU1_HOSTFN1_MBOX0_CMD_STATUS 0x00000001
262#define HOSTFN2_LPU0_MBOX0_CMD_STAT 0x00019150
263#define __HOSTFN2_LPU0_MBOX0_INFO_MK 0xfffffffe
264#define __HOSTFN2_LPU0_MBOX0_INFO_SH 1
265#define __HOSTFN2_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN2_LPU0_MBOX0_INFO_SH)
266#define __HOSTFN2_LPU0_MBOX0_CMD_STATUS 0x00000001
267#define HOSTFN2_LPU1_MBOX0_CMD_STAT 0x00019154
268#define __HOSTFN2_LPU1_MBOX0_INFO_MK 0xfffffffe
269#define __HOSTFN2_LPU1_MBOX0_INFO_SH 1
270#define __HOSTFN2_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN2_LPU1_MBOX0_INFO_SH)
271#define __HOSTFN2_LPU1_MBOX0BOX0_CMD_STATUS 0x00000001
272#define LPU0_HOSTFN2_MBOX0_CMD_STAT 0x00019158
273#define __LPU0_HOSTFN2_MBOX0_INFO_MK 0xfffffffe
274#define __LPU0_HOSTFN2_MBOX0_INFO_SH 1
275#define __LPU0_HOSTFN2_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN2_MBOX0_INFO_SH)
276#define __LPU0_HOSTFN2_MBOX0_CMD_STATUS 0x00000001
277#define LPU1_HOSTFN2_MBOX0_CMD_STAT 0x0001915c
278#define __LPU1_HOSTFN2_MBOX0_INFO_MK 0xfffffffe
279#define __LPU1_HOSTFN2_MBOX0_INFO_SH 1
280#define __LPU1_HOSTFN2_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN2_MBOX0_INFO_SH)
281#define __LPU1_HOSTFN2_MBOX0_CMD_STATUS 0x00000001
282#define HOSTFN3_LPU0_MBOX0_CMD_STAT 0x00019160
283#define __HOSTFN3_LPU0_MBOX0_INFO_MK 0xfffffffe
284#define __HOSTFN3_LPU0_MBOX0_INFO_SH 1
285#define __HOSTFN3_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN3_LPU0_MBOX0_INFO_SH)
286#define __HOSTFN3_LPU0_MBOX0_CMD_STATUS 0x00000001
287#define HOSTFN3_LPU1_MBOX0_CMD_STAT 0x00019164
288#define __HOSTFN3_LPU1_MBOX0_INFO_MK 0xfffffffe
289#define __HOSTFN3_LPU1_MBOX0_INFO_SH 1
290#define __HOSTFN3_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN3_LPU1_MBOX0_INFO_SH)
291#define __HOSTFN3_LPU1_MBOX0_CMD_STATUS 0x00000001
292#define LPU0_HOSTFN3_MBOX0_CMD_STAT 0x00019168
293#define __LPU0_HOSTFN3_MBOX0_INFO_MK 0xfffffffe
294#define __LPU0_HOSTFN3_MBOX0_INFO_SH 1
295#define __LPU0_HOSTFN3_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN3_MBOX0_INFO_SH)
296#define __LPU0_HOSTFN3_MBOX0_CMD_STATUS 0x00000001
297#define LPU1_HOSTFN3_MBOX0_CMD_STAT 0x0001916c
298#define __LPU1_HOSTFN3_MBOX0_INFO_MK 0xfffffffe
299#define __LPU1_HOSTFN3_MBOX0_INFO_SH 1
300#define __LPU1_HOSTFN3_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN3_MBOX0_INFO_SH)
301#define __LPU1_HOSTFN3_MBOX0_CMD_STATUS 0x00000001
302#define FW_INIT_HALT_P0 0x000191ac
303#define __FW_INIT_HALT_P 0x00000001
304#define FW_INIT_HALT_P1 0x000191bc
305#define CPE_PI_PTR_Q0 0x00038000
306#define __CPE_PI_UNUSED_MK 0xffff0000
307#define __CPE_PI_UNUSED_SH 16
308#define __CPE_PI_UNUSED(_v) ((_v) << __CPE_PI_UNUSED_SH)
309#define __CPE_PI_PTR 0x0000ffff
310#define CPE_PI_PTR_Q1 0x00038040
311#define CPE_CI_PTR_Q0 0x00038004
312#define __CPE_CI_UNUSED_MK 0xffff0000
313#define __CPE_CI_UNUSED_SH 16
314#define __CPE_CI_UNUSED(_v) ((_v) << __CPE_CI_UNUSED_SH)
315#define __CPE_CI_PTR 0x0000ffff
316#define CPE_CI_PTR_Q1 0x00038044
317#define CPE_DEPTH_Q0 0x00038008
318#define __CPE_DEPTH_UNUSED_MK 0xf8000000
319#define __CPE_DEPTH_UNUSED_SH 27
320#define __CPE_DEPTH_UNUSED(_v) ((_v) << __CPE_DEPTH_UNUSED_SH)
321#define __CPE_MSIX_VEC_INDEX_MK 0x07ff0000
322#define __CPE_MSIX_VEC_INDEX_SH 16
323#define __CPE_MSIX_VEC_INDEX(_v) ((_v) << __CPE_MSIX_VEC_INDEX_SH)
324#define __CPE_DEPTH 0x0000ffff
325#define CPE_DEPTH_Q1 0x00038048
326#define CPE_QCTRL_Q0 0x0003800c
327#define __CPE_CTRL_UNUSED30_MK 0xfc000000
328#define __CPE_CTRL_UNUSED30_SH 26
329#define __CPE_CTRL_UNUSED30(_v) ((_v) << __CPE_CTRL_UNUSED30_SH)
330#define __CPE_FUNC_INT_CTRL_MK 0x03000000
331#define __CPE_FUNC_INT_CTRL_SH 24
332#define __CPE_FUNC_INT_CTRL(_v) ((_v) << __CPE_FUNC_INT_CTRL_SH)
333enum {
334 __CPE_FUNC_INT_CTRL_DISABLE = 0x0,
335 __CPE_FUNC_INT_CTRL_F2NF = 0x1,
336 __CPE_FUNC_INT_CTRL_3QUART = 0x2,
337 __CPE_FUNC_INT_CTRL_HALF = 0x3,
338};
339#define __CPE_CTRL_UNUSED20_MK 0x00f00000
340#define __CPE_CTRL_UNUSED20_SH 20
341#define __CPE_CTRL_UNUSED20(_v) ((_v) << __CPE_CTRL_UNUSED20_SH)
342#define __CPE_SCI_TH_MK 0x000f0000
343#define __CPE_SCI_TH_SH 16
344#define __CPE_SCI_TH(_v) ((_v) << __CPE_SCI_TH_SH)
345#define __CPE_CTRL_UNUSED10_MK 0x0000c000
346#define __CPE_CTRL_UNUSED10_SH 14
347#define __CPE_CTRL_UNUSED10(_v) ((_v) << __CPE_CTRL_UNUSED10_SH)
348#define __CPE_ACK_PENDING 0x00002000
349#define __CPE_CTRL_UNUSED40_MK 0x00001c00
350#define __CPE_CTRL_UNUSED40_SH 10
351#define __CPE_CTRL_UNUSED40(_v) ((_v) << __CPE_CTRL_UNUSED40_SH)
352#define __CPE_PCIEID_MK 0x00000300
353#define __CPE_PCIEID_SH 8
354#define __CPE_PCIEID(_v) ((_v) << __CPE_PCIEID_SH)
355#define __CPE_CTRL_UNUSED00_MK 0x000000fe
356#define __CPE_CTRL_UNUSED00_SH 1
357#define __CPE_CTRL_UNUSED00(_v) ((_v) << __CPE_CTRL_UNUSED00_SH)
358#define __CPE_ESIZE 0x00000001
359#define CPE_QCTRL_Q1 0x0003804c
360#define __CPE_CTRL_UNUSED31_MK 0xfc000000
361#define __CPE_CTRL_UNUSED31_SH 26
362#define __CPE_CTRL_UNUSED31(_v) ((_v) << __CPE_CTRL_UNUSED31_SH)
363#define __CPE_CTRL_UNUSED21_MK 0x00f00000
364#define __CPE_CTRL_UNUSED21_SH 20
365#define __CPE_CTRL_UNUSED21(_v) ((_v) << __CPE_CTRL_UNUSED21_SH)
366#define __CPE_CTRL_UNUSED11_MK 0x0000c000
367#define __CPE_CTRL_UNUSED11_SH 14
368#define __CPE_CTRL_UNUSED11(_v) ((_v) << __CPE_CTRL_UNUSED11_SH)
369#define __CPE_CTRL_UNUSED41_MK 0x00001c00
370#define __CPE_CTRL_UNUSED41_SH 10
371#define __CPE_CTRL_UNUSED41(_v) ((_v) << __CPE_CTRL_UNUSED41_SH)
372#define __CPE_CTRL_UNUSED01_MK 0x000000fe
373#define __CPE_CTRL_UNUSED01_SH 1
374#define __CPE_CTRL_UNUSED01(_v) ((_v) << __CPE_CTRL_UNUSED01_SH)
375#define RME_PI_PTR_Q0 0x00038020
376#define __LATENCY_TIME_STAMP_MK 0xffff0000
377#define __LATENCY_TIME_STAMP_SH 16
378#define __LATENCY_TIME_STAMP(_v) ((_v) << __LATENCY_TIME_STAMP_SH)
379#define __RME_PI_PTR 0x0000ffff
380#define RME_PI_PTR_Q1 0x00038060
381#define RME_CI_PTR_Q0 0x00038024
382#define __DELAY_TIME_STAMP_MK 0xffff0000
383#define __DELAY_TIME_STAMP_SH 16
384#define __DELAY_TIME_STAMP(_v) ((_v) << __DELAY_TIME_STAMP_SH)
385#define __RME_CI_PTR 0x0000ffff
386#define RME_CI_PTR_Q1 0x00038064
387#define RME_DEPTH_Q0 0x00038028
388#define __RME_DEPTH_UNUSED_MK 0xf8000000
389#define __RME_DEPTH_UNUSED_SH 27
390#define __RME_DEPTH_UNUSED(_v) ((_v) << __RME_DEPTH_UNUSED_SH)
391#define __RME_MSIX_VEC_INDEX_MK 0x07ff0000
392#define __RME_MSIX_VEC_INDEX_SH 16
393#define __RME_MSIX_VEC_INDEX(_v) ((_v) << __RME_MSIX_VEC_INDEX_SH)
394#define __RME_DEPTH 0x0000ffff
395#define RME_DEPTH_Q1 0x00038068
396#define RME_QCTRL_Q0 0x0003802c
397#define __RME_INT_LATENCY_TIMER_MK 0xff000000
398#define __RME_INT_LATENCY_TIMER_SH 24
399#define __RME_INT_LATENCY_TIMER(_v) ((_v) << __RME_INT_LATENCY_TIMER_SH)
400#define __RME_INT_DELAY_TIMER_MK 0x00ff0000
401#define __RME_INT_DELAY_TIMER_SH 16
402#define __RME_INT_DELAY_TIMER(_v) ((_v) << __RME_INT_DELAY_TIMER_SH)
403#define __RME_INT_DELAY_DISABLE 0x00008000
404#define __RME_DLY_DELAY_DISABLE 0x00004000
405#define __RME_ACK_PENDING 0x00002000
406#define __RME_FULL_INTERRUPT_DISABLE 0x00001000
407#define __RME_CTRL_UNUSED10_MK 0x00000c00
408#define __RME_CTRL_UNUSED10_SH 10
409#define __RME_CTRL_UNUSED10(_v) ((_v) << __RME_CTRL_UNUSED10_SH)
410#define __RME_PCIEID_MK 0x00000300
411#define __RME_PCIEID_SH 8
412#define __RME_PCIEID(_v) ((_v) << __RME_PCIEID_SH)
413#define __RME_CTRL_UNUSED00_MK 0x000000fe
414#define __RME_CTRL_UNUSED00_SH 1
415#define __RME_CTRL_UNUSED00(_v) ((_v) << __RME_CTRL_UNUSED00_SH)
416#define __RME_ESIZE 0x00000001
417#define RME_QCTRL_Q1 0x0003806c
418#define __RME_CTRL_UNUSED11_MK 0x00000c00
419#define __RME_CTRL_UNUSED11_SH 10
420#define __RME_CTRL_UNUSED11(_v) ((_v) << __RME_CTRL_UNUSED11_SH)
421#define __RME_CTRL_UNUSED01_MK 0x000000fe
422#define __RME_CTRL_UNUSED01_SH 1
423#define __RME_CTRL_UNUSED01(_v) ((_v) << __RME_CTRL_UNUSED01_SH)
424#define PSS_CTL_REG 0x00018800
425#define __PSS_I2C_CLK_DIV_MK 0x007f0000
426#define __PSS_I2C_CLK_DIV_SH 16
427#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH)
428#define __PSS_LMEM_INIT_DONE 0x00001000
429#define __PSS_LMEM_RESET 0x00000200
430#define __PSS_LMEM_INIT_EN 0x00000100
431#define __PSS_LPU1_RESET 0x00000002
432#define __PSS_LPU0_RESET 0x00000001
433#define PSS_ERR_STATUS_REG 0x00018810
434#define __PSS_LPU1_TCM_READ_ERR 0x00200000
435#define __PSS_LPU0_TCM_READ_ERR 0x00100000
436#define __PSS_LMEM5_CORR_ERR 0x00080000
437#define __PSS_LMEM4_CORR_ERR 0x00040000
438#define __PSS_LMEM3_CORR_ERR 0x00020000
439#define __PSS_LMEM2_CORR_ERR 0x00010000
440#define __PSS_LMEM1_CORR_ERR 0x00008000
441#define __PSS_LMEM0_CORR_ERR 0x00004000
442#define __PSS_LMEM5_UNCORR_ERR 0x00002000
443#define __PSS_LMEM4_UNCORR_ERR 0x00001000
444#define __PSS_LMEM3_UNCORR_ERR 0x00000800
445#define __PSS_LMEM2_UNCORR_ERR 0x00000400
446#define __PSS_LMEM1_UNCORR_ERR 0x00000200
447#define __PSS_LMEM0_UNCORR_ERR 0x00000100
448#define __PSS_BAL_PERR 0x00000080
449#define __PSS_DIP_IF_ERR 0x00000040
450#define __PSS_IOH_IF_ERR 0x00000020
451#define __PSS_TDS_IF_ERR 0x00000010
452#define __PSS_RDS_IF_ERR 0x00000008
453#define __PSS_SGM_IF_ERR 0x00000004
454#define __PSS_LPU1_RAM_ERR 0x00000002
455#define __PSS_LPU0_RAM_ERR 0x00000001
456#define ERR_SET_REG 0x00018818
457#define __PSS_ERR_STATUS_SET 0x003fffff
458#define PMM_1T_RESET_REG_P0 0x0002381c
459#define __PMM_1T_RESET_P 0x00000001
460#define PMM_1T_RESET_REG_P1 0x00023c1c
461#define HQM_QSET0_RXQ_DRBL_P0 0x00038000
462#define __RXQ0_ADD_VECTORS_P 0x80000000
463#define __RXQ0_STOP_P 0x40000000
464#define __RXQ0_PRD_PTR_P 0x0000ffff
465#define HQM_QSET1_RXQ_DRBL_P0 0x00038080
466#define __RXQ1_ADD_VECTORS_P 0x80000000
467#define __RXQ1_STOP_P 0x40000000
468#define __RXQ1_PRD_PTR_P 0x0000ffff
469#define HQM_QSET0_RXQ_DRBL_P1 0x0003c000
470#define HQM_QSET1_RXQ_DRBL_P1 0x0003c080
471#define HQM_QSET0_TXQ_DRBL_P0 0x00038020
472#define __TXQ0_ADD_VECTORS_P 0x80000000
473#define __TXQ0_STOP_P 0x40000000
474#define __TXQ0_PRD_PTR_P 0x0000ffff
475#define HQM_QSET1_TXQ_DRBL_P0 0x000380a0
476#define __TXQ1_ADD_VECTORS_P 0x80000000
477#define __TXQ1_STOP_P 0x40000000
478#define __TXQ1_PRD_PTR_P 0x0000ffff
479#define HQM_QSET0_TXQ_DRBL_P1 0x0003c020
480#define HQM_QSET1_TXQ_DRBL_P1 0x0003c0a0
481#define HQM_QSET0_IB_DRBL_1_P0 0x00038040
482#define __IB1_0_ACK_P 0x80000000
483#define __IB1_0_DISABLE_P 0x40000000
484#define __IB1_0_NUM_OF_ACKED_EVENTS_P 0x0000ffff
485#define HQM_QSET1_IB_DRBL_1_P0 0x000380c0
486#define __IB1_1_ACK_P 0x80000000
487#define __IB1_1_DISABLE_P 0x40000000
488#define __IB1_1_NUM_OF_ACKED_EVENTS_P 0x0000ffff
489#define HQM_QSET0_IB_DRBL_1_P1 0x0003c040
490#define HQM_QSET1_IB_DRBL_1_P1 0x0003c0c0
491#define HQM_QSET0_IB_DRBL_2_P0 0x00038060
492#define __IB2_0_ACK_P 0x80000000
493#define __IB2_0_DISABLE_P 0x40000000
494#define __IB2_0_NUM_OF_ACKED_EVENTS_P 0x0000ffff
495#define HQM_QSET1_IB_DRBL_2_P0 0x000380e0
496#define __IB2_1_ACK_P 0x80000000
497#define __IB2_1_DISABLE_P 0x40000000
498#define __IB2_1_NUM_OF_ACKED_EVENTS_P 0x0000ffff
499#define HQM_QSET0_IB_DRBL_2_P1 0x0003c060
500#define HQM_QSET1_IB_DRBL_2_P1 0x0003c0e0
501
502
503/*
504 * These definitions are either in error/missing in spec. Its auto-generated
505 * from hard coded values in regparse.pl.
506 */
507#define __EMPHPOST_AT_4G_MK_FIX 0x0000001c
508#define __EMPHPOST_AT_4G_SH_FIX 0x00000002
509#define __EMPHPRE_AT_4G_FIX 0x00000003
510#define __SFP_TXRATE_EN_FIX 0x00000100
511#define __SFP_RXRATE_EN_FIX 0x00000080
512
513
514/*
515 * These register definitions are auto-generated from hard coded values
516 * in regparse.pl.
517 */
518
519
520/*
521 * These register mapping definitions are auto-generated from mapping tables
522 * in regparse.pl.
523 */
524#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG
525#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG
526#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG
527#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG
528#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG
529
530#define CPE_DEPTH_Q(__n) \
531 (CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0))
532#define CPE_QCTRL_Q(__n) \
533 (CPE_QCTRL_Q0 + (__n) * (CPE_QCTRL_Q1 - CPE_QCTRL_Q0))
534#define CPE_PI_PTR_Q(__n) \
535 (CPE_PI_PTR_Q0 + (__n) * (CPE_PI_PTR_Q1 - CPE_PI_PTR_Q0))
536#define CPE_CI_PTR_Q(__n) \
537 (CPE_CI_PTR_Q0 + (__n) * (CPE_CI_PTR_Q1 - CPE_CI_PTR_Q0))
538#define RME_DEPTH_Q(__n) \
539 (RME_DEPTH_Q0 + (__n) * (RME_DEPTH_Q1 - RME_DEPTH_Q0))
540#define RME_QCTRL_Q(__n) \
541 (RME_QCTRL_Q0 + (__n) * (RME_QCTRL_Q1 - RME_QCTRL_Q0))
542#define RME_PI_PTR_Q(__n) \
543 (RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0))
544#define RME_CI_PTR_Q(__n) \
545 (RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0))
546#define HQM_QSET_RXQ_DRBL_P0(__n) \
547 (HQM_QSET0_RXQ_DRBL_P0 + (__n) * (HQM_QSET1_RXQ_DRBL_P0 - \
548 HQM_QSET0_RXQ_DRBL_P0))
549#define HQM_QSET_TXQ_DRBL_P0(__n) \
550 (HQM_QSET0_TXQ_DRBL_P0 + (__n) * (HQM_QSET1_TXQ_DRBL_P0 - \
551 HQM_QSET0_TXQ_DRBL_P0))
552#define HQM_QSET_IB_DRBL_1_P0(__n) \
553 (HQM_QSET0_IB_DRBL_1_P0 + (__n) * (HQM_QSET1_IB_DRBL_1_P0 - \
554 HQM_QSET0_IB_DRBL_1_P0))
555#define HQM_QSET_IB_DRBL_2_P0(__n) \
556 (HQM_QSET0_IB_DRBL_2_P0 + (__n) * (HQM_QSET1_IB_DRBL_2_P0 - \
557 HQM_QSET0_IB_DRBL_2_P0))
558#define HQM_QSET_RXQ_DRBL_P1(__n) \
559 (HQM_QSET0_RXQ_DRBL_P1 + (__n) * (HQM_QSET1_RXQ_DRBL_P1 - \
560 HQM_QSET0_RXQ_DRBL_P1))
561#define HQM_QSET_TXQ_DRBL_P1(__n) \
562 (HQM_QSET0_TXQ_DRBL_P1 + (__n) * (HQM_QSET1_TXQ_DRBL_P1 - \
563 HQM_QSET0_TXQ_DRBL_P1))
564#define HQM_QSET_IB_DRBL_1_P1(__n) \
565 (HQM_QSET0_IB_DRBL_1_P1 + (__n) * (HQM_QSET1_IB_DRBL_1_P1 - \
566 HQM_QSET0_IB_DRBL_1_P1))
567#define HQM_QSET_IB_DRBL_2_P1(__n) \
568 (HQM_QSET0_IB_DRBL_2_P1 + (__n) * (HQM_QSET1_IB_DRBL_2_P1 - \
569 HQM_QSET0_IB_DRBL_2_P1))
570
571#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
572#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
573#define CPE_Q_MASK(__q) ((__q) & 0x3)
574#define RME_Q_MASK(__q) ((__q) & 0x3)
575
576
577/*
578 * PCI MSI-X vector defines
579 */
580enum {
581 BFA_MSIX_CPE_Q0 = 0,
582 BFA_MSIX_CPE_Q1 = 1,
583 BFA_MSIX_CPE_Q2 = 2,
584 BFA_MSIX_CPE_Q3 = 3,
585 BFA_MSIX_RME_Q0 = 4,
586 BFA_MSIX_RME_Q1 = 5,
587 BFA_MSIX_RME_Q2 = 6,
588 BFA_MSIX_RME_Q3 = 7,
589 BFA_MSIX_LPU_ERR = 8,
590 BFA_MSIX_CT_MAX = 9,
591};
592
593/*
594 * And corresponding host interrupt status bit field defines
595 */
596#define __HFN_INT_CPE_Q0 0x00000001U
597#define __HFN_INT_CPE_Q1 0x00000002U
598#define __HFN_INT_CPE_Q2 0x00000004U
599#define __HFN_INT_CPE_Q3 0x00000008U
600#define __HFN_INT_CPE_Q4 0x00000010U
601#define __HFN_INT_CPE_Q5 0x00000020U
602#define __HFN_INT_CPE_Q6 0x00000040U
603#define __HFN_INT_CPE_Q7 0x00000080U
604#define __HFN_INT_RME_Q0 0x00000100U
605#define __HFN_INT_RME_Q1 0x00000200U
606#define __HFN_INT_RME_Q2 0x00000400U
607#define __HFN_INT_RME_Q3 0x00000800U
608#define __HFN_INT_RME_Q4 0x00001000U
609#define __HFN_INT_RME_Q5 0x00002000U
610#define __HFN_INT_RME_Q6 0x00004000U
611#define __HFN_INT_RME_Q7 0x00008000U
612#define __HFN_INT_ERR_EMC 0x00010000U
613#define __HFN_INT_ERR_LPU0 0x00020000U
614#define __HFN_INT_ERR_LPU1 0x00040000U
615#define __HFN_INT_ERR_PSS 0x00080000U
616#define __HFN_INT_MBOX_LPU0 0x00100000U
617#define __HFN_INT_MBOX_LPU1 0x00200000U
618#define __HFN_INT_MBOX1_LPU0 0x00400000U
619#define __HFN_INT_MBOX1_LPU1 0x00800000U
620#define __HFN_INT_LL_HALT 0x01000000U
621#define __HFN_INT_CPE_MASK 0x000000ffU
622#define __HFN_INT_RME_MASK 0x0000ff00U
623
624
625/*
626 * catapult memory map.
627 */
628#define LL_PGN_HQM0 0x0096
629#define LL_PGN_HQM1 0x0097
630#define PSS_SMEM_PAGE_START 0x8000
631#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15))
632#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff)
633
634/*
635 * End of catapult memory map
636 */
637
638
639#endif /* __BFI_CTREG_H__ */
640
diff --git a/drivers/scsi/bfa/include/bfi/bfi_fabric.h b/drivers/scsi/bfa/include/bfi/bfi_fabric.h
deleted file mode 100644
index c0669ed41078..000000000000
--- a/drivers/scsi/bfa/include/bfi/bfi_fabric.h
+++ /dev/null
@@ -1,92 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_FABRIC_H__
19#define __BFI_FABRIC_H__
20
21#include <bfi/bfi.h>
22
23#pragma pack(1)
24
25enum bfi_fabric_h2i_msgs {
26 BFI_FABRIC_H2I_CREATE_REQ = 1,
27 BFI_FABRIC_H2I_DELETE_REQ = 2,
28 BFI_FABRIC_H2I_SETAUTH = 3,
29};
30
31enum bfi_fabric_i2h_msgs {
32 BFI_FABRIC_I2H_CREATE_RSP = BFA_I2HM(1),
33 BFI_FABRIC_I2H_DELETE_RSP = BFA_I2HM(2),
34 BFI_FABRIC_I2H_SETAUTH_RSP = BFA_I2HM(3),
35 BFI_FABRIC_I2H_ONLINE = BFA_I2HM(4),
36 BFI_FABRIC_I2H_OFFLINE = BFA_I2HM(5),
37};
38
39struct bfi_fabric_create_req_s {
40 bfi_mhdr_t mh; /* common msg header */
41 u8 vf_en; /* virtual fabric enable */
42 u8 rsvd;
43 u16 vf_id; /* virtual fabric ID */
44 wwn_t pwwn; /* port name */
45 wwn_t nwwn; /* node name */
46};
47
48struct bfi_fabric_create_rsp_s {
49 bfi_mhdr_t mh; /* common msg header */
50 u16 bfa_handle; /* host fabric handle */
51 u8 status; /* fabric create status */
52 u8 rsvd;
53};
54
55struct bfi_fabric_delete_req_s {
56 bfi_mhdr_t mh; /* common msg header */
57 u16 fw_handle; /* firmware fabric handle */
58 u16 rsvd;
59};
60
61struct bfi_fabric_delete_rsp_s {
62 bfi_mhdr_t mh; /* common msg header */
63 u16 bfa_handle; /* host fabric handle */
64 u8 status; /* fabric deletion status */
65 u8 rsvd;
66};
67
68#define BFI_FABRIC_AUTHSECRET_LEN 64
69struct bfi_fabric_setauth_req_s {
70 bfi_mhdr_t mh; /* common msg header */
71 u16 fw_handle; /* f/w handle of fabric */
72 u8 algorithm;
73 u8 group;
74 u8 secret[BFI_FABRIC_AUTHSECRET_LEN];
75};
76
77union bfi_fabric_h2i_msg_u {
78 bfi_msg_t *msg;
79 struct bfi_fabric_create_req_s *create_req;
80 struct bfi_fabric_delete_req_s *delete_req;
81};
82
83union bfi_fabric_i2h_msg_u {
84 bfi_msg_t *msg;
85 struct bfi_fabric_create_rsp_s *create_rsp;
86 struct bfi_fabric_delete_rsp_s *delete_rsp;
87};
88
89#pragma pack()
90
91#endif /* __BFI_FABRIC_H__ */
92
diff --git a/drivers/scsi/bfa/include/bfi/bfi_fcpim.h b/drivers/scsi/bfa/include/bfi/bfi_fcpim.h
deleted file mode 100644
index 52c059fb4c3a..000000000000
--- a/drivers/scsi/bfa/include/bfi/bfi_fcpim.h
+++ /dev/null
@@ -1,301 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_FCPIM_H__
19#define __BFI_FCPIM_H__
20
21#include "bfi.h"
22#include <protocol/fcp.h>
23
24#pragma pack(1)
25
26/*
27 * Initiator mode I-T nexus interface defines.
28 */
29
30enum bfi_itnim_h2i {
31 BFI_ITNIM_H2I_CREATE_REQ = 1, /* i-t nexus creation */
32 BFI_ITNIM_H2I_DELETE_REQ = 2, /* i-t nexus deletion */
33};
34
35enum bfi_itnim_i2h {
36 BFI_ITNIM_I2H_CREATE_RSP = BFA_I2HM(1),
37 BFI_ITNIM_I2H_DELETE_RSP = BFA_I2HM(2),
38 BFI_ITNIM_I2H_SLER_EVENT = BFA_I2HM(3),
39};
40
41struct bfi_itnim_create_req_s {
42 struct bfi_mhdr_s mh; /* common msg header */
43 u16 fw_handle; /* f/w handle for itnim */
44 u8 class; /* FC class for IO */
45 u8 seq_rec; /* sequence recovery support */
46 u8 msg_no; /* seq id of the msg */
47};
48
49struct bfi_itnim_create_rsp_s {
50 struct bfi_mhdr_s mh; /* common msg header */
51 u16 bfa_handle; /* bfa handle for itnim */
52 u8 status; /* fcp request status */
53 u8 seq_id; /* seq id of the msg */
54};
55
56struct bfi_itnim_delete_req_s {
57 struct bfi_mhdr_s mh; /* common msg header */
58 u16 fw_handle; /* f/w itnim handle */
59 u8 seq_id; /* seq id of the msg */
60 u8 rsvd;
61};
62
63struct bfi_itnim_delete_rsp_s {
64 struct bfi_mhdr_s mh; /* common msg header */
65 u16 bfa_handle; /* bfa handle for itnim */
66 u8 status; /* fcp request status */
67 u8 seq_id; /* seq id of the msg */
68};
69
70struct bfi_itnim_sler_event_s {
71 struct bfi_mhdr_s mh; /* common msg header */
72 u16 bfa_handle; /* bfa handle for itnim */
73 u16 rsvd;
74};
75
76union bfi_itnim_h2i_msg_u {
77 struct bfi_itnim_create_req_s *create_req;
78 struct bfi_itnim_delete_req_s *delete_req;
79 struct bfi_msg_s *msg;
80};
81
82union bfi_itnim_i2h_msg_u {
83 struct bfi_itnim_create_rsp_s *create_rsp;
84 struct bfi_itnim_delete_rsp_s *delete_rsp;
85 struct bfi_itnim_sler_event_s *sler_event;
86 struct bfi_msg_s *msg;
87};
88
89/*
90 * Initiator mode IO interface defines.
91 */
92
93enum bfi_ioim_h2i {
94 BFI_IOIM_H2I_IOABORT_REQ = 1, /* IO abort request */
95 BFI_IOIM_H2I_IOCLEANUP_REQ = 2, /* IO cleanup request */
96};
97
98enum bfi_ioim_i2h {
99 BFI_IOIM_I2H_IO_RSP = BFA_I2HM(1), /* non-fp IO response */
100 BFI_IOIM_I2H_IOABORT_RSP = BFA_I2HM(2),/* ABORT rsp */
101};
102
103/**
104 * IO command DIF info
105 */
106struct bfi_ioim_dif_s {
107 u32 dif_info[4];
108};
109
110/**
111 * FCP IO messages overview
112 *
113 * @note
114 * - Max CDB length supported is 64 bytes.
115 * - SCSI Linked commands and SCSI bi-directional Commands not
116 * supported.
117 *
118 */
119struct bfi_ioim_req_s {
120 struct bfi_mhdr_s mh; /* Common msg header */
121 u16 io_tag; /* I/O tag */
122 u16 rport_hdl; /* itnim/rport firmware handle */
123 struct fcp_cmnd_s cmnd; /* IO request info */
124
125 /**
126 * SG elements array within the IO request must be double word
127 * aligned. This aligment is required to optimize SGM setup for the IO.
128 */
129 struct bfi_sge_s sges[BFI_SGE_INLINE_MAX];
130 u8 io_timeout;
131 u8 dif_en;
132 u8 rsvd_a[2];
133 struct bfi_ioim_dif_s dif;
134};
135
136/**
137 * This table shows various IO status codes from firmware and their
138 * meaning. Host driver can use these status codes to further process
139 * IO completions.
140 *
141 * BFI_IOIM_STS_OK : IO completed with error free SCSI &
142 * transport status.
143 * - io-tag can be reused.
144 *
145 * BFA_IOIM_STS_SCSI_ERR : IO completed with scsi error.
146 * - io-tag can be reused.
147 *
148 * BFI_IOIM_STS_HOST_ABORTED : IO was aborted successfully due to
149 * host request.
150 * - io-tag cannot be reused yet.
151 *
152 * BFI_IOIM_STS_ABORTED : IO was aborted successfully
153 * internally by f/w.
154 * - io-tag cannot be reused yet.
155 *
156 * BFI_IOIM_STS_TIMEDOUT : IO timedout and ABTS/RRQ is happening
157 * in the firmware and
158 * - io-tag cannot be reused yet.
159 *
160 * BFI_IOIM_STS_SQER_NEEDED : Firmware could not recover the IO
161 * with sequence level error
162 * logic and hence host needs to retry
163 * this IO with a different IO tag
164 * - io-tag cannot be used yet.
165 *
166 * BFI_IOIM_STS_NEXUS_ABORT : Second Level Error Recovery from host
167 * is required because 2 consecutive ABTS
168 * timedout and host needs logout and
169 * re-login with the target
170 * - io-tag cannot be used yet.
171 *
172 * BFI_IOIM_STS_UNDERRUN : IO completed with SCSI status good,
173 * but the data tranferred is less than
174 * the fcp data length in the command.
175 * ex. SCSI INQUIRY where transferred
176 * data length and residue count in FCP
177 * response accounts for total fcp-dl
178 * - io-tag can be reused.
179 *
180 * BFI_IOIM_STS_OVERRUN : IO completed with SCSI status good,
181 * but the data transerred is more than
182 * fcp data length in the command. ex.
183 * TAPE IOs where blocks can of unequal
184 * lengths.
185 * - io-tag can be reused.
186 *
187 * BFI_IOIM_STS_RES_FREE : Firmware has completed using io-tag
188 * during abort process
189 * - io-tag can be reused.
190 *
191 * BFI_IOIM_STS_PROTO_ERR : Firmware detected a protocol error.
192 * ex target sent more data than
193 * requested, or there was data frame
194 * loss and other reasons
195 * - io-tag cannot be used yet.
196 *
197 * BFI_IOIM_STS_DIF_ERR : Firwmare detected DIF error. ex: DIF
198 * CRC err or Ref Tag err or App tag err.
199 * - io-tag can be reused.
200 *
201 * BFA_IOIM_STS_TSK_MGT_ABORT : IO was aborted because of Task
202 * Management command from the host
203 * - io-tag can be reused.
204 *
205 * BFI_IOIM_STS_UTAG : Firmware does not know about this
206 * io_tag.
207 * - io-tag can be reused.
208 */
209enum bfi_ioim_status {
210 BFI_IOIM_STS_OK = 0,
211 BFI_IOIM_STS_HOST_ABORTED = 1,
212 BFI_IOIM_STS_ABORTED = 2,
213 BFI_IOIM_STS_TIMEDOUT = 3,
214 BFI_IOIM_STS_RES_FREE = 4,
215 BFI_IOIM_STS_SQER_NEEDED = 5,
216 BFI_IOIM_STS_PROTO_ERR = 6,
217 BFI_IOIM_STS_UTAG = 7,
218 BFI_IOIM_STS_PATHTOV = 8,
219};
220
221#define BFI_IOIM_SNSLEN (256)
222/**
223 * I/O response message
224 */
225struct bfi_ioim_rsp_s {
226 struct bfi_mhdr_s mh; /* common msg header */
227 u16 io_tag; /* completed IO tag */
228 u16 bfa_rport_hndl; /* releated rport handle */
229 u8 io_status; /* IO completion status */
230 u8 reuse_io_tag; /* IO tag can be reused */
231 u16 abort_tag; /* host abort request tag */
232 u8 scsi_status; /* scsi status from target */
233 u8 sns_len; /* scsi sense length */
234 u8 resid_flags; /* IO residue flags */
235 u8 rsvd_a;
236 u32 residue; /* IO residual length in bytes */
237 u32 rsvd_b[3];
238};
239
240struct bfi_ioim_abort_req_s {
241 struct bfi_mhdr_s mh; /* Common msg header */
242 u16 io_tag; /* I/O tag */
243 u16 abort_tag; /* unique request tag */
244};
245
246/*
247 * Initiator mode task management command interface defines.
248 */
249
250enum bfi_tskim_h2i {
251 BFI_TSKIM_H2I_TM_REQ = 1, /* task-mgmt command */
252 BFI_TSKIM_H2I_ABORT_REQ = 2, /* task-mgmt command */
253};
254
255enum bfi_tskim_i2h {
256 BFI_TSKIM_I2H_TM_RSP = BFA_I2HM(1),
257};
258
259struct bfi_tskim_req_s {
260 struct bfi_mhdr_s mh; /* Common msg header */
261 u16 tsk_tag; /* task management tag */
262 u16 itn_fhdl; /* itn firmware handle */
263 lun_t lun; /* LU number */
264 u8 tm_flags; /* see fcp_tm_cmnd_t */
265 u8 t_secs; /* Timeout value in seconds */
266 u8 rsvd[2];
267};
268
269struct bfi_tskim_abortreq_s {
270 struct bfi_mhdr_s mh; /* Common msg header */
271 u16 tsk_tag; /* task management tag */
272 u16 rsvd;
273};
274
275enum bfi_tskim_status {
276 /*
277 * Following are FCP-4 spec defined status codes,
278 * **DO NOT CHANGE THEM **
279 */
280 BFI_TSKIM_STS_OK = 0,
281 BFI_TSKIM_STS_NOT_SUPP = 4,
282 BFI_TSKIM_STS_FAILED = 5,
283
284 /**
285 * Defined by BFA
286 */
287 BFI_TSKIM_STS_TIMEOUT = 10, /* TM request timedout */
288 BFI_TSKIM_STS_ABORTED = 11, /* Aborted on host request */
289};
290
291struct bfi_tskim_rsp_s {
292 struct bfi_mhdr_s mh; /* Common msg header */
293 u16 tsk_tag; /* task mgmt cmnd tag */
294 u8 tsk_status; /* @ref bfi_tskim_status */
295 u8 rsvd;
296};
297
298#pragma pack()
299
300#endif /* __BFI_FCPIM_H__ */
301
diff --git a/drivers/scsi/bfa/include/bfi/bfi_fcxp.h b/drivers/scsi/bfa/include/bfi/bfi_fcxp.h
deleted file mode 100644
index e0e995a32828..000000000000
--- a/drivers/scsi/bfa/include/bfi/bfi_fcxp.h
+++ /dev/null
@@ -1,71 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_FCXP_H__
19#define __BFI_FCXP_H__
20
21#include "bfi.h"
22
23#pragma pack(1)
24
25enum bfi_fcxp_h2i {
26 BFI_FCXP_H2I_SEND_REQ = 1,
27};
28
29enum bfi_fcxp_i2h {
30 BFI_FCXP_I2H_SEND_RSP = BFA_I2HM(1),
31};
32
33#define BFA_FCXP_MAX_SGES 2
34
35/**
36 * FCXP send request structure
37 */
38struct bfi_fcxp_send_req_s {
39 struct bfi_mhdr_s mh; /* Common msg header */
40 u16 fcxp_tag; /* driver request tag */
41 u16 max_frmsz; /* max send frame size */
42 u16 vf_id; /* vsan tag if applicable */
43 u16 rport_fw_hndl; /* FW Handle for the remote port */
44 u8 class; /* FC class used for req/rsp */
45 u8 rsp_timeout; /* timeout in secs, 0-no response */
46 u8 cts; /* continue sequence */
47 u8 lp_tag; /* lport tag */
48 struct fchs_s fchs; /* request FC header structure */
49 u32 req_len; /* request payload length */
50 u32 rsp_maxlen; /* max response length expected */
51 struct bfi_sge_s req_sge[BFA_FCXP_MAX_SGES]; /* request buf */
52 struct bfi_sge_s rsp_sge[BFA_FCXP_MAX_SGES]; /* response buf */
53};
54
55/**
56 * FCXP send response structure
57 */
58struct bfi_fcxp_send_rsp_s {
59 struct bfi_mhdr_s mh; /* Common msg header */
60 u16 fcxp_tag; /* send request tag */
61 u8 req_status; /* request status */
62 u8 rsvd;
63 u32 rsp_len; /* actual response length */
64 u32 residue_len; /* residual response length */
65 struct fchs_s fchs; /* response FC header structure */
66};
67
68#pragma pack()
69
70#endif /* __BFI_FCXP_H__ */
71
diff --git a/drivers/scsi/bfa/include/bfi/bfi_ioc.h b/drivers/scsi/bfa/include/bfi/bfi_ioc.h
deleted file mode 100644
index 450ded6e9bc2..000000000000
--- a/drivers/scsi/bfa/include/bfi/bfi_ioc.h
+++ /dev/null
@@ -1,208 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_IOC_H__
19#define __BFI_IOC_H__
20
21#include "bfi.h"
22#include <defs/bfa_defs_ioc.h>
23
24#pragma pack(1)
25
26enum bfi_ioc_h2i_msgs {
27 BFI_IOC_H2I_ENABLE_REQ = 1,
28 BFI_IOC_H2I_DISABLE_REQ = 2,
29 BFI_IOC_H2I_GETATTR_REQ = 3,
30 BFI_IOC_H2I_DBG_SYNC = 4,
31 BFI_IOC_H2I_DBG_DUMP = 5,
32};
33
34enum bfi_ioc_i2h_msgs {
35 BFI_IOC_I2H_ENABLE_REPLY = BFA_I2HM(1),
36 BFI_IOC_I2H_DISABLE_REPLY = BFA_I2HM(2),
37 BFI_IOC_I2H_GETATTR_REPLY = BFA_I2HM(3),
38 BFI_IOC_I2H_READY_EVENT = BFA_I2HM(4),
39 BFI_IOC_I2H_HBEAT = BFA_I2HM(5),
40};
41
42/**
43 * BFI_IOC_H2I_GETATTR_REQ message
44 */
45struct bfi_ioc_getattr_req_s {
46 struct bfi_mhdr_s mh;
47 union bfi_addr_u attr_addr;
48};
49
50struct bfi_ioc_attr_s {
51 wwn_t mfg_pwwn; /* Mfg port wwn */
52 wwn_t mfg_nwwn; /* Mfg node wwn */
53 mac_t mfg_mac; /* Mfg mac */
54 u16 rsvd_a;
55 wwn_t pwwn;
56 wwn_t nwwn;
57 mac_t mac; /* PBC or Mfg mac */
58 u16 rsvd_b;
59 char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
60 u8 pcie_gen;
61 u8 pcie_lanes_orig;
62 u8 pcie_lanes;
63 u8 rx_bbcredit; /* receive buffer credits */
64 u32 adapter_prop; /* adapter properties */
65 u16 maxfrsize; /* max receive frame size */
66 char asic_rev;
67 u8 rsvd_c;
68 char fw_version[BFA_VERSION_LEN];
69 char optrom_version[BFA_VERSION_LEN];
70 struct bfa_mfg_vpd_s vpd;
71 u32 card_type; /* card type */
72};
73
74/**
75 * BFI_IOC_I2H_GETATTR_REPLY message
76 */
77struct bfi_ioc_getattr_reply_s {
78 struct bfi_mhdr_s mh; /* Common msg header */
79 u8 status; /* cfg reply status */
80 u8 rsvd[3];
81};
82
83/**
84 * Firmware memory page offsets
85 */
86#define BFI_IOC_SMEM_PG0_CB (0x40)
87#define BFI_IOC_SMEM_PG0_CT (0x180)
88
89/**
90 * Firmware trace offset
91 */
92#define BFI_IOC_TRC_OFF (0x4b00)
93#define BFI_IOC_TRC_ENTS 256
94
95#define BFI_IOC_FW_SIGNATURE (0xbfadbfad)
96#define BFI_IOC_MD5SUM_SZ 4
97struct bfi_ioc_image_hdr_s {
98 u32 signature; /* constant signature */
99 u32 rsvd_a;
100 u32 exec; /* exec vector */
101 u32 param; /* parameters */
102 u32 rsvd_b[4];
103 u32 md5sum[BFI_IOC_MD5SUM_SZ];
104};
105
106/**
107 * BFI_IOC_I2H_READY_EVENT message
108 */
109struct bfi_ioc_rdy_event_s {
110 struct bfi_mhdr_s mh; /* common msg header */
111 u8 init_status; /* init event status */
112 u8 rsvd[3];
113};
114
115struct bfi_ioc_hbeat_s {
116 struct bfi_mhdr_s mh; /* common msg header */
117 u32 hb_count; /* current heart beat count */
118};
119
120/**
121 * IOC hardware/firmware state
122 */
123enum bfi_ioc_state {
124 BFI_IOC_UNINIT = 0, /* not initialized */
125 BFI_IOC_INITING = 1, /* h/w is being initialized */
126 BFI_IOC_HWINIT = 2, /* h/w is initialized */
127 BFI_IOC_CFG = 3, /* IOC configuration in progress */
128 BFI_IOC_OP = 4, /* IOC is operational */
129 BFI_IOC_DISABLING = 5, /* IOC is being disabled */
130 BFI_IOC_DISABLED = 6, /* IOC is disabled */
131 BFI_IOC_CFG_DISABLED = 7, /* IOC is being disabled;transient */
132 BFI_IOC_FAIL = 8, /* IOC heart-beat failure */
133 BFI_IOC_MEMTEST = 9, /* IOC is doing memtest */
134};
135
136#define BFI_IOC_ENDIAN_SIG 0x12345678
137
138enum {
139 BFI_ADAPTER_TYPE_FC = 0x01, /* FC adapters */
140 BFI_ADAPTER_TYPE_MK = 0x0f0000, /* adapter type mask */
141 BFI_ADAPTER_TYPE_SH = 16, /* adapter type shift */
142 BFI_ADAPTER_NPORTS_MK = 0xff00, /* number of ports mask */
143 BFI_ADAPTER_NPORTS_SH = 8, /* number of ports shift */
144 BFI_ADAPTER_SPEED_MK = 0xff, /* adapter speed mask */
145 BFI_ADAPTER_SPEED_SH = 0, /* adapter speed shift */
146 BFI_ADAPTER_PROTO = 0x100000, /* prototype adapaters */
147 BFI_ADAPTER_TTV = 0x200000, /* TTV debug capable */
148 BFI_ADAPTER_UNSUPP = 0x400000, /* unknown adapter type */
149};
150
151#define BFI_ADAPTER_GETP(__prop, __adap_prop) \
152 (((__adap_prop) & BFI_ADAPTER_ ## __prop ## _MK) >> \
153 BFI_ADAPTER_ ## __prop ## _SH)
154#define BFI_ADAPTER_SETP(__prop, __val) \
155 ((__val) << BFI_ADAPTER_ ## __prop ## _SH)
156#define BFI_ADAPTER_IS_PROTO(__adap_type) \
157 ((__adap_type) & BFI_ADAPTER_PROTO)
158#define BFI_ADAPTER_IS_TTV(__adap_type) \
159 ((__adap_type) & BFI_ADAPTER_TTV)
160#define BFI_ADAPTER_IS_UNSUPP(__adap_type) \
161 ((__adap_type) & BFI_ADAPTER_UNSUPP)
162#define BFI_ADAPTER_IS_SPECIAL(__adap_type) \
163 ((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \
164 BFI_ADAPTER_UNSUPP))
165
166/**
167 * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages
168 */
169struct bfi_ioc_ctrl_req_s {
170 struct bfi_mhdr_s mh;
171 u8 ioc_class;
172 u8 rsvd[3];
173};
174
175/**
176 * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages
177 */
178struct bfi_ioc_ctrl_reply_s {
179 struct bfi_mhdr_s mh; /* Common msg header */
180 u8 status; /* enable/disable status */
181 u8 rsvd[3];
182};
183
184#define BFI_IOC_MSGSZ 8
185/**
186 * H2I Messages
187 */
188union bfi_ioc_h2i_msg_u {
189 struct bfi_mhdr_s mh;
190 struct bfi_ioc_ctrl_req_s enable_req;
191 struct bfi_ioc_ctrl_req_s disable_req;
192 struct bfi_ioc_getattr_req_s getattr_req;
193 u32 mboxmsg[BFI_IOC_MSGSZ];
194};
195
196/**
197 * I2H Messages
198 */
199union bfi_ioc_i2h_msg_u {
200 struct bfi_mhdr_s mh;
201 struct bfi_ioc_rdy_event_s rdy_event;
202 u32 mboxmsg[BFI_IOC_MSGSZ];
203};
204
205#pragma pack()
206
207#endif /* __BFI_IOC_H__ */
208
diff --git a/drivers/scsi/bfa/include/bfi/bfi_iocfc.h b/drivers/scsi/bfa/include/bfi/bfi_iocfc.h
deleted file mode 100644
index ccdfcc5d7e0b..000000000000
--- a/drivers/scsi/bfa/include/bfi/bfi_iocfc.h
+++ /dev/null
@@ -1,179 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_IOCFC_H__
19#define __BFI_IOCFC_H__
20
21#include "bfi.h"
22#include <bfi/bfi_pbc.h>
23#include <defs/bfa_defs_ioc.h>
24#include <defs/bfa_defs_iocfc.h>
25#include <defs/bfa_defs_boot.h>
26
27#pragma pack(1)
28
29enum bfi_iocfc_h2i_msgs {
30 BFI_IOCFC_H2I_CFG_REQ = 1,
31 BFI_IOCFC_H2I_GET_STATS_REQ = 2,
32 BFI_IOCFC_H2I_CLEAR_STATS_REQ = 3,
33 BFI_IOCFC_H2I_SET_INTR_REQ = 4,
34 BFI_IOCFC_H2I_UPDATEQ_REQ = 5,
35};
36
37enum bfi_iocfc_i2h_msgs {
38 BFI_IOCFC_I2H_CFG_REPLY = BFA_I2HM(1),
39 BFI_IOCFC_I2H_GET_STATS_RSP = BFA_I2HM(2),
40 BFI_IOCFC_I2H_CLEAR_STATS_RSP = BFA_I2HM(3),
41 BFI_IOCFC_I2H_UPDATEQ_RSP = BFA_I2HM(5),
42};
43
44struct bfi_iocfc_cfg_s {
45 u8 num_cqs; /* Number of CQs to be used */
46 u8 sense_buf_len; /* SCSI sense length */
47 u8 trunk_enabled; /* port trunking enabled */
48 u8 trunk_ports; /* trunk ports bit map */
49 u32 endian_sig; /* endian signature of host */
50
51 /**
52 * Request and response circular queue base addresses, size and
53 * shadow index pointers.
54 */
55 union bfi_addr_u req_cq_ba[BFI_IOC_MAX_CQS];
56 union bfi_addr_u req_shadow_ci[BFI_IOC_MAX_CQS];
57 u16 req_cq_elems[BFI_IOC_MAX_CQS];
58 union bfi_addr_u rsp_cq_ba[BFI_IOC_MAX_CQS];
59 union bfi_addr_u rsp_shadow_pi[BFI_IOC_MAX_CQS];
60 u16 rsp_cq_elems[BFI_IOC_MAX_CQS];
61
62 union bfi_addr_u stats_addr; /* DMA-able address for stats */
63 union bfi_addr_u cfgrsp_addr; /* config response dma address */
64 union bfi_addr_u ioim_snsbase; /* IO sense buffer base address */
65 struct bfa_iocfc_intr_attr_s intr_attr; /* IOC interrupt attributes */
66};
67
68/**
69 * Boot target wwn information for this port. This contains either the stored
70 * or discovered boot target port wwns for the port.
71 */
72struct bfi_iocfc_bootwwns {
73 wwn_t wwn[BFA_BOOT_BOOTLUN_MAX];
74 u8 nwwns;
75 u8 rsvd[7];
76};
77
78struct bfi_iocfc_cfgrsp_s {
79 struct bfa_iocfc_fwcfg_s fwcfg;
80 struct bfa_iocfc_intr_attr_s intr_attr;
81 struct bfi_iocfc_bootwwns bootwwns;
82 struct bfi_pbc_s pbc_cfg;
83};
84
85/**
86 * BFI_IOCFC_H2I_CFG_REQ message
87 */
88struct bfi_iocfc_cfg_req_s {
89 struct bfi_mhdr_s mh;
90 union bfi_addr_u ioc_cfg_dma_addr;
91};
92
93/**
94 * BFI_IOCFC_I2H_CFG_REPLY message
95 */
96struct bfi_iocfc_cfg_reply_s {
97 struct bfi_mhdr_s mh; /* Common msg header */
98 u8 cfg_success; /* cfg reply status */
99 u8 lpu_bm; /* LPUs assigned for this IOC */
100 u8 rsvd[2];
101};
102
103/**
104 * BFI_IOCFC_H2I_GET_STATS_REQ & BFI_IOCFC_H2I_CLEAR_STATS_REQ messages
105 */
106struct bfi_iocfc_stats_req_s {
107 struct bfi_mhdr_s mh; /* msg header */
108 u32 msgtag; /* msgtag for reply */
109};
110
111/**
112 * BFI_IOCFC_I2H_GET_STATS_RSP & BFI_IOCFC_I2H_CLEAR_STATS_RSP messages
113 */
114struct bfi_iocfc_stats_rsp_s {
115 struct bfi_mhdr_s mh; /* common msg header */
116 u8 status; /* reply status */
117 u8 rsvd[3];
118 u32 msgtag; /* msgtag for reply */
119};
120
121/**
122 * BFI_IOCFC_H2I_SET_INTR_REQ message
123 */
124struct bfi_iocfc_set_intr_req_s {
125 struct bfi_mhdr_s mh; /* common msg header */
126 u8 coalesce; /* enable intr coalescing*/
127 u8 rsvd[3];
128 u16 delay; /* delay timer 0..1125us */
129 u16 latency; /* latency timer 0..225us */
130};
131
132/**
133 * BFI_IOCFC_H2I_UPDATEQ_REQ message
134 */
135struct bfi_iocfc_updateq_req_s {
136 struct bfi_mhdr_s mh; /* common msg header */
137 u32 reqq_ba; /* reqq base addr */
138 u32 rspq_ba; /* rspq base addr */
139 u32 reqq_sci; /* reqq shadow ci */
140 u32 rspq_spi; /* rspq shadow pi */
141};
142
143/**
144 * BFI_IOCFC_I2H_UPDATEQ_RSP message
145 */
146struct bfi_iocfc_updateq_rsp_s {
147 struct bfi_mhdr_s mh; /* common msg header */
148 u8 status; /* updateq status */
149 u8 rsvd[3];
150};
151
152/**
153 * H2I Messages
154 */
155union bfi_iocfc_h2i_msg_u {
156 struct bfi_mhdr_s mh;
157 struct bfi_iocfc_cfg_req_s cfg_req;
158 struct bfi_iocfc_stats_req_s stats_get;
159 struct bfi_iocfc_stats_req_s stats_clr;
160 struct bfi_iocfc_updateq_req_s updateq_req;
161 u32 mboxmsg[BFI_IOC_MSGSZ];
162};
163
164/**
165 * I2H Messages
166 */
167union bfi_iocfc_i2h_msg_u {
168 struct bfi_mhdr_s mh;
169 struct bfi_iocfc_cfg_reply_s cfg_reply;
170 struct bfi_iocfc_stats_rsp_s stats_get_rsp;
171 struct bfi_iocfc_stats_rsp_s stats_clr_rsp;
172 struct bfi_iocfc_updateq_rsp_s updateq_rsp;
173 u32 mboxmsg[BFI_IOC_MSGSZ];
174};
175
176#pragma pack()
177
178#endif /* __BFI_IOCFC_H__ */
179
diff --git a/drivers/scsi/bfa/include/bfi/bfi_lport.h b/drivers/scsi/bfa/include/bfi/bfi_lport.h
deleted file mode 100644
index 29010614bac9..000000000000
--- a/drivers/scsi/bfa/include/bfi/bfi_lport.h
+++ /dev/null
@@ -1,89 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_LPORT_H__
19#define __BFI_LPORT_H__
20
21#include <bfi/bfi.h>
22
23#pragma pack(1)
24
25enum bfi_lport_h2i_msgs {
26 BFI_LPORT_H2I_CREATE_REQ = 1,
27 BFI_LPORT_H2I_DELETE_REQ = 2,
28};
29
30enum bfi_lport_i2h_msgs {
31 BFI_LPORT_I2H_CREATE_RSP = BFA_I2HM(1),
32 BFI_LPORT_I2H_DELETE_RSP = BFA_I2HM(2),
33 BFI_LPORT_I2H_ONLINE = BFA_I2HM(3),
34 BFI_LPORT_I2H_OFFLINE = BFA_I2HM(4),
35};
36
37#define BFI_LPORT_MAX_SYNNAME 64
38
39enum bfi_lport_role_e {
40 BFI_LPORT_ROLE_FCPIM = 1,
41 BFI_LPORT_ROLE_FCPTM = 2,
42 BFI_LPORT_ROLE_IPFC = 4,
43};
44
45struct bfi_lport_create_req_s {
46 bfi_mhdr_t mh; /* common msg header */
47 u16 fabric_fwhdl; /* parent fabric instance */
48 u8 roles; /* lport FC-4 roles */
49 u8 rsvd;
50 wwn_t pwwn; /* port name */
51 wwn_t nwwn; /* node name */
52 u8 symname[BFI_LPORT_MAX_SYNNAME];
53};
54
55struct bfi_lport_create_rsp_s {
56 bfi_mhdr_t mh; /* common msg header */
57 u8 status; /* lport creation status */
58 u8 rsvd[3];
59};
60
61struct bfi_lport_delete_req_s {
62 bfi_mhdr_t mh; /* common msg header */
63 u16 fw_handle; /* firmware lport handle */
64 u16 rsvd;
65};
66
67struct bfi_lport_delete_rsp_s {
68 bfi_mhdr_t mh; /* common msg header */
69 u16 bfa_handle; /* host lport handle */
70 u8 status; /* lport deletion status */
71 u8 rsvd;
72};
73
74union bfi_lport_h2i_msg_u {
75 bfi_msg_t *msg;
76 struct bfi_lport_create_req_s *create_req;
77 struct bfi_lport_delete_req_s *delete_req;
78};
79
80union bfi_lport_i2h_msg_u {
81 bfi_msg_t *msg;
82 struct bfi_lport_create_rsp_s *create_rsp;
83 struct bfi_lport_delete_rsp_s *delete_rsp;
84};
85
86#pragma pack()
87
88#endif /* __BFI_LPORT_H__ */
89
diff --git a/drivers/scsi/bfa/include/bfi/bfi_lps.h b/drivers/scsi/bfa/include/bfi/bfi_lps.h
deleted file mode 100644
index 7ed31bbb8696..000000000000
--- a/drivers/scsi/bfa/include/bfi/bfi_lps.h
+++ /dev/null
@@ -1,104 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_LPS_H__
19#define __BFI_LPS_H__
20
21#include <bfi/bfi.h>
22
23#pragma pack(1)
24
25enum bfi_lps_h2i_msgs {
26 BFI_LPS_H2I_LOGIN_REQ = 1,
27 BFI_LPS_H2I_LOGOUT_REQ = 2,
28};
29
30enum bfi_lps_i2h_msgs {
31 BFI_LPS_H2I_LOGIN_RSP = BFA_I2HM(1),
32 BFI_LPS_H2I_LOGOUT_RSP = BFA_I2HM(2),
33 BFI_LPS_H2I_CVL_EVENT = BFA_I2HM(3),
34};
35
36struct bfi_lps_login_req_s {
37 struct bfi_mhdr_s mh; /* common msg header */
38 u8 lp_tag;
39 u8 alpa;
40 u16 pdu_size;
41 wwn_t pwwn;
42 wwn_t nwwn;
43 u8 fdisc;
44 u8 auth_en;
45 u8 rsvd[2];
46};
47
48struct bfi_lps_login_rsp_s {
49 struct bfi_mhdr_s mh; /* common msg header */
50 u8 lp_tag;
51 u8 status;
52 u8 lsrjt_rsn;
53 u8 lsrjt_expl;
54 wwn_t port_name;
55 wwn_t node_name;
56 u16 bb_credit;
57 u8 f_port;
58 u8 npiv_en;
59 u32 lp_pid:24;
60 u32 auth_req:8;
61 mac_t lp_mac;
62 mac_t fcf_mac;
63 u8 ext_status;
64 u8 brcd_switch;/* attached peer is brcd switch */
65};
66
67struct bfi_lps_logout_req_s {
68 struct bfi_mhdr_s mh; /* common msg header */
69 u8 lp_tag;
70 u8 rsvd[3];
71 wwn_t port_name;
72};
73
74struct bfi_lps_logout_rsp_s {
75 struct bfi_mhdr_s mh; /* common msg header */
76 u8 lp_tag;
77 u8 status;
78 u8 rsvd[2];
79};
80
81struct bfi_lps_cvl_event_s {
82 struct bfi_mhdr_s mh; /* common msg header */
83 u8 lp_tag;
84 u8 rsvd[3];
85};
86
87union bfi_lps_h2i_msg_u {
88 struct bfi_mhdr_s *msg;
89 struct bfi_lps_login_req_s *login_req;
90 struct bfi_lps_logout_req_s *logout_req;
91};
92
93union bfi_lps_i2h_msg_u {
94 struct bfi_msg_s *msg;
95 struct bfi_lps_login_rsp_s *login_rsp;
96 struct bfi_lps_logout_rsp_s *logout_rsp;
97 struct bfi_lps_cvl_event_s *cvl_event;
98};
99
100#pragma pack()
101
102#endif /* __BFI_LPS_H__ */
103
104
diff --git a/drivers/scsi/bfa/include/bfi/bfi_pbc.h b/drivers/scsi/bfa/include/bfi/bfi_pbc.h
deleted file mode 100644
index 88a4154c30c0..000000000000
--- a/drivers/scsi/bfa/include/bfi/bfi_pbc.h
+++ /dev/null
@@ -1,62 +0,0 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_PBC_H__
19#define __BFI_PBC_H__
20
21#pragma pack(1)
22
23#define BFI_PBC_MAX_BLUNS 8
24#define BFI_PBC_MAX_VPORTS 16
25
26#define BFI_PBC_PORT_DISABLED 2
27/**
28 * PBC boot lun configuration
29 */
30struct bfi_pbc_blun_s {
31 wwn_t tgt_pwwn;
32 lun_t tgt_lun;
33};
34
35/**
36 * PBC virtual port configuration
37 */
38struct bfi_pbc_vport_s {
39 wwn_t vp_pwwn;
40 wwn_t vp_nwwn;
41};
42
43/**
44 * BFI pre-boot configuration information
45 */
46struct bfi_pbc_s {
47 u8 port_enabled;
48 u8 boot_enabled;
49 u8 nbluns;
50 u8 nvports;
51 u8 port_speed;
52 u8 rsvd_a;
53 u16 hss;
54 wwn_t pbc_pwwn;
55 wwn_t pbc_nwwn;
56 struct bfi_pbc_blun_s blun[BFI_PBC_MAX_BLUNS];
57 struct bfi_pbc_vport_s vport[BFI_PBC_MAX_VPORTS];
58};
59
60#pragma pack()
61
62#endif /* __BFI_PBC_H__ */
diff --git a/drivers/scsi/bfa/include/bfi/bfi_port.h b/drivers/scsi/bfa/include/bfi/bfi_port.h
deleted file mode 100644
index 3ec3bea110ba..000000000000
--- a/drivers/scsi/bfa/include/bfi/bfi_port.h
+++ /dev/null
@@ -1,115 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFI_PORT_H__
18#define __BFI_PORT_H__
19
20#include <bfi/bfi.h>
21#include <defs/bfa_defs_pport.h>
22
23#pragma pack(1)
24
25enum bfi_port_h2i {
26 BFI_PORT_H2I_ENABLE_REQ = (1),
27 BFI_PORT_H2I_DISABLE_REQ = (2),
28 BFI_PORT_H2I_GET_STATS_REQ = (3),
29 BFI_PORT_H2I_CLEAR_STATS_REQ = (4),
30};
31
32enum bfi_port_i2h {
33 BFI_PORT_I2H_ENABLE_RSP = BFA_I2HM(1),
34 BFI_PORT_I2H_DISABLE_RSP = BFA_I2HM(2),
35 BFI_PORT_I2H_GET_STATS_RSP = BFA_I2HM(3),
36 BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4),
37};
38
39/**
40 * Generic REQ type
41 */
42struct bfi_port_generic_req_s {
43 struct bfi_mhdr_s mh; /* msg header */
44 u32 msgtag; /* msgtag for reply */
45 u32 rsvd;
46};
47
48/**
49 * Generic RSP type
50 */
51struct bfi_port_generic_rsp_s {
52 struct bfi_mhdr_s mh; /* common msg header */
53 u8 status; /* port enable status */
54 u8 rsvd[3];
55 u32 msgtag; /* msgtag for reply */
56};
57
58/**
59 * @todo
60 * BFI_PORT_H2I_ENABLE_REQ
61 */
62
63/**
64 * @todo
65 * BFI_PORT_I2H_ENABLE_RSP
66 */
67
68/**
69 * BFI_PORT_H2I_DISABLE_REQ
70 */
71
72/**
73 * BFI_PORT_I2H_DISABLE_RSP
74 */
75
76/**
77 * BFI_PORT_H2I_GET_STATS_REQ
78 */
79struct bfi_port_get_stats_req_s {
80 struct bfi_mhdr_s mh; /* common msg header */
81 union bfi_addr_u dma_addr;
82};
83
84/**
85 * BFI_PORT_I2H_GET_STATS_RSP
86 */
87
88/**
89 * BFI_PORT_H2I_CLEAR_STATS_REQ
90 */
91
92/**
93 * BFI_PORT_I2H_CLEAR_STATS_RSP
94 */
95
96union bfi_port_h2i_msg_u {
97 struct bfi_mhdr_s mh;
98 struct bfi_port_generic_req_s enable_req;
99 struct bfi_port_generic_req_s disable_req;
100 struct bfi_port_get_stats_req_s getstats_req;
101 struct bfi_port_generic_req_s clearstats_req;
102};
103
104union bfi_port_i2h_msg_u {
105 struct bfi_mhdr_s mh;
106 struct bfi_port_generic_rsp_s enable_rsp;
107 struct bfi_port_generic_rsp_s disable_rsp;
108 struct bfi_port_generic_rsp_s getstats_rsp;
109 struct bfi_port_generic_rsp_s clearstats_rsp;
110};
111
112#pragma pack()
113
114#endif /* __BFI_PORT_H__ */
115
diff --git a/drivers/scsi/bfa/include/bfi/bfi_pport.h b/drivers/scsi/bfa/include/bfi/bfi_pport.h
deleted file mode 100644
index 50dcf45c7470..000000000000
--- a/drivers/scsi/bfa/include/bfi/bfi_pport.h
+++ /dev/null
@@ -1,118 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFI_PPORT_H__
18#define __BFI_PPORT_H__
19
20#include <bfi/bfi.h>
21#include <defs/bfa_defs_pport.h>
22
23#pragma pack(1)
24
25enum bfi_fcport_h2i {
26 BFI_FCPORT_H2I_ENABLE_REQ = (1),
27 BFI_FCPORT_H2I_DISABLE_REQ = (2),
28 BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ = (3),
29 BFI_FCPORT_H2I_STATS_GET_REQ = (4),
30 BFI_FCPORT_H2I_STATS_CLEAR_REQ = (5),
31};
32
33enum bfi_fcport_i2h {
34 BFI_FCPORT_I2H_ENABLE_RSP = BFA_I2HM(1),
35 BFI_FCPORT_I2H_DISABLE_RSP = BFA_I2HM(2),
36 BFI_FCPORT_I2H_SET_SVC_PARAMS_RSP = BFA_I2HM(3),
37 BFI_FCPORT_I2H_STATS_GET_RSP = BFA_I2HM(4),
38 BFI_FCPORT_I2H_STATS_CLEAR_RSP = BFA_I2HM(5),
39 BFI_FCPORT_I2H_EVENT = BFA_I2HM(6),
40};
41
42/**
43 * Generic REQ type
44 */
45struct bfi_fcport_req_s {
46 struct bfi_mhdr_s mh; /* msg header */
47 u32 msgtag; /* msgtag for reply */
48};
49
50/**
51 * Generic RSP type
52 */
53struct bfi_fcport_rsp_s {
54 struct bfi_mhdr_s mh; /* common msg header */
55 u8 status; /* port enable status */
56 u8 rsvd[3];
57 u32 msgtag; /* msgtag for reply */
58};
59
60/**
61 * BFI_FCPORT_H2I_ENABLE_REQ
62 */
63struct bfi_fcport_enable_req_s {
64 struct bfi_mhdr_s mh; /* msg header */
65 u32 rsvd1;
66 wwn_t nwwn; /* node wwn of physical port */
67 wwn_t pwwn; /* port wwn of physical port */
68 struct bfa_pport_cfg_s port_cfg; /* port configuration */
69 union bfi_addr_u stats_dma_addr; /* DMA address for stats */
70 u32 msgtag; /* msgtag for reply */
71 u32 rsvd2;
72};
73
74/**
75 * BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ
76 */
77struct bfi_fcport_set_svc_params_req_s {
78 struct bfi_mhdr_s mh; /* msg header */
79 u16 tx_bbcredit; /* Tx credits */
80 u16 rsvd;
81};
82
83/**
84 * BFI_FCPORT_I2H_EVENT
85 */
86struct bfi_fcport_event_s {
87 struct bfi_mhdr_s mh; /* common msg header */
88 struct bfa_pport_link_s link_state;
89};
90
91/**
92 * fcport H2I message
93 */
94union bfi_fcport_h2i_msg_u {
95 struct bfi_mhdr_s *mhdr;
96 struct bfi_fcport_enable_req_s *penable;
97 struct bfi_fcport_req_s *pdisable;
98 struct bfi_fcport_set_svc_params_req_s *psetsvcparams;
99 struct bfi_fcport_req_s *pstatsget;
100 struct bfi_fcport_req_s *pstatsclear;
101};
102
103/**
104 * fcport I2H message
105 */
106union bfi_fcport_i2h_msg_u {
107 struct bfi_msg_s *msg;
108 struct bfi_fcport_rsp_s *penable_rsp;
109 struct bfi_fcport_rsp_s *pdisable_rsp;
110 struct bfi_fcport_rsp_s *psetsvcparams_rsp;
111 struct bfi_fcport_rsp_s *pstatsget_rsp;
112 struct bfi_fcport_rsp_s *pstatsclear_rsp;
113 struct bfi_fcport_event_s *event;
114};
115
116#pragma pack()
117
118#endif /* __BFI_PPORT_H__ */
diff --git a/drivers/scsi/bfa/include/bfi/bfi_rport.h b/drivers/scsi/bfa/include/bfi/bfi_rport.h
deleted file mode 100644
index e1cd83b56ec6..000000000000
--- a/drivers/scsi/bfa/include/bfi/bfi_rport.h
+++ /dev/null
@@ -1,104 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_RPORT_H__
19#define __BFI_RPORT_H__
20
21#include <bfi/bfi.h>
22
23#pragma pack(1)
24
25enum bfi_rport_h2i_msgs {
26 BFI_RPORT_H2I_CREATE_REQ = 1,
27 BFI_RPORT_H2I_DELETE_REQ = 2,
28 BFI_RPORT_H2I_SET_SPEED_REQ = 3,
29};
30
31enum bfi_rport_i2h_msgs {
32 BFI_RPORT_I2H_CREATE_RSP = BFA_I2HM(1),
33 BFI_RPORT_I2H_DELETE_RSP = BFA_I2HM(2),
34 BFI_RPORT_I2H_QOS_SCN = BFA_I2HM(3),
35};
36
37struct bfi_rport_create_req_s {
38 struct bfi_mhdr_s mh; /* common msg header */
39 u16 bfa_handle; /* host rport handle */
40 u16 max_frmsz; /* max rcv pdu size */
41 u32 pid:24, /* remote port ID */
42 lp_tag:8; /* local port tag */
43 u32 local_pid:24, /* local port ID */
44 cisc:8;
45 u8 fc_class; /* supported FC classes */
46 u8 vf_en; /* virtual fabric enable */
47 u16 vf_id; /* virtual fabric ID */
48};
49
50struct bfi_rport_create_rsp_s {
51 struct bfi_mhdr_s mh; /* common msg header */
52 u8 status; /* rport creation status */
53 u8 rsvd[3];
54 u16 bfa_handle; /* host rport handle */
55 u16 fw_handle; /* firmware rport handle */
56 struct bfa_rport_qos_attr_s qos_attr; /* QoS Attributes */
57};
58
59struct bfa_rport_speed_req_s {
60 struct bfi_mhdr_s mh; /* common msg header */
61 u16 fw_handle; /* firmware rport handle */
62 u8 speed; /*! rport's speed via RPSC */
63 u8 rsvd;
64};
65
66struct bfi_rport_delete_req_s {
67 struct bfi_mhdr_s mh; /* common msg header */
68 u16 fw_handle; /* firmware rport handle */
69 u16 rsvd;
70};
71
72struct bfi_rport_delete_rsp_s {
73 struct bfi_mhdr_s mh; /* common msg header */
74 u16 bfa_handle; /* host rport handle */
75 u8 status; /* rport deletion status */
76 u8 rsvd;
77};
78
79struct bfi_rport_qos_scn_s {
80 struct bfi_mhdr_s mh; /* common msg header */
81 u16 bfa_handle; /* host rport handle */
82 u16 rsvd;
83 struct bfa_rport_qos_attr_s old_qos_attr; /* Old QoS Attributes */
84 struct bfa_rport_qos_attr_s new_qos_attr; /* New QoS Attributes */
85};
86
87union bfi_rport_h2i_msg_u {
88 struct bfi_msg_s *msg;
89 struct bfi_rport_create_req_s *create_req;
90 struct bfi_rport_delete_req_s *delete_req;
91 struct bfi_rport_speed_req_s *speed_req;
92};
93
94union bfi_rport_i2h_msg_u {
95 struct bfi_msg_s *msg;
96 struct bfi_rport_create_rsp_s *create_rsp;
97 struct bfi_rport_delete_rsp_s *delete_rsp;
98 struct bfi_rport_qos_scn_s *qos_scn_evt;
99};
100
101#pragma pack()
102
103#endif /* __BFI_RPORT_H__ */
104
diff --git a/drivers/scsi/bfa/include/bfi/bfi_uf.h b/drivers/scsi/bfa/include/bfi/bfi_uf.h
deleted file mode 100644
index f328a9e7e622..000000000000
--- a/drivers/scsi/bfa/include/bfi/bfi_uf.h
+++ /dev/null
@@ -1,52 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_UF_H__
19#define __BFI_UF_H__
20
21#include "bfi.h"
22
23#pragma pack(1)
24
25enum bfi_uf_h2i {
26 BFI_UF_H2I_BUF_POST = 1,
27};
28
29enum bfi_uf_i2h {
30 BFI_UF_I2H_FRM_RCVD = BFA_I2HM(1),
31};
32
33#define BFA_UF_MAX_SGES 2
34
35struct bfi_uf_buf_post_s {
36 struct bfi_mhdr_s mh; /* Common msg header */
37 u16 buf_tag; /* buffer tag */
38 u16 buf_len; /* total buffer length */
39 struct bfi_sge_s sge[BFA_UF_MAX_SGES]; /* buffer DMA SGEs */
40};
41
42struct bfi_uf_frm_rcvd_s {
43 struct bfi_mhdr_s mh; /* Common msg header */
44 u16 buf_tag; /* buffer tag */
45 u16 rsvd;
46 u16 frm_len; /* received frame length */
47 u16 xfr_len; /* tranferred length */
48};
49
50#pragma pack()
51
52#endif /* __BFI_UF_H__ */
diff --git a/drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h b/drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h
deleted file mode 100644
index a75a1f3be315..000000000000
--- a/drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h
+++ /dev/null
@@ -1,40 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_cna_trcmod.h CNA trace modules
20 */
21
22#ifndef __BFA_CNA_TRCMOD_H__
23#define __BFA_CNA_TRCMOD_H__
24
25#include <cs/bfa_trc.h>
26
27/*
28 * !!! Only append to the enums defined here to avoid any versioning
29 * !!! needed between trace utility and driver version
30 */
31enum {
32 BFA_TRC_CNA_CEE = 1,
33 BFA_TRC_CNA_PORT = 2,
34 BFA_TRC_CNA_IOC = 3,
35 BFA_TRC_CNA_DIAG = 4,
36 BFA_TRC_CNA_IOC_CB = 5,
37 BFA_TRC_CNA_IOC_CT = 6,
38};
39
40#endif /* __BFA_CNA_TRCMOD_H__ */
diff --git a/drivers/scsi/bfa/include/cna/cee/bfa_cee.h b/drivers/scsi/bfa/include/cna/cee/bfa_cee.h
deleted file mode 100644
index 77f297f68046..000000000000
--- a/drivers/scsi/bfa/include/cna/cee/bfa_cee.h
+++ /dev/null
@@ -1,77 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_CEE_H__
19#define __BFA_CEE_H__
20
21#include <defs/bfa_defs_cee.h>
22#include <bfa_ioc.h>
23#include <cs/bfa_trc.h>
24#include <cs/bfa_log.h>
25
26typedef void (*bfa_cee_get_attr_cbfn_t) (void *dev, bfa_status_t status);
27typedef void (*bfa_cee_get_stats_cbfn_t) (void *dev, bfa_status_t status);
28typedef void (*bfa_cee_reset_stats_cbfn_t) (void *dev, bfa_status_t status);
29typedef void (*bfa_cee_hbfail_cbfn_t) (void *dev, bfa_status_t status);
30
31struct bfa_cee_cbfn_s {
32 bfa_cee_get_attr_cbfn_t get_attr_cbfn;
33 void *get_attr_cbarg;
34 bfa_cee_get_stats_cbfn_t get_stats_cbfn;
35 void *get_stats_cbarg;
36 bfa_cee_reset_stats_cbfn_t reset_stats_cbfn;
37 void *reset_stats_cbarg;
38};
39
40struct bfa_cee_s {
41 void *dev;
42 bfa_boolean_t get_attr_pending;
43 bfa_boolean_t get_stats_pending;
44 bfa_boolean_t reset_stats_pending;
45 bfa_status_t get_attr_status;
46 bfa_status_t get_stats_status;
47 bfa_status_t reset_stats_status;
48 struct bfa_cee_cbfn_s cbfn;
49 struct bfa_ioc_hbfail_notify_s hbfail;
50 struct bfa_trc_mod_s *trcmod;
51 struct bfa_log_mod_s *logmod;
52 struct bfa_cee_attr_s *attr;
53 struct bfa_cee_stats_s *stats;
54 struct bfa_dma_s attr_dma;
55 struct bfa_dma_s stats_dma;
56 struct bfa_ioc_s *ioc;
57 struct bfa_mbox_cmd_s get_cfg_mb;
58 struct bfa_mbox_cmd_s get_stats_mb;
59 struct bfa_mbox_cmd_s reset_stats_mb;
60};
61
62u32 bfa_cee_meminfo(void);
63void bfa_cee_mem_claim(struct bfa_cee_s *cee, u8 *dma_kva,
64 u64 dma_pa);
65void bfa_cee_attach(struct bfa_cee_s *cee, struct bfa_ioc_s *ioc, void *dev,
66 struct bfa_trc_mod_s *trcmod,
67 struct bfa_log_mod_s *logmod);
68void bfa_cee_detach(struct bfa_cee_s *cee);
69bfa_status_t bfa_cee_get_attr(struct bfa_cee_s *cee,
70 struct bfa_cee_attr_s *attr,
71 bfa_cee_get_attr_cbfn_t cbfn, void *cbarg);
72bfa_status_t bfa_cee_get_stats(struct bfa_cee_s *cee,
73 struct bfa_cee_stats_s *stats,
74 bfa_cee_get_stats_cbfn_t cbfn, void *cbarg);
75bfa_status_t bfa_cee_reset_stats(struct bfa_cee_s *cee,
76 bfa_cee_reset_stats_cbfn_t cbfn, void *cbarg);
77#endif /* __BFA_CEE_H__ */
diff --git a/drivers/scsi/bfa/include/cna/port/bfa_port.h b/drivers/scsi/bfa/include/cna/port/bfa_port.h
deleted file mode 100644
index d7babaf97848..000000000000
--- a/drivers/scsi/bfa/include/cna/port/bfa_port.h
+++ /dev/null
@@ -1,70 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_PORT_H__
19#define __BFA_PORT_H__
20
21#include <defs/bfa_defs_port.h>
22#include <bfa_ioc.h>
23#include <cs/bfa_trc.h>
24#include <cs/bfa_log.h>
25
26typedef void (*bfa_port_stats_cbfn_t) (void *dev, bfa_status_t status);
27typedef void (*bfa_port_endis_cbfn_t) (void *dev, bfa_status_t status);
28
29struct bfa_port_s {
30 void *dev;
31 struct bfa_ioc_s *ioc;
32 struct bfa_trc_mod_s *trcmod;
33 struct bfa_log_mod_s *logmod;
34 u32 msgtag;
35 bfa_boolean_t stats_busy;
36 struct bfa_mbox_cmd_s stats_mb;
37 bfa_port_stats_cbfn_t stats_cbfn;
38 void *stats_cbarg;
39 bfa_status_t stats_status;
40 u32 stats_reset_time;
41 union bfa_pport_stats_u *stats;
42 struct bfa_dma_s stats_dma;
43 bfa_boolean_t endis_pending;
44 struct bfa_mbox_cmd_s endis_mb;
45 bfa_port_endis_cbfn_t endis_cbfn;
46 void *endis_cbarg;
47 bfa_status_t endis_status;
48 struct bfa_ioc_hbfail_notify_s hbfail;
49};
50
51void bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
52 void *dev, struct bfa_trc_mod_s *trcmod,
53 struct bfa_log_mod_s *logmod);
54void bfa_port_detach(struct bfa_port_s *port);
55void bfa_port_hbfail(void *arg);
56
57bfa_status_t bfa_port_get_stats(struct bfa_port_s *port,
58 union bfa_pport_stats_u *stats,
59 bfa_port_stats_cbfn_t cbfn, void *cbarg);
60bfa_status_t bfa_port_clear_stats(struct bfa_port_s *port,
61 bfa_port_stats_cbfn_t cbfn, void *cbarg);
62bfa_status_t bfa_port_enable(struct bfa_port_s *port,
63 bfa_port_endis_cbfn_t cbfn, void *cbarg);
64bfa_status_t bfa_port_disable(struct bfa_port_s *port,
65 bfa_port_endis_cbfn_t cbfn, void *cbarg);
66u32 bfa_port_meminfo(void);
67void bfa_port_mem_claim(struct bfa_port_s *port, u8 *dma_kva,
68 u64 dma_pa);
69
70#endif /* __BFA_PORT_H__ */
diff --git a/drivers/scsi/bfa/include/cna/pstats/ethport_defs.h b/drivers/scsi/bfa/include/cna/pstats/ethport_defs.h
deleted file mode 100644
index 1563ee512218..000000000000
--- a/drivers/scsi/bfa/include/cna/pstats/ethport_defs.h
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved.
4 *
5 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License (GPL) Version 2 as
9 * published by the Free Software Foundation
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 */
16
17#ifndef __ETHPORT_DEFS_H__
18#define __ETHPORT_DEFS_H__
19
20struct bnad_drv_stats {
21 u64 netif_queue_stop;
22 u64 netif_queue_wakeup;
23 u64 tso4;
24 u64 tso6;
25 u64 tso_err;
26 u64 tcpcsum_offload;
27 u64 udpcsum_offload;
28 u64 csum_help;
29 u64 csum_help_err;
30
31 u64 hw_stats_updates;
32 u64 netif_rx_schedule;
33 u64 netif_rx_complete;
34 u64 netif_rx_dropped;
35};
36#endif
diff --git a/drivers/scsi/bfa/include/cna/pstats/phyport_defs.h b/drivers/scsi/bfa/include/cna/pstats/phyport_defs.h
deleted file mode 100644
index eb7548030d0f..000000000000
--- a/drivers/scsi/bfa/include/cna/pstats/phyport_defs.h
+++ /dev/null
@@ -1,218 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved.
4 *
5 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License (GPL) Version 2 as
9 * published by the Free Software Foundation
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 */
16
17#ifndef __PHYPORT_DEFS_H__
18#define __PHYPORT_DEFS_H__
19
20#define BNA_TXF_ID_MAX 64
21#define BNA_RXF_ID_MAX 64
22
23/*
24 * Statistics
25 */
26
27/*
28 * TxF Frame Statistics
29 */
30struct bna_stats_txf {
31 u64 ucast_octets;
32 u64 ucast;
33 u64 ucast_vlan;
34
35 u64 mcast_octets;
36 u64 mcast;
37 u64 mcast_vlan;
38
39 u64 bcast_octets;
40 u64 bcast;
41 u64 bcast_vlan;
42
43 u64 errors;
44 u64 filter_vlan; /* frames filtered due to VLAN */
45 u64 filter_mac_sa; /* frames filtered due to SA check */
46};
47
48/*
49 * RxF Frame Statistics
50 */
51struct bna_stats_rxf {
52 u64 ucast_octets;
53 u64 ucast;
54 u64 ucast_vlan;
55
56 u64 mcast_octets;
57 u64 mcast;
58 u64 mcast_vlan;
59
60 u64 bcast_octets;
61 u64 bcast;
62 u64 bcast_vlan;
63 u64 frame_drops;
64};
65
66/*
67 * FC Tx Frame Statistics
68 */
69struct bna_stats_fc_tx {
70 u64 txf_ucast_octets;
71 u64 txf_ucast;
72 u64 txf_ucast_vlan;
73
74 u64 txf_mcast_octets;
75 u64 txf_mcast;
76 u64 txf_mcast_vlan;
77
78 u64 txf_bcast_octets;
79 u64 txf_bcast;
80 u64 txf_bcast_vlan;
81
82 u64 txf_parity_errors;
83 u64 txf_timeout;
84 u64 txf_fid_parity_errors;
85};
86
87/*
88 * FC Rx Frame Statistics
89 */
90struct bna_stats_fc_rx {
91 u64 rxf_ucast_octets;
92 u64 rxf_ucast;
93 u64 rxf_ucast_vlan;
94
95 u64 rxf_mcast_octets;
96 u64 rxf_mcast;
97 u64 rxf_mcast_vlan;
98
99 u64 rxf_bcast_octets;
100 u64 rxf_bcast;
101 u64 rxf_bcast_vlan;
102};
103
104/*
105 * RAD Frame Statistics
106 */
107struct cna_stats_rad {
108 u64 rx_frames;
109 u64 rx_octets;
110 u64 rx_vlan_frames;
111
112 u64 rx_ucast;
113 u64 rx_ucast_octets;
114 u64 rx_ucast_vlan;
115
116 u64 rx_mcast;
117 u64 rx_mcast_octets;
118 u64 rx_mcast_vlan;
119
120 u64 rx_bcast;
121 u64 rx_bcast_octets;
122 u64 rx_bcast_vlan;
123
124 u64 rx_drops;
125};
126
127/*
128 * BPC Tx Registers
129 */
130struct cna_stats_bpc_tx {
131 u64 tx_pause[8];
132 u64 tx_zero_pause[8]; /* Pause cancellation */
133 u64 tx_first_pause[8]; /* Pause initiation rather
134 *than retention */
135};
136
137/*
138 * BPC Rx Registers
139 */
140struct cna_stats_bpc_rx {
141 u64 rx_pause[8];
142 u64 rx_zero_pause[8]; /* Pause cancellation */
143 u64 rx_first_pause[8]; /* Pause initiation rather
144 *than retention */
145};
146
147/*
148 * MAC Rx Statistics
149 */
150struct cna_stats_mac_rx {
151 u64 frame_64; /* both rx and tx counter */
152 u64 frame_65_127; /* both rx and tx counter */
153 u64 frame_128_255; /* both rx and tx counter */
154 u64 frame_256_511; /* both rx and tx counter */
155 u64 frame_512_1023; /* both rx and tx counter */
156 u64 frame_1024_1518; /* both rx and tx counter */
157 u64 frame_1518_1522; /* both rx and tx counter */
158 u64 rx_bytes;
159 u64 rx_packets;
160 u64 rx_fcs_error;
161 u64 rx_multicast;
162 u64 rx_broadcast;
163 u64 rx_control_frames;
164 u64 rx_pause;
165 u64 rx_unknown_opcode;
166 u64 rx_alignment_error;
167 u64 rx_frame_length_error;
168 u64 rx_code_error;
169 u64 rx_carrier_sense_error;
170 u64 rx_undersize;
171 u64 rx_oversize;
172 u64 rx_fragments;
173 u64 rx_jabber;
174 u64 rx_drop;
175};
176
177/*
178 * MAC Tx Statistics
179 */
180struct cna_stats_mac_tx {
181 u64 tx_bytes;
182 u64 tx_packets;
183 u64 tx_multicast;
184 u64 tx_broadcast;
185 u64 tx_pause;
186 u64 tx_deferral;
187 u64 tx_excessive_deferral;
188 u64 tx_single_collision;
189 u64 tx_muliple_collision;
190 u64 tx_late_collision;
191 u64 tx_excessive_collision;
192 u64 tx_total_collision;
193 u64 tx_pause_honored;
194 u64 tx_drop;
195 u64 tx_jabber;
196 u64 tx_fcs_error;
197 u64 tx_control_frame;
198 u64 tx_oversize;
199 u64 tx_undersize;
200 u64 tx_fragments;
201};
202
203/*
204 * Complete statistics
205 */
206struct bna_stats {
207 struct cna_stats_mac_rx mac_rx_stats;
208 struct cna_stats_bpc_rx bpc_rx_stats;
209 struct cna_stats_rad rad_stats;
210 struct bna_stats_fc_rx fc_rx_stats;
211 struct cna_stats_mac_tx mac_tx_stats;
212 struct cna_stats_bpc_tx bpc_tx_stats;
213 struct bna_stats_fc_tx fc_tx_stats;
214 struct bna_stats_rxf rxf_stats[BNA_TXF_ID_MAX];
215 struct bna_stats_txf txf_stats[BNA_RXF_ID_MAX];
216};
217
218#endif
diff --git a/drivers/scsi/bfa/include/cs/bfa_checksum.h b/drivers/scsi/bfa/include/cs/bfa_checksum.h
deleted file mode 100644
index 650f8d0aaff9..000000000000
--- a/drivers/scsi/bfa/include/cs/bfa_checksum.h
+++ /dev/null
@@ -1,60 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_checksum.h BFA checksum utilities
20 */
21
22#ifndef __BFA_CHECKSUM_H__
23#define __BFA_CHECKSUM_H__
24
25static inline u32
26bfa_checksum_u32(u32 *buf, int sz)
27{
28 int i, m = sz >> 2;
29 u32 sum = 0;
30
31 for (i = 0; i < m; i++)
32 sum ^= buf[i];
33
34 return sum;
35}
36
37static inline u16
38bfa_checksum_u16(u16 *buf, int sz)
39{
40 int i, m = sz >> 1;
41 u16 sum = 0;
42
43 for (i = 0; i < m; i++)
44 sum ^= buf[i];
45
46 return sum;
47}
48
49static inline u8
50bfa_checksum_u8(u8 *buf, int sz)
51{
52 int i;
53 u8 sum = 0;
54
55 for (i = 0; i < sz; i++)
56 sum ^= buf[i];
57
58 return sum;
59}
60#endif
diff --git a/drivers/scsi/bfa/include/cs/bfa_debug.h b/drivers/scsi/bfa/include/cs/bfa_debug.h
deleted file mode 100644
index 75a911ea7936..000000000000
--- a/drivers/scsi/bfa/include/cs/bfa_debug.h
+++ /dev/null
@@ -1,45 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_debug.h BFA debug interfaces
20 */
21
22#ifndef __BFA_DEBUG_H__
23#define __BFA_DEBUG_H__
24
25#define bfa_assert(__cond) do { \
26 if (!(__cond)) \
27 bfa_panic(__LINE__, __FILE__, #__cond); \
28} while (0)
29
30#define bfa_sm_fault(__mod, __event) do { \
31 bfa_trc(__mod, (((uint32_t)0xDEAD << 16) | __event)); \
32 bfa_sm_panic((__mod)->logm, __LINE__, __FILE__, __event); \
33} while (0)
34
35#ifndef BFA_PERF_BUILD
36#define bfa_assert_fp(__cond) bfa_assert(__cond)
37#else
38#define bfa_assert_fp(__cond)
39#endif
40
41struct bfa_log_mod_s;
42void bfa_panic(int line, char *file, char *panicstr);
43void bfa_sm_panic(struct bfa_log_mod_s *logm, int line, char *file, int event);
44
45#endif /* __BFA_DEBUG_H__ */
diff --git a/drivers/scsi/bfa/include/cs/bfa_log.h b/drivers/scsi/bfa/include/cs/bfa_log.h
deleted file mode 100644
index bc334e0a93fa..000000000000
--- a/drivers/scsi/bfa/include/cs/bfa_log.h
+++ /dev/null
@@ -1,184 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_log.h BFA log library data structure and function definition
20 */
21
22#ifndef __BFA_LOG_H__
23#define __BFA_LOG_H__
24
25#include <bfa_os_inc.h>
26#include <defs/bfa_defs_status.h>
27#include <defs/bfa_defs_aen.h>
28
29/*
30 * BFA log module definition
31 *
32 * To create a new module id:
33 * Add a #define at the end of the list below. Select a value for your
34 * definition so that it is one (1) greater than the previous
35 * definition. Modify the definition of BFA_LOG_MODULE_ID_MAX to become
36 * your new definition.
37 * Should have no gaps in between the values because this is used in arrays.
38 * IMPORTANT: AEN_IDs must be at the begining, otherwise update bfa_defs_aen.h
39 */
40
41enum bfa_log_module_id {
42 BFA_LOG_UNUSED_ID = 0,
43
44 /* AEN defs begin */
45 BFA_LOG_AEN_MIN = BFA_LOG_UNUSED_ID,
46
47 BFA_LOG_AEN_ID_ADAPTER = BFA_LOG_AEN_MIN + BFA_AEN_CAT_ADAPTER,/* 1 */
48 BFA_LOG_AEN_ID_PORT = BFA_LOG_AEN_MIN + BFA_AEN_CAT_PORT, /* 2 */
49 BFA_LOG_AEN_ID_LPORT = BFA_LOG_AEN_MIN + BFA_AEN_CAT_LPORT, /* 3 */
50 BFA_LOG_AEN_ID_RPORT = BFA_LOG_AEN_MIN + BFA_AEN_CAT_RPORT, /* 4 */
51 BFA_LOG_AEN_ID_ITNIM = BFA_LOG_AEN_MIN + BFA_AEN_CAT_ITNIM, /* 5 */
52 BFA_LOG_AEN_ID_TIN = BFA_LOG_AEN_MIN + BFA_AEN_CAT_TIN, /* 6 */
53 BFA_LOG_AEN_ID_IPFC = BFA_LOG_AEN_MIN + BFA_AEN_CAT_IPFC, /* 7 */
54 BFA_LOG_AEN_ID_AUDIT = BFA_LOG_AEN_MIN + BFA_AEN_CAT_AUDIT, /* 8 */
55 BFA_LOG_AEN_ID_IOC = BFA_LOG_AEN_MIN + BFA_AEN_CAT_IOC, /* 9 */
56 BFA_LOG_AEN_ID_ETHPORT = BFA_LOG_AEN_MIN + BFA_AEN_CAT_ETHPORT,/* 10 */
57
58 BFA_LOG_AEN_MAX = BFA_LOG_AEN_ID_ETHPORT,
59 /* AEN defs end */
60
61 BFA_LOG_MODULE_ID_MIN = BFA_LOG_AEN_MAX,
62
63 BFA_LOG_FW_ID = BFA_LOG_MODULE_ID_MIN + 1,
64 BFA_LOG_HAL_ID = BFA_LOG_MODULE_ID_MIN + 2,
65 BFA_LOG_FCS_ID = BFA_LOG_MODULE_ID_MIN + 3,
66 BFA_LOG_WDRV_ID = BFA_LOG_MODULE_ID_MIN + 4,
67 BFA_LOG_LINUX_ID = BFA_LOG_MODULE_ID_MIN + 5,
68 BFA_LOG_SOLARIS_ID = BFA_LOG_MODULE_ID_MIN + 6,
69
70 BFA_LOG_MODULE_ID_MAX = BFA_LOG_SOLARIS_ID,
71
72 /* Not part of any arrays */
73 BFA_LOG_MODULE_ID_ALL = BFA_LOG_MODULE_ID_MAX + 1,
74 BFA_LOG_AEN_ALL = BFA_LOG_MODULE_ID_MAX + 2,
75 BFA_LOG_DRV_ALL = BFA_LOG_MODULE_ID_MAX + 3,
76};
77
78/*
79 * BFA log catalog name
80 */
81#define BFA_LOG_CAT_NAME "BFA"
82
83/*
84 * bfa log severity values
85 */
86enum bfa_log_severity {
87 BFA_LOG_INVALID = 0,
88 BFA_LOG_CRITICAL = 1,
89 BFA_LOG_ERROR = 2,
90 BFA_LOG_WARNING = 3,
91 BFA_LOG_INFO = 4,
92 BFA_LOG_NONE = 5,
93 BFA_LOG_LEVEL_MAX = BFA_LOG_NONE
94};
95
96#define BFA_LOG_MODID_OFFSET 16
97
98
99struct bfa_log_msgdef_s {
100 u32 msg_id; /* message id */
101 int attributes; /* attributes */
102 int severity; /* severity level */
103 char *msg_value;
104 /* msg string */
105 char *message;
106 /* msg format string */
107 int arg_type; /* argument type */
108 int arg_num; /* number of argument */
109};
110
111/*
112 * supported argument type
113 */
114enum bfa_log_arg_type {
115 BFA_LOG_S = 0, /* string */
116 BFA_LOG_D, /* decimal */
117 BFA_LOG_I, /* integer */
118 BFA_LOG_O, /* oct number */
119 BFA_LOG_U, /* unsigned integer */
120 BFA_LOG_X, /* hex number */
121 BFA_LOG_F, /* floating */
122 BFA_LOG_C, /* character */
123 BFA_LOG_L, /* double */
124 BFA_LOG_P /* pointer */
125};
126
127#define BFA_LOG_ARG_TYPE 2
128#define BFA_LOG_ARG0 (0 * BFA_LOG_ARG_TYPE)
129#define BFA_LOG_ARG1 (1 * BFA_LOG_ARG_TYPE)
130#define BFA_LOG_ARG2 (2 * BFA_LOG_ARG_TYPE)
131#define BFA_LOG_ARG3 (3 * BFA_LOG_ARG_TYPE)
132
133#define BFA_LOG_GET_MOD_ID(msgid) ((msgid >> BFA_LOG_MODID_OFFSET) & 0xff)
134#define BFA_LOG_GET_MSG_IDX(msgid) (msgid & 0xffff)
135#define BFA_LOG_GET_MSG_ID(msgdef) ((msgdef)->msg_id)
136#define BFA_LOG_GET_MSG_FMT_STRING(msgdef) ((msgdef)->message)
137#define BFA_LOG_GET_SEVERITY(msgdef) ((msgdef)->severity)
138
139/*
140 * Event attributes
141 */
142#define BFA_LOG_ATTR_NONE 0
143#define BFA_LOG_ATTR_AUDIT 1
144#define BFA_LOG_ATTR_LOG 2
145#define BFA_LOG_ATTR_FFDC 4
146
147#define BFA_LOG_CREATE_ID(msw, lsw) \
148 (((u32)msw << BFA_LOG_MODID_OFFSET) | lsw)
149
150struct bfa_log_mod_s;
151
152/**
153 * callback function
154 */
155typedef void (*bfa_log_cb_t)(struct bfa_log_mod_s *log_mod, u32 msg_id,
156 const char *format, ...);
157
158
159struct bfa_log_mod_s {
160 char instance_info[BFA_STRING_32]; /* instance info */
161 int log_level[BFA_LOG_MODULE_ID_MAX + 1];
162 /* log level for modules */
163 bfa_log_cb_t cbfn; /* callback function */
164};
165
166extern int bfa_log_init(struct bfa_log_mod_s *log_mod,
167 char *instance_name, bfa_log_cb_t cbfn);
168extern int bfa_log(struct bfa_log_mod_s *log_mod, u32 msg_id, ...);
169extern bfa_status_t bfa_log_set_level(struct bfa_log_mod_s *log_mod,
170 int mod_id, enum bfa_log_severity log_level);
171extern bfa_status_t bfa_log_set_level_all(struct bfa_log_mod_s *log_mod,
172 enum bfa_log_severity log_level);
173extern bfa_status_t bfa_log_set_level_aen(struct bfa_log_mod_s *log_mod,
174 enum bfa_log_severity log_level);
175extern enum bfa_log_severity bfa_log_get_level(struct bfa_log_mod_s *log_mod,
176 int mod_id);
177extern enum bfa_log_severity bfa_log_get_msg_level(
178 struct bfa_log_mod_s *log_mod, u32 msg_id);
179/*
180 * array of messages generated from xml files
181 */
182extern struct bfa_log_msgdef_s bfa_log_msg_array[];
183
184#endif
diff --git a/drivers/scsi/bfa/include/cs/bfa_perf.h b/drivers/scsi/bfa/include/cs/bfa_perf.h
deleted file mode 100644
index 45aa5f978ff5..000000000000
--- a/drivers/scsi/bfa/include/cs/bfa_perf.h
+++ /dev/null
@@ -1,34 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFAD_PERF_H__
18#define __BFAD_PERF_H__
19
20#ifdef BFAD_PERF_BUILD
21
22#undef bfa_trc
23#undef bfa_trc32
24#undef bfa_assert
25#undef BFA_TRC_FILE
26
27#define bfa_trc(_trcp, _data)
28#define bfa_trc32(_trcp, _data)
29#define bfa_assert(__cond)
30#define BFA_TRC_FILE(__mod, __submod)
31
32#endif
33
34#endif /* __BFAD_PERF_H__ */
diff --git a/drivers/scsi/bfa/include/cs/bfa_q.h b/drivers/scsi/bfa/include/cs/bfa_q.h
deleted file mode 100644
index ea895facedbc..000000000000
--- a/drivers/scsi/bfa/include/cs/bfa_q.h
+++ /dev/null
@@ -1,81 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_q.h Circular queue definitions.
20 */
21
22#ifndef __BFA_Q_H__
23#define __BFA_Q_H__
24
25#define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next))
26#define bfa_q_next(_qe) (((struct list_head *) (_qe))->next)
27#define bfa_q_prev(_qe) (((struct list_head *) (_qe))->prev)
28
29/*
30 * bfa_q_qe_init - to initialize a queue element
31 */
32#define bfa_q_qe_init(_qe) { \
33 bfa_q_next(_qe) = (struct list_head *) NULL; \
34 bfa_q_prev(_qe) = (struct list_head *) NULL; \
35}
36
37/*
38 * bfa_q_deq - dequeue an element from head of the queue
39 */
40#define bfa_q_deq(_q, _qe) { \
41 if (!list_empty(_q)) { \
42 (*((struct list_head **) (_qe))) = bfa_q_next(_q); \
43 bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) = \
44 (struct list_head *) (_q); \
45 bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe)); \
46 BFA_Q_DBG_INIT(*((struct list_head **) _qe)); \
47 } else { \
48 *((struct list_head **) (_qe)) = (struct list_head *) NULL; \
49 } \
50}
51
52/*
53 * bfa_q_deq_tail - dequeue an element from tail of the queue
54 */
55#define bfa_q_deq_tail(_q, _qe) { \
56 if (!list_empty(_q)) { \
57 *((struct list_head **) (_qe)) = bfa_q_prev(_q); \
58 bfa_q_next(bfa_q_prev(*((struct list_head **) _qe))) = \
59 (struct list_head *) (_q); \
60 bfa_q_prev(_q) = bfa_q_prev(*(struct list_head **) _qe); \
61 BFA_Q_DBG_INIT(*((struct list_head **) _qe)); \
62 } else { \
63 *((struct list_head **) (_qe)) = (struct list_head *) NULL; \
64 } \
65}
66
67/*
68 * #ifdef BFA_DEBUG (Using bfa_assert to check for debug_build is not
69 * consistent across modules)
70 */
71#ifndef BFA_PERF_BUILD
72#define BFA_Q_DBG_INIT(_qe) bfa_q_qe_init(_qe)
73#else
74#define BFA_Q_DBG_INIT(_qe)
75#endif
76
77#define bfa_q_is_on_q(_q, _qe) \
78 bfa_q_is_on_q_func(_q, (struct list_head *)(_qe))
79extern int bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe);
80
81#endif
diff --git a/drivers/scsi/bfa/include/cs/bfa_sm.h b/drivers/scsi/bfa/include/cs/bfa_sm.h
deleted file mode 100644
index 11fba9082f05..000000000000
--- a/drivers/scsi/bfa/include/cs/bfa_sm.h
+++ /dev/null
@@ -1,77 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfasm.h State machine defines
20 */
21
22#ifndef __BFA_SM_H__
23#define __BFA_SM_H__
24
25typedef void (*bfa_sm_t)(void *sm, int event);
26/**
27 * oc - object class eg. bfa_ioc
28 * st - state, eg. reset
29 * otype - object type, eg. struct bfa_ioc_s
30 * etype - object type, eg. enum ioc_event
31 */
32#define bfa_sm_state_decl(oc, st, otype, etype) \
33 static void oc ## _sm_ ## st(otype * fsm, etype event)
34
35#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state))
36#define bfa_sm_send_event(_sm, _event) ((_sm)->sm((_sm), (_event)))
37#define bfa_sm_get_state(_sm) ((_sm)->sm)
38#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state))
39
40/**
41 * For converting from state machine function to state encoding.
42 */
43struct bfa_sm_table_s {
44 bfa_sm_t sm; /* state machine function */
45 int state; /* state machine encoding */
46 char *name; /* state name for display */
47};
48#define BFA_SM(_sm) ((bfa_sm_t)(_sm))
49
50int bfa_sm_to_state(struct bfa_sm_table_s *smt, bfa_sm_t sm);
51
52/**
53 * State machine with entry actions.
54 */
55typedef void (*bfa_fsm_t)(void *fsm, int event);
56
57/**
58 * oc - object class eg. bfa_ioc
59 * st - state, eg. reset
60 * otype - object type, eg. struct bfa_ioc_s
61 * etype - object type, eg. enum ioc_event
62 */
63#define bfa_fsm_state_decl(oc, st, otype, etype) \
64 static void oc ## _sm_ ## st(otype * fsm, etype event); \
65 static void oc ## _sm_ ## st ## _entry(otype * fsm)
66
67#define bfa_fsm_set_state(_fsm, _state) do { \
68 (_fsm)->fsm = (bfa_fsm_t)(_state); \
69 _state ## _entry(_fsm); \
70} while (0)
71
72#define bfa_fsm_send_event(_fsm, _event) \
73 ((_fsm)->fsm((_fsm), (_event)))
74#define bfa_fsm_cmp_state(_fsm, _state) \
75 ((_fsm)->fsm == (bfa_fsm_t)(_state))
76
77#endif
diff --git a/drivers/scsi/bfa/include/cs/bfa_trc.h b/drivers/scsi/bfa/include/cs/bfa_trc.h
deleted file mode 100644
index 310771c888e7..000000000000
--- a/drivers/scsi/bfa/include/cs/bfa_trc.h
+++ /dev/null
@@ -1,176 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_TRC_H__
18#define __BFA_TRC_H__
19
20#include <bfa_os_inc.h>
21
22#ifndef BFA_TRC_MAX
23#define BFA_TRC_MAX (4 * 1024)
24#endif
25
26#ifndef BFA_TRC_TS
27#define BFA_TRC_TS(_trcm) ((_trcm)->ticks++)
28#endif
29
30struct bfa_trc_s {
31#ifdef __BIGENDIAN
32 u16 fileno;
33 u16 line;
34#else
35 u16 line;
36 u16 fileno;
37#endif
38 u32 timestamp;
39 union {
40 struct {
41 u32 rsvd;
42 u32 u32;
43 } u32;
44 u64 u64;
45 } data;
46};
47
48
49struct bfa_trc_mod_s {
50 u32 head;
51 u32 tail;
52 u32 ntrc;
53 u32 stopped;
54 u32 ticks;
55 u32 rsvd[3];
56 struct bfa_trc_s trc[BFA_TRC_MAX];
57};
58
59
60enum {
61 BFA_TRC_FW = 1, /* firmware modules */
62 BFA_TRC_HAL = 2, /* BFA modules */
63 BFA_TRC_FCS = 3, /* BFA FCS modules */
64 BFA_TRC_LDRV = 4, /* Linux driver modules */
65 BFA_TRC_SDRV = 5, /* Solaris driver modules */
66 BFA_TRC_VDRV = 6, /* vmware driver modules */
67 BFA_TRC_WDRV = 7, /* windows driver modules */
68 BFA_TRC_AEN = 8, /* AEN module */
69 BFA_TRC_BIOS = 9, /* bios driver modules */
70 BFA_TRC_EFI = 10, /* EFI driver modules */
71 BNA_TRC_WDRV = 11, /* BNA windows driver modules */
72 BNA_TRC_VDRV = 12, /* BNA vmware driver modules */
73 BNA_TRC_SDRV = 13, /* BNA Solaris driver modules */
74 BNA_TRC_LDRV = 14, /* BNA Linux driver modules */
75 BNA_TRC_HAL = 15, /* BNA modules */
76 BFA_TRC_CNA = 16, /* Common modules */
77 BNA_TRC_IMDRV = 17 /* BNA windows intermediate driver modules */
78};
79#define BFA_TRC_MOD_SH 10
80#define BFA_TRC_MOD(__mod) ((BFA_TRC_ ## __mod) << BFA_TRC_MOD_SH)
81
82/**
83 * Define a new tracing file (module). Module should match one defined above.
84 */
85#define BFA_TRC_FILE(__mod, __submod) \
86 static int __trc_fileno = ((BFA_TRC_ ## __mod ## _ ## __submod) | \
87 BFA_TRC_MOD(__mod))
88
89
90#define bfa_trc32(_trcp, _data) \
91 __bfa_trc((_trcp)->trcmod, __trc_fileno, __LINE__, (u32)_data)
92
93
94#ifndef BFA_BOOT_BUILD
95#define bfa_trc(_trcp, _data) \
96 __bfa_trc((_trcp)->trcmod, __trc_fileno, __LINE__, (u64)_data)
97#else
98void bfa_boot_trc(struct bfa_trc_mod_s *trcmod, u16 fileno,
99 u16 line, u32 data);
100#define bfa_trc(_trcp, _data) \
101 bfa_boot_trc((_trcp)->trcmod, __trc_fileno, __LINE__, (u32)_data)
102#endif
103
104
105static inline void
106bfa_trc_init(struct bfa_trc_mod_s *trcm)
107{
108 trcm->head = trcm->tail = trcm->stopped = 0;
109 trcm->ntrc = BFA_TRC_MAX;
110}
111
112
113static inline void
114bfa_trc_stop(struct bfa_trc_mod_s *trcm)
115{
116 trcm->stopped = 1;
117}
118
119#ifdef FWTRC
120extern void dc_flush(void *data);
121#else
122#define dc_flush(data)
123#endif
124
125
126static inline void
127__bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data)
128{
129 int tail = trcm->tail;
130 struct bfa_trc_s *trc = &trcm->trc[tail];
131
132 if (trcm->stopped)
133 return;
134
135 trc->fileno = (u16) fileno;
136 trc->line = (u16) line;
137 trc->data.u64 = data;
138 trc->timestamp = BFA_TRC_TS(trcm);
139 dc_flush(trc);
140
141 trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
142 if (trcm->tail == trcm->head)
143 trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
144 dc_flush(trcm);
145}
146
147
148static inline void
149__bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data)
150{
151 int tail = trcm->tail;
152 struct bfa_trc_s *trc = &trcm->trc[tail];
153
154 if (trcm->stopped)
155 return;
156
157 trc->fileno = (u16) fileno;
158 trc->line = (u16) line;
159 trc->data.u32.u32 = data;
160 trc->timestamp = BFA_TRC_TS(trcm);
161 dc_flush(trc);
162
163 trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
164 if (trcm->tail == trcm->head)
165 trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
166 dc_flush(trcm);
167}
168
169#ifndef BFA_PERF_BUILD
170#define bfa_trc_fp(_trcp, _data) bfa_trc(_trcp, _data)
171#else
172#define bfa_trc_fp(_trcp, _data)
173#endif
174
175#endif /* __BFA_TRC_H__ */
176
diff --git a/drivers/scsi/bfa/include/cs/bfa_wc.h b/drivers/scsi/bfa/include/cs/bfa_wc.h
deleted file mode 100644
index 0460bd4fc7c4..000000000000
--- a/drivers/scsi/bfa/include/cs/bfa_wc.h
+++ /dev/null
@@ -1,68 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_wc.h Generic wait counter.
20 */
21
22#ifndef __BFA_WC_H__
23#define __BFA_WC_H__
24
25typedef void (*bfa_wc_resume_t) (void *cbarg);
26
27struct bfa_wc_s {
28 bfa_wc_resume_t wc_resume;
29 void *wc_cbarg;
30 int wc_count;
31};
32
33static inline void
34bfa_wc_up(struct bfa_wc_s *wc)
35{
36 wc->wc_count++;
37}
38
39static inline void
40bfa_wc_down(struct bfa_wc_s *wc)
41{
42 wc->wc_count--;
43 if (wc->wc_count == 0)
44 wc->wc_resume(wc->wc_cbarg);
45}
46
47/**
48 * Initialize a waiting counter.
49 */
50static inline void
51bfa_wc_init(struct bfa_wc_s *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
52{
53 wc->wc_resume = wc_resume;
54 wc->wc_cbarg = wc_cbarg;
55 wc->wc_count = 0;
56 bfa_wc_up(wc);
57}
58
59/**
60 * Wait for counter to reach zero
61 */
62static inline void
63bfa_wc_wait(struct bfa_wc_s *wc)
64{
65 bfa_wc_down(wc);
66}
67
68#endif
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_adapter.h b/drivers/scsi/bfa/include/defs/bfa_defs_adapter.h
deleted file mode 100644
index aea0360d67d5..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_adapter.h
+++ /dev/null
@@ -1,83 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_DEFS_ADAPTER_H__
18#define __BFA_DEFS_ADAPTER_H__
19
20#include <protocol/types.h>
21#include <defs/bfa_defs_version.h>
22#include <defs/bfa_defs_mfg.h>
23
24/**
25 * BFA adapter level attributes.
26 */
27enum {
28 BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE),
29 /*
30 *!< adapter serial num length
31 */
32 BFA_ADAPTER_MODEL_NAME_LEN = 16, /* model name length */
33 BFA_ADAPTER_MODEL_DESCR_LEN = 128, /* model description length */
34 BFA_ADAPTER_MFG_NAME_LEN = 8, /* manufacturer name length */
35 BFA_ADAPTER_SYM_NAME_LEN = 64, /* adapter symbolic name length */
36 BFA_ADAPTER_OS_TYPE_LEN = 64, /* adapter os type length */
37};
38
39struct bfa_adapter_attr_s {
40 char manufacturer[BFA_ADAPTER_MFG_NAME_LEN];
41 char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
42 u32 card_type;
43 char model[BFA_ADAPTER_MODEL_NAME_LEN];
44 char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
45 wwn_t pwwn;
46 char node_symname[FC_SYMNAME_MAX];
47 char hw_ver[BFA_VERSION_LEN];
48 char fw_ver[BFA_VERSION_LEN];
49 char optrom_ver[BFA_VERSION_LEN];
50 char os_type[BFA_ADAPTER_OS_TYPE_LEN];
51 struct bfa_mfg_vpd_s vpd;
52 struct mac_s mac;
53
54 u8 nports;
55 u8 max_speed;
56 u8 prototype;
57 char asic_rev;
58
59 u8 pcie_gen;
60 u8 pcie_lanes_orig;
61 u8 pcie_lanes;
62 u8 cna_capable;
63 u8 is_mezz;
64};
65
66/**
67 * BFA adapter level events
68 * Arguments below are in BFAL context from Mgmt
69 * BFA_PORT_AEN_ADD: [in]: None [out]: serial_num, pwwn, nports
70 * BFA_PORT_AEN_REMOVE: [in]: pwwn [out]: serial_num, pwwn, nports
71 */
72enum bfa_adapter_aen_event {
73 BFA_ADAPTER_AEN_ADD = 1, /* New Adapter found event */
74 BFA_ADAPTER_AEN_REMOVE = 2, /* Adapter removed event */
75};
76
77struct bfa_adapter_aen_data_s {
78 char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
79 u32 nports; /* Number of NPorts */
80 wwn_t pwwn; /* WWN of one of its physical port */
81};
82
83#endif /* __BFA_DEFS_ADAPTER_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_aen.h b/drivers/scsi/bfa/include/defs/bfa_defs_aen.h
deleted file mode 100644
index 35244698fcdc..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_aen.h
+++ /dev/null
@@ -1,83 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_AEN_H__
19#define __BFA_DEFS_AEN_H__
20
21#include <defs/bfa_defs_types.h>
22#include <defs/bfa_defs_ioc.h>
23#include <defs/bfa_defs_adapter.h>
24#include <defs/bfa_defs_port.h>
25#include <defs/bfa_defs_lport.h>
26#include <defs/bfa_defs_rport.h>
27#include <defs/bfa_defs_itnim.h>
28#include <defs/bfa_defs_tin.h>
29#include <defs/bfa_defs_ipfc.h>
30#include <defs/bfa_defs_audit.h>
31#include <defs/bfa_defs_ethport.h>
32
33#define BFA_AEN_MAX_APP 5
34
35enum bfa_aen_app {
36 bfa_aen_app_bcu = 0, /* No thread for bcu */
37 bfa_aen_app_hcm = 1,
38 bfa_aen_app_cim = 2,
39 bfa_aen_app_snia = 3,
40 bfa_aen_app_test = 4, /* To be removed after unit test */
41};
42
43enum bfa_aen_category {
44 BFA_AEN_CAT_ADAPTER = 1,
45 BFA_AEN_CAT_PORT = 2,
46 BFA_AEN_CAT_LPORT = 3,
47 BFA_AEN_CAT_RPORT = 4,
48 BFA_AEN_CAT_ITNIM = 5,
49 BFA_AEN_CAT_TIN = 6,
50 BFA_AEN_CAT_IPFC = 7,
51 BFA_AEN_CAT_AUDIT = 8,
52 BFA_AEN_CAT_IOC = 9,
53 BFA_AEN_CAT_ETHPORT = 10,
54 BFA_AEN_MAX_CAT = 10
55};
56
57#pragma pack(1)
58union bfa_aen_data_u {
59 struct bfa_adapter_aen_data_s adapter;
60 struct bfa_port_aen_data_s port;
61 struct bfa_lport_aen_data_s lport;
62 struct bfa_rport_aen_data_s rport;
63 struct bfa_itnim_aen_data_s itnim;
64 struct bfa_audit_aen_data_s audit;
65 struct bfa_ioc_aen_data_s ioc;
66 struct bfa_ethport_aen_data_s ethport;
67};
68
69struct bfa_aen_entry_s {
70 enum bfa_aen_category aen_category;
71 int aen_type;
72 union bfa_aen_data_u aen_data;
73 struct bfa_timeval_s aen_tv;
74 s32 seq_num;
75 s32 bfad_num;
76 s32 rsvd[1];
77};
78
79#pragma pack()
80
81#define bfa_aen_event_t int
82
83#endif /* __BFA_DEFS_AEN_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_audit.h b/drivers/scsi/bfa/include/defs/bfa_defs_audit.h
deleted file mode 100644
index 8e3a962bf20c..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_audit.h
+++ /dev/null
@@ -1,38 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_AUDIT_H__
19#define __BFA_DEFS_AUDIT_H__
20
21#include <bfa_os_inc.h>
22
23/**
24 * BFA audit events
25 */
26enum bfa_audit_aen_event {
27 BFA_AUDIT_AEN_AUTH_ENABLE = 1,
28 BFA_AUDIT_AEN_AUTH_DISABLE = 2,
29};
30
31/**
32 * audit event data
33 */
34struct bfa_audit_aen_data_s {
35 wwn_t pwwn;
36};
37
38#endif /* __BFA_DEFS_AUDIT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_auth.h b/drivers/scsi/bfa/include/defs/bfa_defs_auth.h
deleted file mode 100644
index f56ed871bb99..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_auth.h
+++ /dev/null
@@ -1,134 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_DEFS_AUTH_H__
18#define __BFA_DEFS_AUTH_H__
19
20#include <defs/bfa_defs_types.h>
21
22#define PUBLIC_KEY 15409
23#define PRIVATE_KEY 19009
24#define KEY_LEN 32399
25#define BFA_AUTH_SECRET_STRING_LEN 256
26#define BFA_AUTH_FAIL_NO_PASSWORD 0xFE
27#define BFA_AUTH_FAIL_TIMEOUT 0xFF
28
29/**
30 * Authentication status
31 */
32enum bfa_auth_status {
33 BFA_AUTH_STATUS_NONE = 0, /* no authentication */
34 BFA_AUTH_UNINIT = 1, /* state - uninit */
35 BFA_AUTH_NEG_SEND = 2, /* state - negotiate send */
36 BFA_AUTH_CHAL_WAIT = 3, /* state - challenge wait */
37 BFA_AUTH_NEG_RETRY = 4, /* state - negotiate retry */
38 BFA_AUTH_REPLY_SEND = 5, /* state - reply send */
39 BFA_AUTH_STATUS_WAIT = 6, /* state - status wait */
40 BFA_AUTH_SUCCESS = 7, /* state - success */
41 BFA_AUTH_FAILED = 8, /* state - failed */
42 BFA_AUTH_STATUS_UNKNOWN = 9, /* authentication status unknown */
43};
44
45enum bfa_auth_rej_code {
46 BFA_AUTH_RJT_CODE_AUTH_FAILURE = 1, /* auth failure */
47 BFA_AUTH_RJT_CODE_LOGICAL_ERR = 2, /* logical error */
48};
49
50/**
51 * Authentication reject codes
52 */
53enum bfa_auth_rej_code_exp {
54 BFA_AUTH_MECH_NOT_USABLE = 1, /* auth. mechanism not usable */
55 BFA_AUTH_DH_GROUP_NOT_USABLE = 2, /* DH Group not usable */
56 BFA_AUTH_HASH_FUNC_NOT_USABLE = 3, /* hash Function not usable */
57 BFA_AUTH_AUTH_XACT_STARTED = 4, /* auth xact started */
58 BFA_AUTH_AUTH_FAILED = 5, /* auth failed */
59 BFA_AUTH_INCORRECT_PLD = 6, /* incorrect payload */
60 BFA_AUTH_INCORRECT_PROTO_MSG = 7, /* incorrect proto msg */
61 BFA_AUTH_RESTART_AUTH_PROTO = 8, /* restart auth protocol */
62 BFA_AUTH_AUTH_CONCAT_NOT_SUPP = 9, /* auth concat not supported */
63 BFA_AUTH_PROTO_VER_NOT_SUPP = 10,/* proto version not supported */
64};
65
66struct auth_proto_stats_s {
67 u32 auth_rjts;
68 u32 auth_negs;
69 u32 auth_dones;
70
71 u32 dhchap_challenges;
72 u32 dhchap_replies;
73 u32 dhchap_successes;
74};
75
76/**
77 * Authentication related statistics
78 */
79struct bfa_auth_stats_s {
80 u32 auth_failures; /* authentication failures */
81 u32 auth_successes; /* authentication successes*/
82 struct auth_proto_stats_s auth_rx_stats; /* Rx protocol stats */
83 struct auth_proto_stats_s auth_tx_stats; /* Tx protocol stats */
84};
85
86/**
87 * Authentication hash function algorithms
88 */
89enum bfa_auth_algo {
90 BFA_AUTH_ALGO_MD5 = 1, /* Message-Digest algorithm 5 */
91 BFA_AUTH_ALGO_SHA1 = 2, /* Secure Hash Algorithm 1 */
92 BFA_AUTH_ALGO_MS = 3, /* MD5, then SHA-1 */
93 BFA_AUTH_ALGO_SM = 4, /* SHA-1, then MD5 */
94};
95
96/**
97 * DH Groups
98 *
99 * Current value could be combination of one or more of the following values
100 */
101enum bfa_auth_group {
102 BFA_AUTH_GROUP_DHNULL = 0, /* DH NULL (value == 0) */
103 BFA_AUTH_GROUP_DH768 = 1, /* DH group 768 (value == 1) */
104 BFA_AUTH_GROUP_DH1024 = 2, /* DH group 1024 (value == 2) */
105 BFA_AUTH_GROUP_DH1280 = 4, /* DH group 1280 (value == 3) */
106 BFA_AUTH_GROUP_DH1536 = 8, /* DH group 1536 (value == 4) */
107
108 BFA_AUTH_GROUP_ALL = 256 /* Use default DH group order
109 * 0, 1, 2, 3, 4 */
110};
111
112/**
113 * Authentication secret sources
114 */
115enum bfa_auth_secretsource {
116 BFA_AUTH_SECSRC_LOCAL = 1, /* locally configured */
117 BFA_AUTH_SECSRC_RADIUS = 2, /* use radius server */
118 BFA_AUTH_SECSRC_TACACS = 3, /* TACACS server */
119};
120
121/**
122 * Authentication attributes
123 */
124struct bfa_auth_attr_s {
125 enum bfa_auth_status status;
126 enum bfa_auth_algo algo;
127 enum bfa_auth_group dh_grp;
128 enum bfa_auth_rej_code rjt_code;
129 enum bfa_auth_rej_code_exp rjt_code_exp;
130 u8 secret_set;
131 u8 resv[3];
132};
133
134#endif /* __BFA_DEFS_AUTH_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_boot.h b/drivers/scsi/bfa/include/defs/bfa_defs_boot.h
deleted file mode 100644
index 0fca10b6ad10..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_boot.h
+++ /dev/null
@@ -1,81 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_BOOT_H__
19#define __BFA_DEFS_BOOT_H__
20
21#include <protocol/types.h>
22#include <defs/bfa_defs_types.h>
23#include <defs/bfa_defs_pport.h>
24
25enum {
26 BFA_BOOT_BOOTLUN_MAX = 4, /* maximum boot lun per IOC */
27 BFA_PREBOOT_BOOTLUN_MAX = 8, /* maximum preboot lun per IOC */
28
29};
30
31#define BOOT_CFG_REV1 1
32
33/**
34 * Boot options setting. Boot options setting determines from where
35 * to get the boot lun information
36 */
37enum bfa_boot_bootopt {
38 BFA_BOOT_AUTO_DISCOVER = 0, /* Boot from blun provided by fabric */
39 BFA_BOOT_STORED_BLUN = 1, /* Boot from bluns stored in flash */
40 BFA_BOOT_FIRST_LUN = 2, /* Boot from first discovered blun */
41};
42
43/**
44 * Boot lun information.
45 */
46struct bfa_boot_bootlun_s {
47 wwn_t pwwn; /* port wwn of target */
48 lun_t lun; /* 64-bit lun */
49};
50
51/**
52 * BOOT boot configuraton
53 */
54struct bfa_boot_cfg_s {
55 u8 version;
56 u8 rsvd1;
57 u16 chksum;
58
59 u8 enable; /* enable/disable SAN boot */
60 u8 speed; /* boot speed settings */
61 u8 topology; /* boot topology setting */
62 u8 bootopt; /* bfa_boot_bootopt_t */
63
64 u32 nbluns; /* number of boot luns */
65
66 u32 rsvd2;
67
68 struct bfa_boot_bootlun_s blun[BFA_BOOT_BOOTLUN_MAX];
69 struct bfa_boot_bootlun_s blun_disc[BFA_BOOT_BOOTLUN_MAX];
70};
71
72struct bfa_boot_pbc_s {
73 u8 enable; /* enable/disable SAN boot */
74 u8 speed; /* boot speed settings */
75 u8 topology; /* boot topology setting */
76 u8 rsvd1;
77 u32 nbluns; /* number of boot luns */
78 struct bfa_boot_bootlun_s pblun[BFA_PREBOOT_BOOTLUN_MAX];
79};
80
81#endif /* __BFA_DEFS_BOOT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_cee.h b/drivers/scsi/bfa/include/defs/bfa_defs_cee.h
deleted file mode 100644
index 6eaf519eccdc..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_cee.h
+++ /dev/null
@@ -1,157 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * bfa_defs_cee.h Interface declarations between host based
7 * BFAL and DCBX/LLDP module in Firmware
8 *
9 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License (GPL) Version 2 as
13 * published by the Free Software Foundation
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 */
20#ifndef __BFA_DEFS_CEE_H__
21#define __BFA_DEFS_CEE_H__
22
23#include <defs/bfa_defs_types.h>
24#include <defs/bfa_defs_pport.h>
25#include <protocol/types.h>
26
27#pragma pack(1)
28
29#define BFA_CEE_LLDP_MAX_STRING_LEN (128)
30
31#define BFA_CEE_LLDP_SYS_CAP_OTHER 0x0001
32#define BFA_CEE_LLDP_SYS_CAP_REPEATER 0x0002
33#define BFA_CEE_LLDP_SYS_CAP_MAC_BRIDGE 0x0004
34#define BFA_CEE_LLDP_SYS_CAP_WLAN_AP 0x0008
35#define BFA_CEE_LLDP_SYS_CAP_ROUTER 0x0010
36#define BFA_CEE_LLDP_SYS_CAP_TELEPHONE 0x0020
37#define BFA_CEE_LLDP_SYS_CAP_DOCSIS_CD 0x0040
38#define BFA_CEE_LLDP_SYS_CAP_STATION 0x0080
39#define BFA_CEE_LLDP_SYS_CAP_CVLAN 0x0100
40#define BFA_CEE_LLDP_SYS_CAP_SVLAN 0x0200
41#define BFA_CEE_LLDP_SYS_CAP_TPMR 0x0400
42
43
44/* LLDP string type */
45struct bfa_cee_lldp_str_s {
46 u8 sub_type;
47 u8 len;
48 u8 rsvd[2];
49 u8 value[BFA_CEE_LLDP_MAX_STRING_LEN];
50};
51
52
53/* LLDP parameters */
54struct bfa_cee_lldp_cfg_s {
55 struct bfa_cee_lldp_str_s chassis_id;
56 struct bfa_cee_lldp_str_s port_id;
57 struct bfa_cee_lldp_str_s port_desc;
58 struct bfa_cee_lldp_str_s sys_name;
59 struct bfa_cee_lldp_str_s sys_desc;
60 struct bfa_cee_lldp_str_s mgmt_addr;
61 u16 time_to_interval;
62 u16 enabled_system_cap;
63};
64
65enum bfa_cee_dcbx_version_e {
66 DCBX_PROTOCOL_PRECEE = 1,
67 DCBX_PROTOCOL_CEE = 2,
68};
69
70enum bfa_cee_lls_e {
71 CEE_LLS_DOWN_NO_TLV = 0, /* LLS is down because the TLV not sent by
72 * the peer */
73 CEE_LLS_DOWN = 1, /* LLS is down as advertised by the peer */
74 CEE_LLS_UP = 2,
75};
76
77/* CEE/DCBX parameters */
78struct bfa_cee_dcbx_cfg_s {
79 u8 pgid[8];
80 u8 pg_percentage[8];
81 u8 pfc_enabled; /* bitmap of priorties with PFC enabled */
82 u8 fcoe_user_priority; /* bitmap of priorities used for FcoE
83 * traffic */
84 u8 dcbx_version; /* operating version:CEE or preCEE */
85 u8 lls_fcoe; /* FCoE Logical Link Status */
86 u8 lls_lan; /* LAN Logical Link Status */
87 u8 rsvd[3];
88};
89
90/* CEE status */
91/* Making this to tri-state for the benefit of port list command */
92enum bfa_cee_status_e {
93 CEE_UP = 0,
94 CEE_PHY_UP = 1,
95 CEE_LOOPBACK = 2,
96 CEE_PHY_DOWN = 3,
97};
98
99/* CEE Query */
100struct bfa_cee_attr_s {
101 u8 cee_status;
102 u8 error_reason;
103 struct bfa_cee_lldp_cfg_s lldp_remote;
104 struct bfa_cee_dcbx_cfg_s dcbx_remote;
105 mac_t src_mac;
106 u8 link_speed;
107 u8 nw_priority;
108 u8 filler[2];
109};
110
111
112
113
114/* LLDP/DCBX/CEE Statistics */
115
116struct bfa_cee_lldp_stats_s {
117 u32 frames_transmitted;
118 u32 frames_aged_out;
119 u32 frames_discarded;
120 u32 frames_in_error;
121 u32 frames_rcvd;
122 u32 tlvs_discarded;
123 u32 tlvs_unrecognized;
124};
125
126struct bfa_cee_dcbx_stats_s {
127 u32 subtlvs_unrecognized;
128 u32 negotiation_failed;
129 u32 remote_cfg_changed;
130 u32 tlvs_received;
131 u32 tlvs_invalid;
132 u32 seqno;
133 u32 ackno;
134 u32 recvd_seqno;
135 u32 recvd_ackno;
136};
137
138struct bfa_cee_cfg_stats_s {
139 u32 cee_status_down;
140 u32 cee_status_up;
141 u32 cee_hw_cfg_changed;
142 u32 recvd_invalid_cfg;
143};
144
145
146struct bfa_cee_stats_s {
147 struct bfa_cee_lldp_stats_s lldp_stats;
148 struct bfa_cee_dcbx_stats_s dcbx_stats;
149 struct bfa_cee_cfg_stats_s cfg_stats;
150};
151
152#pragma pack()
153
154
155#endif /* __BFA_DEFS_CEE_H__ */
156
157
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_driver.h b/drivers/scsi/bfa/include/defs/bfa_defs_driver.h
deleted file mode 100644
index 7d00d00d3969..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_driver.h
+++ /dev/null
@@ -1,41 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_DRIVER_H__
19#define __BFA_DEFS_DRIVER_H__
20
21/**
22 * Driver statistics
23 */
24struct bfa_driver_stats_s {
25 u16 tm_io_abort;
26 u16 tm_io_abort_comp;
27 u16 tm_lun_reset;
28 u16 tm_lun_reset_comp;
29 u16 tm_target_reset;
30 u16 tm_bus_reset;
31 u16 ioc_restart; /* IOC restart count */
32 u16 rsvd;
33 u64 control_req;
34 u64 input_req;
35 u64 output_req;
36 u64 input_words;
37 u64 output_words;
38};
39
40
41#endif /* __BFA_DEFS_DRIVER_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_ethport.h b/drivers/scsi/bfa/include/defs/bfa_defs_ethport.h
deleted file mode 100644
index b4fa0923aa89..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_ethport.h
+++ /dev/null
@@ -1,99 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_ETHPORT_H__
19#define __BFA_DEFS_ETHPORT_H__
20
21#include <defs/bfa_defs_status.h>
22#include <defs/bfa_defs_port.h>
23#include <protocol/types.h>
24#include <cna/pstats/phyport_defs.h>
25#include <cna/pstats/ethport_defs.h>
26
27struct bna_tx_info_s {
28 u32 miniport_state;
29 u32 adapter_state;
30 u64 tx_count;
31 u64 tx_wi;
32 u64 tx_sg;
33 u64 tx_tcp_chksum;
34 u64 tx_udp_chksum;
35 u64 tx_ip_chksum;
36 u64 tx_lsov1;
37 u64 tx_lsov2;
38 u64 tx_max_sg_len ;
39};
40
41struct bna_rx_queue_info_s {
42 u16 q_id ;
43 u16 buf_size ;
44 u16 buf_count ;
45 u16 rsvd ;
46 u64 rx_count ;
47 u64 rx_dropped ;
48 u64 rx_unsupported ;
49 u64 rx_internal_err ;
50 u64 rss_count ;
51 u64 vlan_count ;
52 u64 rx_tcp_chksum ;
53 u64 rx_udp_chksum ;
54 u64 rx_ip_chksum ;
55 u64 rx_hds ;
56};
57
58struct bna_rx_q_set_s {
59 u16 q_set_type;
60 u32 miniport_state;
61 u32 adapter_state;
62 struct bna_rx_queue_info_s rx_queue[2];
63};
64
65struct bna_port_stats_s {
66 struct bna_tx_info_s tx_stats;
67 u16 qset_count ;
68 struct bna_rx_q_set_s rx_qset[8];
69};
70
71struct bfa_ethport_stats_s {
72 struct bna_stats_txf txf_stats[1];
73 struct bna_stats_rxf rxf_stats[1];
74 struct bnad_drv_stats drv_stats;
75};
76
77/**
78 * Ethernet port events
79 * Arguments below are in BFAL context from Mgmt
80 * BFA_PORT_AEN_ETH_LINKUP: [in]: mac [out]: mac
81 * BFA_PORT_AEN_ETH_LINKDOWN: [in]: mac [out]: mac
82 * BFA_PORT_AEN_ETH_ENABLE: [in]: mac [out]: mac
83 * BFA_PORT_AEN_ETH_DISABLE: [in]: mac [out]: mac
84 *
85 */
86enum bfa_ethport_aen_event {
87 BFA_ETHPORT_AEN_LINKUP = 1, /* Base Port Ethernet link up event */
88 BFA_ETHPORT_AEN_LINKDOWN = 2, /* Base Port Ethernet link down event */
89 BFA_ETHPORT_AEN_ENABLE = 3, /* Base Port Ethernet link enable event */
90 BFA_ETHPORT_AEN_DISABLE = 4, /* Base Port Ethernet link disable
91 * event */
92};
93
94struct bfa_ethport_aen_data_s {
95 mac_t mac; /* MAC address of the physical port */
96};
97
98
99#endif /* __BFA_DEFS_ETHPORT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_fcpim.h b/drivers/scsi/bfa/include/defs/bfa_defs_fcpim.h
deleted file mode 100644
index c08f4f5026ac..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_fcpim.h
+++ /dev/null
@@ -1,45 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_DEFS_FCPIM_H__
18#define __BFA_DEFS_FCPIM_H__
19
20struct bfa_fcpim_stats_s {
21 u32 total_ios; /* Total IO count */
22 u32 qresumes; /* IO waiting for CQ space */
23 u32 no_iotags; /* NO IO contexts */
24 u32 io_aborts; /* IO abort requests */
25 u32 no_tskims; /* NO task management contexts */
26 u32 iocomp_ok; /* IO completions with OK status */
27 u32 iocomp_underrun; /* IO underrun (good) */
28 u32 iocomp_overrun; /* IO overrun (good) */
29 u32 iocomp_aborted; /* Aborted IO requests */
30 u32 iocomp_timedout; /* IO timeouts */
31 u32 iocom_nexus_abort; /* IO selection timeouts */
32 u32 iocom_proto_err; /* IO protocol errors */
33 u32 iocom_dif_err; /* IO SBC-3 protection errors */
34 u32 iocom_tm_abort; /* IO aborted by TM requests */
35 u32 iocom_sqer_needed; /* IO retry for SQ error
36 *recovery */
37 u32 iocom_res_free; /* Delayed freeing of IO resources */
38 u32 iocomp_scsierr; /* IO with non-good SCSI status */
39 u32 iocom_hostabrts; /* Host IO abort requests */
40 u32 iocom_utags; /* IO comp with unknown tags */
41 u32 io_cleanups; /* IO implicitly aborted */
42 u32 io_tmaborts; /* IO aborted due to TM commands */
43 u32 rsvd;
44};
45#endif /*__BFA_DEFS_FCPIM_H__*/
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_fcport.h b/drivers/scsi/bfa/include/defs/bfa_defs_fcport.h
deleted file mode 100644
index af86a6396439..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_fcport.h
+++ /dev/null
@@ -1,88 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * bfa_defs_fcport.h
7 *
8 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License (GPL) Version 2 as
12 * published by the Free Software Foundation
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 */
19#ifndef __BFA_DEFS_FCPORT_H__
20#define __BFA_DEFS_FCPORT_H__
21
22#include <defs/bfa_defs_types.h>
23#include <protocol/types.h>
24
25#pragma pack(1)
26
27/**
28 * FCoE statistics
29 */
30struct bfa_fcoe_stats_s {
31 u64 secs_reset; /* Seconds since stats reset */
32 u64 cee_linkups; /* CEE link up */
33 u64 cee_linkdns; /* CEE link down */
34 u64 fip_linkups; /* FIP link up */
35 u64 fip_linkdns; /* FIP link down */
36 u64 fip_fails; /* FIP failures */
37 u64 mac_invalids; /* Invalid mac assignments */
38 u64 vlan_req; /* Vlan requests */
39 u64 vlan_notify; /* Vlan notifications */
40 u64 vlan_err; /* Vlan notification errors */
41 u64 vlan_timeouts; /* Vlan request timeouts */
42 u64 vlan_invalids; /* Vlan invalids */
43 u64 disc_req; /* Discovery requests */
44 u64 disc_rsp; /* Discovery responses */
45 u64 disc_err; /* Discovery error frames */
46 u64 disc_unsol; /* Discovery unsolicited */
47 u64 disc_timeouts; /* Discovery timeouts */
48 u64 disc_fcf_unavail; /* Discovery FCF not avail */
49 u64 linksvc_unsupp; /* FIP link service req unsupp. */
50 u64 linksvc_err; /* FIP link service req errors */
51 u64 logo_req; /* FIP logos received */
52 u64 clrvlink_req; /* Clear virtual link requests */
53 u64 op_unsupp; /* FIP operation unsupp. */
54 u64 untagged; /* FIP untagged frames */
55 u64 txf_ucast; /* Tx FCoE unicast frames */
56 u64 txf_ucast_vlan; /* Tx FCoE unicast vlan frames */
57 u64 txf_ucast_octets; /* Tx FCoE unicast octets */
58 u64 txf_mcast; /* Tx FCoE mutlicast frames */
59 u64 txf_mcast_vlan; /* Tx FCoE mutlicast vlan frames */
60 u64 txf_mcast_octets; /* Tx FCoE multicast octets */
61 u64 txf_bcast; /* Tx FCoE broadcast frames */
62 u64 txf_bcast_vlan; /* Tx FCoE broadcast vlan frames */
63 u64 txf_bcast_octets; /* Tx FCoE broadcast octets */
64 u64 txf_timeout; /* Tx timeouts */
65 u64 txf_parity_errors; /* Transmit parity err */
66 u64 txf_fid_parity_errors; /* Transmit FID parity err */
67 u64 rxf_ucast_octets; /* Rx FCoE unicast octets */
68 u64 rxf_ucast; /* Rx FCoE unicast frames */
69 u64 rxf_ucast_vlan; /* Rx FCoE unicast vlan frames */
70 u64 rxf_mcast_octets; /* Rx FCoE multicast octets */
71 u64 rxf_mcast; /* Rx FCoE multicast frames */
72 u64 rxf_mcast_vlan; /* Rx FCoE multicast vlan frames */
73 u64 rxf_bcast_octets; /* Rx FCoE broadcast octets */
74 u64 rxf_bcast; /* Rx FCoE broadcast frames */
75 u64 rxf_bcast_vlan; /* Rx FCoE broadcast vlan frames */
76};
77
78/**
79 * QoS or FCoE stats (fcport stats excluding physical FC port stats)
80 */
81union bfa_fcport_stats_u {
82 struct bfa_qos_stats_s fcqos;
83 struct bfa_fcoe_stats_s fcoe;
84};
85
86#pragma pack()
87
88#endif /* __BFA_DEFS_FCPORT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h b/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h
deleted file mode 100644
index add0a05d941d..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h
+++ /dev/null
@@ -1,158 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_IOC_H__
19#define __BFA_DEFS_IOC_H__
20
21#include <protocol/types.h>
22#include <defs/bfa_defs_types.h>
23#include <defs/bfa_defs_version.h>
24#include <defs/bfa_defs_adapter.h>
25#include <defs/bfa_defs_pm.h>
26
27enum {
28 BFA_IOC_DRIVER_LEN = 16,
29 BFA_IOC_CHIP_REV_LEN = 8,
30};
31
32/**
33 * Driver and firmware versions.
34 */
35struct bfa_ioc_driver_attr_s {
36 char driver[BFA_IOC_DRIVER_LEN]; /* driver name */
37 char driver_ver[BFA_VERSION_LEN]; /* driver version */
38 char fw_ver[BFA_VERSION_LEN]; /* firmware version*/
39 char bios_ver[BFA_VERSION_LEN]; /* bios version */
40 char efi_ver[BFA_VERSION_LEN]; /* EFI version */
41 char ob_ver[BFA_VERSION_LEN]; /* openboot version*/
42};
43
44/**
45 * IOC PCI device attributes
46 */
47struct bfa_ioc_pci_attr_s {
48 u16 vendor_id; /* PCI vendor ID */
49 u16 device_id; /* PCI device ID */
50 u16 ssid; /* subsystem ID */
51 u16 ssvid; /* subsystem vendor ID */
52 u32 pcifn; /* PCI device function */
53 u32 rsvd; /* padding */
54 u8 chip_rev[BFA_IOC_CHIP_REV_LEN]; /* chip revision */
55};
56
57/**
58 * IOC states
59 */
60enum bfa_ioc_state {
61 BFA_IOC_RESET = 1, /* IOC is in reset state */
62 BFA_IOC_SEMWAIT = 2, /* Waiting for IOC hardware semaphore */
63 BFA_IOC_HWINIT = 3, /* IOC hardware is being initialized */
64 BFA_IOC_GETATTR = 4, /* IOC is being configured */
65 BFA_IOC_OPERATIONAL = 5, /* IOC is operational */
66 BFA_IOC_INITFAIL = 6, /* IOC hardware failure */
67 BFA_IOC_HBFAIL = 7, /* IOC heart-beat failure */
68 BFA_IOC_DISABLING = 8, /* IOC is being disabled */
69 BFA_IOC_DISABLED = 9, /* IOC is disabled */
70 BFA_IOC_FWMISMATCH = 10, /* IOC firmware different from drivers */
71};
72
73/**
74 * IOC firmware stats
75 */
76struct bfa_fw_ioc_stats_s {
77 u32 hb_count;
78 u32 cfg_reqs;
79 u32 enable_reqs;
80 u32 disable_reqs;
81 u32 stats_reqs;
82 u32 clrstats_reqs;
83 u32 unknown_reqs;
84 u32 ic_reqs; /* interrupt coalesce reqs */
85};
86
87/**
88 * IOC driver stats
89 */
90struct bfa_ioc_drv_stats_s {
91 u32 ioc_isrs;
92 u32 ioc_enables;
93 u32 ioc_disables;
94 u32 ioc_hbfails;
95 u32 ioc_boots;
96 u32 stats_tmos;
97 u32 hb_count;
98 u32 disable_reqs;
99 u32 enable_reqs;
100 u32 disable_replies;
101 u32 enable_replies;
102};
103
104/**
105 * IOC statistics
106 */
107struct bfa_ioc_stats_s {
108 struct bfa_ioc_drv_stats_s drv_stats; /* driver IOC stats */
109 struct bfa_fw_ioc_stats_s fw_stats; /* firmware IOC stats */
110};
111
112
113enum bfa_ioc_type_e {
114 BFA_IOC_TYPE_FC = 1,
115 BFA_IOC_TYPE_FCoE = 2,
116 BFA_IOC_TYPE_LL = 3,
117};
118
119/**
120 * IOC attributes returned in queries
121 */
122struct bfa_ioc_attr_s {
123 enum bfa_ioc_type_e ioc_type;
124 enum bfa_ioc_state state; /* IOC state */
125 struct bfa_adapter_attr_s adapter_attr; /* HBA attributes */
126 struct bfa_ioc_driver_attr_s driver_attr; /* driver attr */
127 struct bfa_ioc_pci_attr_s pci_attr;
128 u8 port_id; /* port number */
129 u8 rsvd[7]; /* 64bit align */
130};
131
132/**
133 * BFA IOC level events
134 */
135enum bfa_ioc_aen_event {
136 BFA_IOC_AEN_HBGOOD = 1, /* Heart Beat restore event */
137 BFA_IOC_AEN_HBFAIL = 2, /* Heart Beat failure event */
138 BFA_IOC_AEN_ENABLE = 3, /* IOC enabled event */
139 BFA_IOC_AEN_DISABLE = 4, /* IOC disabled event */
140 BFA_IOC_AEN_FWMISMATCH = 5, /* IOC firmware mismatch */
141 BFA_IOC_AEN_FWCFG_ERROR = 6, /* IOC firmware config error */
142 BFA_IOC_AEN_INVALID_VENDOR = 7,
143 BFA_IOC_AEN_INVALID_NWWN = 8, /* Zero NWWN */
144 BFA_IOC_AEN_INVALID_PWWN = 9 /* Zero PWWN */
145
146};
147
148/**
149 * BFA IOC level event data, now just a place holder
150 */
151struct bfa_ioc_aen_data_s {
152 wwn_t pwwn;
153 s16 ioc_type;
154 mac_t mac;
155};
156
157#endif /* __BFA_DEFS_IOC_H__ */
158
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h b/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h
deleted file mode 100644
index 31e728a631ed..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h
+++ /dev/null
@@ -1,322 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_IOCFC_H__
19#define __BFA_DEFS_IOCFC_H__
20
21#include <protocol/types.h>
22#include <defs/bfa_defs_types.h>
23#include <defs/bfa_defs_version.h>
24#include <defs/bfa_defs_adapter.h>
25#include <defs/bfa_defs_pm.h>
26
27#define BFA_IOCFC_INTR_DELAY 1125
28#define BFA_IOCFC_INTR_LATENCY 225
29#define BFA_IOCFCOE_INTR_DELAY 25
30#define BFA_IOCFCOE_INTR_LATENCY 5
31
32/**
33 * Interrupt coalescing configuration.
34 */
35struct bfa_iocfc_intr_attr_s {
36 bfa_boolean_t coalesce; /* enable/disable coalescing */
37 u16 latency; /* latency in microseconds */
38 u16 delay; /* delay in microseconds */
39};
40
41/**
42 * IOC firmware configuraton
43 */
44struct bfa_iocfc_fwcfg_s {
45 u16 num_fabrics; /* number of fabrics */
46 u16 num_lports; /* number of local lports */
47 u16 num_rports; /* number of remote ports */
48 u16 num_ioim_reqs; /* number of IO reqs */
49 u16 num_tskim_reqs; /* task management requests */
50 u16 num_iotm_reqs; /* number of TM IO reqs */
51 u16 num_tsktm_reqs; /* TM task management requests*/
52 u16 num_fcxp_reqs; /* unassisted FC exchanges */
53 u16 num_uf_bufs; /* unsolicited recv buffers */
54 u8 num_cqs;
55 u8 fw_tick_res; /*!< FW clock resolution in ms */
56 u8 rsvd[4];
57
58};
59
60struct bfa_iocfc_drvcfg_s {
61 u16 num_reqq_elems; /* number of req queue elements */
62 u16 num_rspq_elems; /* number of rsp queue elements */
63 u16 num_sgpgs; /* number of total SG pages */
64 u16 num_sboot_tgts; /* number of SAN boot targets */
65 u16 num_sboot_luns; /* number of SAN boot luns */
66 u16 ioc_recover; /* IOC recovery mode */
67 u16 min_cfg; /* minimum configuration */
68 u16 path_tov; /* device path timeout */
69 bfa_boolean_t delay_comp; /* delay completion of
70 failed inflight IOs */
71 u32 rsvd;
72};
73/**
74 * IOC configuration
75 */
76struct bfa_iocfc_cfg_s {
77 struct bfa_iocfc_fwcfg_s fwcfg; /* firmware side config */
78 struct bfa_iocfc_drvcfg_s drvcfg; /* driver side config */
79};
80
81/**
82 * IOC firmware IO stats
83 */
84struct bfa_fw_io_stats_s {
85 u32 host_abort; /* IO aborted by host driver*/
86 u32 host_cleanup; /* IO clean up by host driver */
87
88 u32 fw_io_timeout; /* IOs timedout */
89 u32 fw_frm_parse; /* frame parsed by f/w */
90 u32 fw_frm_data; /* fcp_data frame parsed by f/w */
91 u32 fw_frm_rsp; /* fcp_rsp frame parsed by f/w */
92 u32 fw_frm_xfer_rdy; /* xfer_rdy frame parsed by f/w */
93 u32 fw_frm_bls_acc; /* BLS ACC frame parsed by f/w */
94 u32 fw_frm_tgt_abort; /* target ABTS parsed by f/w */
95 u32 fw_frm_unknown; /* unknown parsed by f/w */
96 u32 fw_data_dma; /* f/w DMA'ed the data frame */
97 u32 fw_frm_drop; /* f/w drop the frame */
98
99 u32 rec_timeout; /* FW rec timed out */
100 u32 error_rec; /* FW sending rec on
101 * an error condition*/
102 u32 wait_for_si; /* FW wait for SI */
103 u32 rec_rsp_inval; /* REC rsp invalid */
104 u32 seqr_io_abort; /* target does not know cmd so abort */
105 u32 seqr_io_retry; /* SEQR failed so retry IO */
106
107 u32 itn_cisc_upd_rsp; /* ITN cisc updated on fcp_rsp */
108 u32 itn_cisc_upd_data; /* ITN cisc updated on fcp_data */
109 u32 itn_cisc_upd_xfer_rdy; /* ITN cisc updated on fcp_data */
110
111 u32 fcp_data_lost; /* fcp data lost */
112
113 u32 ro_set_in_xfer_rdy; /* Target set RO in Xfer_rdy frame */
114 u32 xfer_rdy_ooo_err; /* Out of order Xfer_rdy received */
115 u32 xfer_rdy_unknown_err; /* unknown error in xfer_rdy frame */
116
117 u32 io_abort_timeout; /* ABTS timedout */
118 u32 sler_initiated; /* SLER initiated */
119
120 u32 unexp_fcp_rsp; /* fcp response in wrong state */
121
122 u32 fcp_rsp_under_run; /* fcp rsp IO underrun */
123 u32 fcp_rsp_under_run_wr; /* fcp rsp IO underrun for write */
124 u32 fcp_rsp_under_run_err; /* fcp rsp IO underrun error */
125 u32 fcp_rsp_resid_inval; /* invalid residue */
126 u32 fcp_rsp_over_run; /* fcp rsp IO overrun */
127 u32 fcp_rsp_over_run_err; /* fcp rsp IO overrun error */
128 u32 fcp_rsp_proto_err; /* protocol error in fcp rsp */
129 u32 fcp_rsp_sense_err; /* error in sense info in fcp rsp */
130 u32 fcp_conf_req; /* FCP conf requested */
131
132 u32 tgt_aborted_io; /* target initiated abort */
133
134 u32 ioh_edtov_timeout_event;/* IOH edtov timer popped */
135 u32 ioh_fcp_rsp_excp_event; /* IOH FCP_RSP exception */
136 u32 ioh_fcp_conf_event; /* IOH FCP_CONF */
137 u32 ioh_mult_frm_rsp_event; /* IOH multi_frame FCP_RSP */
138 u32 ioh_hit_class2_event; /* IOH hit class2 */
139 u32 ioh_miss_other_event; /* IOH miss other */
140 u32 ioh_seq_cnt_err_event; /* IOH seq cnt error */
141 u32 ioh_len_err_event; /* IOH len error - fcp_dl !=
142 * bytes xfered */
143 u32 ioh_seq_len_err_event; /* IOH seq len error */
144 u32 ioh_data_oor_event; /* Data out of range */
145 u32 ioh_ro_ooo_event; /* Relative offset out of range */
146 u32 ioh_cpu_owned_event; /* IOH hit -iost owned by f/w */
147 u32 ioh_unexp_frame_event; /* unexpected frame recieved
148 * count */
149 u32 ioh_err_int; /* IOH error int during data-phase
150 * for scsi write
151 */
152};
153
154/**
155 * IOC port firmware stats
156 */
157
158struct bfa_fw_port_fpg_stats_s {
159 u32 intr_evt;
160 u32 intr;
161 u32 intr_excess;
162 u32 intr_cause0;
163 u32 intr_other;
164 u32 intr_other_ign;
165 u32 sig_lost;
166 u32 sig_regained;
167 u32 sync_lost;
168 u32 sync_to;
169 u32 sync_regained;
170 u32 div2_overflow;
171 u32 div2_underflow;
172 u32 efifo_overflow;
173 u32 efifo_underflow;
174 u32 idle_rx;
175 u32 lrr_rx;
176 u32 lr_rx;
177 u32 ols_rx;
178 u32 nos_rx;
179 u32 lip_rx;
180 u32 arbf0_rx;
181 u32 arb_rx;
182 u32 mrk_rx;
183 u32 const_mrk_rx;
184 u32 prim_unknown;
185};
186
187
188struct bfa_fw_port_lksm_stats_s {
189 u32 hwsm_success; /* hwsm state machine success */
190 u32 hwsm_fails; /* hwsm fails */
191 u32 hwsm_wdtov; /* hwsm timed out */
192 u32 swsm_success; /* swsm success */
193 u32 swsm_fails; /* swsm fails */
194 u32 swsm_wdtov; /* swsm timed out */
195 u32 busybufs; /* link init failed due to busybuf */
196 u32 buf_waits; /* bufwait state entries */
197 u32 link_fails; /* link failures */
198 u32 psp_errors; /* primitive sequence protocol errors */
199 u32 lr_unexp; /* No. of times LR rx-ed unexpectedly */
200 u32 lrr_unexp; /* No. of times LRR rx-ed unexpectedly */
201 u32 lr_tx; /* No. of times LR tx started */
202 u32 lrr_tx; /* No. of times LRR tx started */
203 u32 ols_tx; /* No. of times OLS tx started */
204 u32 nos_tx; /* No. of times NOS tx started */
205 u32 hwsm_lrr_rx; /* No. of times LRR rx-ed by HWSM */
206 u32 hwsm_lr_rx; /* No. of times LR rx-ed by HWSM */
207};
208
209
210struct bfa_fw_port_snsm_stats_s {
211 u32 hwsm_success; /* Successful hwsm terminations */
212 u32 hwsm_fails; /* hwsm fail count */
213 u32 hwsm_wdtov; /* hwsm timed out */
214 u32 swsm_success; /* swsm success */
215 u32 swsm_wdtov; /* swsm timed out */
216 u32 error_resets; /* error resets initiated by upsm */
217 u32 sync_lost; /* Sync loss count */
218 u32 sig_lost; /* Signal loss count */
219};
220
221
222struct bfa_fw_port_physm_stats_s {
223 u32 module_inserts; /* Module insert count */
224 u32 module_xtracts; /* Module extracts count */
225 u32 module_invalids; /* Invalid module inserted count */
226 u32 module_read_ign; /* Module validation status ignored */
227 u32 laser_faults; /* Laser fault count */
228 u32 rsvd;
229};
230
231
232struct bfa_fw_fip_stats_s {
233 u32 vlan_req; /* vlan discovery requests */
234 u32 vlan_notify; /* vlan notifications */
235 u32 vlan_err; /* vlan response error */
236 u32 vlan_timeouts; /* vlan disvoery timeouts */
237 u32 vlan_invalids; /* invalid vlan in discovery advert. */
238 u32 disc_req; /* Discovery solicit requests */
239 u32 disc_rsp; /* Discovery solicit response */
240 u32 disc_err; /* Discovery advt. parse errors */
241 u32 disc_unsol; /* Discovery unsolicited */
242 u32 disc_timeouts; /* Discovery timeouts */
243 u32 disc_fcf_unavail; /* Discovery FCF Not Avail. */
244 u32 linksvc_unsupp; /* Unsupported link service req */
245 u32 linksvc_err; /* Parse error in link service req */
246 u32 logo_req; /* FIP logos received */
247 u32 clrvlink_req; /* Clear virtual link req */
248 u32 op_unsupp; /* Unsupported FIP operation */
249 u32 untagged; /* Untagged frames (ignored) */
250 u32 invalid_version; /*!< Invalid FIP version */
251};
252
253
254struct bfa_fw_lps_stats_s {
255 u32 mac_invalids; /* Invalid mac assigned */
256 u32 rsvd;
257};
258
259
260struct bfa_fw_fcoe_stats_s {
261 u32 cee_linkups; /* CEE link up count */
262 u32 cee_linkdns; /* CEE link down count */
263 u32 fip_linkups; /* FIP link up count */
264 u32 fip_linkdns; /* FIP link up count */
265 u32 fip_fails; /* FIP fail count */
266 u32 mac_invalids; /* Invalid mac assigned */
267};
268
269/**
270 * IOC firmware FCoE port stats
271 */
272struct bfa_fw_fcoe_port_stats_s {
273 struct bfa_fw_fcoe_stats_s fcoe_stats;
274 struct bfa_fw_fip_stats_s fip_stats;
275};
276
277/**
278 * IOC firmware FC port stats
279 */
280struct bfa_fw_fc_port_stats_s {
281 struct bfa_fw_port_fpg_stats_s fpg_stats;
282 struct bfa_fw_port_physm_stats_s physm_stats;
283 struct bfa_fw_port_snsm_stats_s snsm_stats;
284 struct bfa_fw_port_lksm_stats_s lksm_stats;
285};
286
287/**
288 * IOC firmware FC port stats
289 */
290union bfa_fw_port_stats_s {
291 struct bfa_fw_fc_port_stats_s fc_stats;
292 struct bfa_fw_fcoe_port_stats_s fcoe_stats;
293};
294
295/**
296 * IOC firmware stats
297 */
298struct bfa_fw_stats_s {
299 struct bfa_fw_ioc_stats_s ioc_stats;
300 struct bfa_fw_io_stats_s io_stats;
301 union bfa_fw_port_stats_s port_stats;
302};
303
304/**
305 * IOC statistics
306 */
307struct bfa_iocfc_stats_s {
308 struct bfa_fw_stats_s fw_stats; /* firmware IOC stats */
309};
310
311/**
312 * IOC attributes returned in queries
313 */
314struct bfa_iocfc_attr_s {
315 struct bfa_iocfc_cfg_s config; /* IOCFC config */
316 struct bfa_iocfc_intr_attr_s intr_attr; /* interrupt attr */
317};
318
319#define BFA_IOCFC_PATHTOV_MAX 60
320#define BFA_IOCFC_QDEPTH_MAX 2000
321
322#endif /* __BFA_DEFS_IOC_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_ipfc.h b/drivers/scsi/bfa/include/defs/bfa_defs_ipfc.h
deleted file mode 100644
index 7cb63ea98f38..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_ipfc.h
+++ /dev/null
@@ -1,70 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_DEFS_IPFC_H__
18#define __BFA_DEFS_IPFC_H__
19
20#include <bfa_os_inc.h>
21#include <protocol/types.h>
22#include <defs/bfa_defs_types.h>
23
24/**
25 * FCS ip remote port states
26 */
27enum bfa_iprp_state {
28 BFA_IPRP_UNINIT = 0, /* PORT is not yet initialized */
29 BFA_IPRP_ONLINE = 1, /* process login is complete */
30 BFA_IPRP_OFFLINE = 2, /* iprp is offline */
31};
32
33/**
34 * FCS remote port statistics
35 */
36struct bfa_iprp_stats_s {
37 u32 offlines;
38 u32 onlines;
39 u32 rscns;
40 u32 plogis;
41 u32 logos;
42 u32 plogi_timeouts;
43 u32 plogi_rejects;
44};
45
46/**
47 * FCS iprp attribute returned in queries
48 */
49struct bfa_iprp_attr_s {
50 enum bfa_iprp_state state;
51};
52
53struct bfa_ipfc_stats_s {
54 u32 arp_sent;
55 u32 arp_recv;
56 u32 arp_reply_sent;
57 u32 arp_reply_recv;
58 u32 farp_sent;
59 u32 farp_recv;
60 u32 farp_reply_sent;
61 u32 farp_reply_recv;
62 u32 farp_reject_sent;
63 u32 farp_reject_recv;
64};
65
66struct bfa_ipfc_attr_s {
67 bfa_boolean_t enabled;
68};
69
70#endif /* __BFA_DEFS_IPFC_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_itnim.h b/drivers/scsi/bfa/include/defs/bfa_defs_itnim.h
deleted file mode 100644
index d77788b3999a..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_itnim.h
+++ /dev/null
@@ -1,136 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_DEFS_ITNIM_H__
18#define __BFA_DEFS_ITNIM_H__
19
20#include <bfa_os_inc.h>
21#include <protocol/types.h>
22
23/**
24 * FCS itnim states
25 */
26enum bfa_itnim_state {
27 BFA_ITNIM_OFFLINE = 0, /* offline */
28 BFA_ITNIM_PRLI_SEND = 1, /* prli send */
29 BFA_ITNIM_PRLI_SENT = 2, /* prli sent */
30 BFA_ITNIM_PRLI_RETRY = 3, /* prli retry */
31 BFA_ITNIM_HCB_ONLINE = 4, /* online callback */
32 BFA_ITNIM_ONLINE = 5, /* online */
33 BFA_ITNIM_HCB_OFFLINE = 6, /* offline callback */
34 BFA_ITNIM_INITIATIOR = 7, /* initiator */
35};
36
37struct bfa_itnim_latency_s {
38 u32 min;
39 u32 max;
40 u32 count;
41 u32 clock_res;
42 u32 avg;
43 u32 rsvd;
44};
45
46struct bfa_itnim_hal_stats_s {
47 u32 onlines; /* ITN nexus onlines (PRLI done) */
48 u32 offlines; /* ITN Nexus offlines */
49 u32 creates; /* ITN create requests */
50 u32 deletes; /* ITN delete requests */
51 u32 create_comps; /* ITN create completions */
52 u32 delete_comps; /* ITN delete completions */
53 u32 sler_events; /* SLER (sequence level error
54 * recovery) events */
55 u32 ioc_disabled; /* Num IOC disables */
56 u32 cleanup_comps; /* ITN cleanup completions */
57 u32 tm_cmnds; /* task management(TM) cmnds sent */
58 u32 tm_fw_rsps; /* TM cmds firmware responses */
59 u32 tm_success; /* TM successes */
60 u32 tm_failures; /* TM failures */
61 u32 tm_io_comps; /* TM IO completions */
62 u32 tm_qresumes; /* TM queue resumes (after waiting
63 * for resources)
64 */
65 u32 tm_iocdowns; /* TM cmnds affected by IOC down */
66 u32 tm_cleanups; /* TM cleanups */
67 u32 tm_cleanup_comps;
68 /* TM cleanup completions */
69 u32 ios; /* IO requests */
70 u32 io_comps; /* IO completions */
71 u64 input_reqs; /* INPUT requests */
72 u64 output_reqs; /* OUTPUT requests */
73};
74
75/**
76 * FCS remote port statistics
77 */
78struct bfa_itnim_stats_s {
79 u32 onlines; /* num rport online */
80 u32 offlines; /* num rport offline */
81 u32 prli_sent; /* num prli sent out */
82 u32 fcxp_alloc_wait;/* num fcxp alloc waits */
83 u32 prli_rsp_err; /* num prli rsp errors */
84 u32 prli_rsp_acc; /* num prli rsp accepts */
85 u32 initiator; /* rport is an initiator */
86 u32 prli_rsp_parse_err; /* prli rsp parsing errors */
87 u32 prli_rsp_rjt; /* num prli rsp rejects */
88 u32 timeout; /* num timeouts detected */
89 u32 sler; /* num sler notification from BFA */
90 u32 rsvd;
91 struct bfa_itnim_hal_stats_s hal_stats;
92};
93
94/**
95 * FCS itnim attributes returned in queries
96 */
97struct bfa_itnim_attr_s {
98 enum bfa_itnim_state state; /* FCS itnim state */
99 u8 retry; /* data retransmision support */
100 u8 task_retry_id; /* task retry ident support */
101 u8 rec_support; /* REC supported */
102 u8 conf_comp; /* confirmed completion supp */
103 struct bfa_itnim_latency_s io_latency; /* IO latency */
104};
105
106/**
107 * BFA ITNIM events.
108 * Arguments below are in BFAL context from Mgmt
109 * BFA_ITNIM_AEN_NEW: [in]: None [out]: vf_id, lpwwn
110 * BFA_ITNIM_AEN_DELETE: [in]: vf_id, lpwwn, rpwwn (0 = all fcp4 targets),
111 * [out]: vf_id, ppwwn, lpwwn, rpwwn
112 * BFA_ITNIM_AEN_ONLINE: [in]: vf_id, lpwwn, rpwwn (0 = all fcp4 targets),
113 * [out]: vf_id, ppwwn, lpwwn, rpwwn
114 * BFA_ITNIM_AEN_OFFLINE: [in]: vf_id, lpwwn, rpwwn (0 = all fcp4 targets),
115 * [out]: vf_id, ppwwn, lpwwn, rpwwn
116 * BFA_ITNIM_AEN_DISCONNECT:[in]: vf_id, lpwwn, rpwwn (0 = all fcp4 targets),
117 * [out]: vf_id, ppwwn, lpwwn, rpwwn
118 */
119enum bfa_itnim_aen_event {
120 BFA_ITNIM_AEN_ONLINE = 1, /* Target online */
121 BFA_ITNIM_AEN_OFFLINE = 2, /* Target offline */
122 BFA_ITNIM_AEN_DISCONNECT = 3, /* Target disconnected */
123};
124
125/**
126 * BFA ITNIM event data structure.
127 */
128struct bfa_itnim_aen_data_s {
129 u16 vf_id; /* vf_id of the IT nexus */
130 u16 rsvd[3];
131 wwn_t ppwwn; /* WWN of its physical port */
132 wwn_t lpwwn; /* WWN of logical port */
133 wwn_t rpwwn; /* WWN of remote(target) port */
134};
135
136#endif /* __BFA_DEFS_ITNIM_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_led.h b/drivers/scsi/bfa/include/defs/bfa_defs_led.h
deleted file mode 100644
index 62039273264e..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_led.h
+++ /dev/null
@@ -1,35 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_LED_H__
19#define __BFA_DEFS_LED_H__
20
21#define BFA_LED_MAX_NUM 3
22
23enum bfa_led_op {
24 BFA_LED_OFF = 0,
25 BFA_LED_ON = 1,
26 BFA_LED_FLICK = 2,
27 BFA_LED_BLINK = 3,
28};
29
30enum bfa_led_color {
31 BFA_LED_GREEN = 0,
32 BFA_LED_AMBER = 1,
33};
34
35#endif /* __BFA_DEFS_LED_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_lport.h b/drivers/scsi/bfa/include/defs/bfa_defs_lport.h
deleted file mode 100644
index 0952a139c47c..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_lport.h
+++ /dev/null
@@ -1,68 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_LPORT_H__
19#define __BFA_DEFS_LPORT_H__
20
21#include <defs/bfa_defs_types.h>
22#include <defs/bfa_defs_port.h>
23
24/**
25 * BFA AEN logical port events.
26 * Arguments below are in BFAL context from Mgmt
27 * BFA_LPORT_AEN_NEW: [in]: None [out]: vf_id, ppwwn, lpwwn, roles
28 * BFA_LPORT_AEN_DELETE: [in]: lpwwn [out]: vf_id, ppwwn. lpwwn, roles
29 * BFA_LPORT_AEN_ONLINE: [in]: lpwwn [out]: vf_id, ppwwn. lpwwn, roles
30 * BFA_LPORT_AEN_OFFLINE: [in]: lpwwn [out]: vf_id, ppwwn. lpwwn, roles
31 * BFA_LPORT_AEN_DISCONNECT:[in]: lpwwn [out]: vf_id, ppwwn. lpwwn, roles
32 * BFA_LPORT_AEN_NEW_PROP: [in]: None [out]: vf_id, ppwwn. lpwwn, roles
33 * BFA_LPORT_AEN_DELETE_PROP: [in]: lpwwn [out]: vf_id, ppwwn. lpwwn, roles
34 * BFA_LPORT_AEN_NEW_STANDARD: [in]: None [out]: vf_id, ppwwn. lpwwn, roles
35 * BFA_LPORT_AEN_DELETE_STANDARD: [in]: lpwwn [out]: vf_id, ppwwn. lpwwn, roles
36 * BFA_LPORT_AEN_NPIV_DUP_WWN: [in]: lpwwn [out]: vf_id, ppwwn. lpwwn, roles
37 * BFA_LPORT_AEN_NPIV_FABRIC_MAX: [in]: lpwwn [out]: vf_id, ppwwn. lpwwn, roles
38 * BFA_LPORT_AEN_NPIV_UNKNOWN: [in]: lpwwn [out]: vf_id, ppwwn. lpwwn, roles
39 */
40enum bfa_lport_aen_event {
41 BFA_LPORT_AEN_NEW = 1, /* LPort created event */
42 BFA_LPORT_AEN_DELETE = 2, /* LPort deleted event */
43 BFA_LPORT_AEN_ONLINE = 3, /* LPort online event */
44 BFA_LPORT_AEN_OFFLINE = 4, /* LPort offline event */
45 BFA_LPORT_AEN_DISCONNECT = 5, /* LPort disconnect event */
46 BFA_LPORT_AEN_NEW_PROP = 6, /* VPort created event */
47 BFA_LPORT_AEN_DELETE_PROP = 7, /* VPort deleted event */
48 BFA_LPORT_AEN_NEW_STANDARD = 8, /* VPort created event */
49 BFA_LPORT_AEN_DELETE_STANDARD = 9, /* VPort deleted event */
50 BFA_LPORT_AEN_NPIV_DUP_WWN = 10, /* VPort configured with
51 * duplicate WWN event
52 */
53 BFA_LPORT_AEN_NPIV_FABRIC_MAX = 11, /* Max NPIV in fabric/fport */
54 BFA_LPORT_AEN_NPIV_UNKNOWN = 12, /* Unknown NPIV Error code event */
55};
56
57/**
58 * BFA AEN event data structure
59 */
60struct bfa_lport_aen_data_s {
61 u16 vf_id; /* vf_id of this logical port */
62 s16 roles; /* Logical port mode,IM/TM/IP etc */
63 u32 rsvd;
64 wwn_t ppwwn; /* WWN of its physical port */
65 wwn_t lpwwn; /* WWN of this logical port */
66};
67
68#endif /* __BFA_DEFS_LPORT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h b/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h
deleted file mode 100644
index d22fb7909643..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h
+++ /dev/null
@@ -1,144 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_DEFS_MFG_H__
18#define __BFA_DEFS_MFG_H__
19
20#include <bfa_os_inc.h>
21
22/**
23 * Manufacturing block version
24 */
25#define BFA_MFG_VERSION 2
26
27/**
28 * Manufacturing block encrypted version
29 */
30#define BFA_MFG_ENC_VER 2
31
32/**
33 * Manufacturing block version 1 length
34 */
35#define BFA_MFG_VER1_LEN 128
36
37/**
38 * Manufacturing block header length
39 */
40#define BFA_MFG_HDR_LEN 4
41
42/**
43 * Checksum size
44 */
45#define BFA_MFG_CHKSUM_SIZE 16
46
47/**
48 * Manufacturing block format
49 */
50#define BFA_MFG_SERIALNUM_SIZE 11
51#define BFA_MFG_PARTNUM_SIZE 14
52#define BFA_MFG_SUPPLIER_ID_SIZE 10
53#define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20
54#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20
55#define BFA_MFG_SUPPLIER_REVISION_SIZE 4
56#define STRSZ(_n) (((_n) + 4) & ~3)
57
58/**
59 * Manufacturing card type
60 */
61enum {
62 BFA_MFG_TYPE_CB_MAX = 825, /* Crossbow card type max */
63 BFA_MFG_TYPE_FC8P2 = 825, /* 8G 2port FC card */
64 BFA_MFG_TYPE_FC8P1 = 815, /* 8G 1port FC card */
65 BFA_MFG_TYPE_FC4P2 = 425, /* 4G 2port FC card */
66 BFA_MFG_TYPE_FC4P1 = 415, /* 4G 1port FC card */
67 BFA_MFG_TYPE_CNA10P2 = 1020, /* 10G 2port CNA card */
68 BFA_MFG_TYPE_CNA10P1 = 1010, /* 10G 1port CNA card */
69 BFA_MFG_TYPE_JAYHAWK = 804, /* Jayhawk mezz card */
70 BFA_MFG_TYPE_WANCHESE = 1007, /* Wanchese mezz card */
71 BFA_MFG_TYPE_INVALID = 0, /* Invalid card type */
72};
73
74#pragma pack(1)
75
76/**
77 * Card type to port number conversion
78 */
79#define bfa_mfg_type2port_num(card_type) (((card_type) / 10) % 10)
80
81/**
82 * Check if Mezz card
83 */
84#define bfa_mfg_is_mezz(type) (( \
85 (type) == BFA_MFG_TYPE_JAYHAWK || \
86 (type) == BFA_MFG_TYPE_WANCHESE))
87
88/**
89 * Check if card type valid
90 */
91#define bfa_mfg_is_card_type_valid(type) (( \
92 (type) == BFA_MFG_TYPE_FC8P2 || \
93 (type) == BFA_MFG_TYPE_FC8P1 || \
94 (type) == BFA_MFG_TYPE_FC4P2 || \
95 (type) == BFA_MFG_TYPE_FC4P1 || \
96 (type) == BFA_MFG_TYPE_CNA10P2 || \
97 (type) == BFA_MFG_TYPE_CNA10P1 || \
98 bfa_mfg_is_mezz(type)))
99
100/**
101 * All numerical fields are in big-endian format.
102 */
103struct bfa_mfg_block_s {
104};
105
106/**
107 * VPD data length
108 */
109#define BFA_MFG_VPD_LEN 512
110
111#define BFA_MFG_VPD_PCI_HDR_OFF 137
112#define BFA_MFG_VPD_PCI_VER_MASK 0x07 /* version mask 3 bits */
113#define BFA_MFG_VPD_PCI_VDR_MASK 0xf8 /* vendor mask 5 bits */
114
115/**
116 * VPD vendor tag
117 */
118enum {
119 BFA_MFG_VPD_UNKNOWN = 0, /* vendor unknown */
120 BFA_MFG_VPD_IBM = 1, /* vendor IBM */
121 BFA_MFG_VPD_HP = 2, /* vendor HP */
122 BFA_MFG_VPD_DELL = 3, /* vendor DELL */
123 BFA_MFG_VPD_PCI_IBM = 0x08, /* PCI VPD IBM */
124 BFA_MFG_VPD_PCI_HP = 0x10, /* PCI VPD HP */
125 BFA_MFG_VPD_PCI_DELL = 0x20, /* PCI VPD DELL */
126 BFA_MFG_VPD_PCI_BRCD = 0xf8, /* PCI VPD Brocade */
127};
128
129/**
130 * All numerical fields are in big-endian format.
131 */
132struct bfa_mfg_vpd_s {
133 u8 version; /* vpd data version */
134 u8 vpd_sig[3]; /* characters 'V', 'P', 'D' */
135 u8 chksum; /* u8 checksum */
136 u8 vendor; /* vendor */
137 u8 len; /* vpd data length excluding header */
138 u8 rsv;
139 u8 data[BFA_MFG_VPD_LEN]; /* vpd data */
140};
141
142#pragma pack()
143
144#endif /* __BFA_DEFS_MFG_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_pci.h b/drivers/scsi/bfa/include/defs/bfa_defs_pci.h
deleted file mode 100644
index ea7d89bbc0bb..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_pci.h
+++ /dev/null
@@ -1,48 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_PCI_H__
19#define __BFA_DEFS_PCI_H__
20
21/**
22 * PCI device and vendor ID information
23 */
24enum {
25 BFA_PCI_VENDOR_ID_BROCADE = 0x1657,
26 BFA_PCI_DEVICE_ID_FC_8G2P = 0x13,
27 BFA_PCI_DEVICE_ID_FC_8G1P = 0x17,
28 BFA_PCI_DEVICE_ID_CT = 0x14,
29 BFA_PCI_DEVICE_ID_CT_FC = 0x21,
30};
31
32#define bfa_asic_id_ct(devid) \
33 ((devid) == BFA_PCI_DEVICE_ID_CT || \
34 (devid) == BFA_PCI_DEVICE_ID_CT_FC)
35
36/**
37 * PCI sub-system device and vendor ID information
38 */
39enum {
40 BFA_PCI_FCOE_SSDEVICE_ID = 0x14,
41};
42
43/**
44 * Maximum number of device address ranges mapped through different BAR(s)
45 */
46#define BFA_PCI_ACCESS_RANGES 1
47
48#endif /* __BFA_DEFS_PCI_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_pm.h b/drivers/scsi/bfa/include/defs/bfa_defs_pm.h
deleted file mode 100644
index e8d6d959006e..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_pm.h
+++ /dev/null
@@ -1,33 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_PM_H__
19#define __BFA_DEFS_PM_H__
20
21#include <bfa_os_inc.h>
22
23/**
24 * BFA power management device states
25 */
26enum bfa_pm_ds {
27 BFA_PM_DS_D0 = 0, /* full power mode */
28 BFA_PM_DS_D1 = 1, /* power save state 1 */
29 BFA_PM_DS_D2 = 2, /* power save state 2 */
30 BFA_PM_DS_D3 = 3, /* power off state */
31};
32
33#endif /* __BFA_DEFS_PM_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_pom.h b/drivers/scsi/bfa/include/defs/bfa_defs_pom.h
deleted file mode 100644
index d9fa278472b7..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_pom.h
+++ /dev/null
@@ -1,56 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_DEFS_POM_H__
18#define __BFA_DEFS_POM_H__
19
20#include <bfa_os_inc.h>
21#include <defs/bfa_defs_types.h>
22
23/**
24 * POM health status levels for each attributes.
25 */
26enum bfa_pom_entry_health {
27 BFA_POM_HEALTH_NOINFO = 1, /* no information */
28 BFA_POM_HEALTH_NORMAL = 2, /* health is normal */
29 BFA_POM_HEALTH_WARNING = 3, /* warning level */
30 BFA_POM_HEALTH_ALARM = 4, /* alarming level */
31};
32
33/**
34 * Reading of temperature/voltage/current/power
35 */
36struct bfa_pom_entry_s {
37 enum bfa_pom_entry_health health; /* POM entry health */
38 u32 curr_value; /* current value */
39 u32 thr_warn_high; /* threshold warning high */
40 u32 thr_warn_low; /* threshold warning low */
41 u32 thr_alarm_low; /* threshold alaram low */
42 u32 thr_alarm_high; /* threshold alarm high */
43};
44
45/**
46 * POM attributes
47 */
48struct bfa_pom_attr_s {
49 struct bfa_pom_entry_s temperature; /* centigrade */
50 struct bfa_pom_entry_s voltage; /* volts */
51 struct bfa_pom_entry_s curr; /* milli amps */
52 struct bfa_pom_entry_s txpower; /* micro watts */
53 struct bfa_pom_entry_s rxpower; /* micro watts */
54};
55
56#endif /* __BFA_DEFS_POM_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_port.h b/drivers/scsi/bfa/include/defs/bfa_defs_port.h
deleted file mode 100644
index ebdf0d1731a4..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_port.h
+++ /dev/null
@@ -1,248 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_PORT_H__
19#define __BFA_DEFS_PORT_H__
20
21#include <bfa_os_inc.h>
22#include <protocol/types.h>
23#include <defs/bfa_defs_pport.h>
24#include <defs/bfa_defs_ioc.h>
25
26#define BFA_FCS_FABRIC_IPADDR_SZ 16
27
28/**
29 * symbolic names for base port/virtual port
30 */
31#define BFA_SYMNAME_MAXLEN 128 /* vmware/windows uses 128 bytes */
32struct bfa_port_symname_s {
33 char symname[BFA_SYMNAME_MAXLEN];
34};
35
36/**
37* Roles of FCS port:
38 * - FCP IM and FCP TM roles cannot be enabled together for a FCS port
39 * - Create multiple ports if both IM and TM functions required.
40 * - Atleast one role must be specified.
41 */
42enum bfa_port_role {
43 BFA_PORT_ROLE_FCP_IM = 0x01, /* FCP initiator role */
44 BFA_PORT_ROLE_FCP_TM = 0x02, /* FCP target role */
45 BFA_PORT_ROLE_FCP_IPFC = 0x04, /* IP over FC role */
46 BFA_PORT_ROLE_FCP_MAX = BFA_PORT_ROLE_FCP_IPFC | BFA_PORT_ROLE_FCP_IM
47};
48
49/**
50 * FCS port configuration.
51 */
52struct bfa_port_cfg_s {
53 wwn_t pwwn; /* port wwn */
54 wwn_t nwwn; /* node wwn */
55 struct bfa_port_symname_s sym_name; /* vm port symbolic name */
56 bfa_boolean_t preboot_vp; /* vport created from PBC */
57 enum bfa_port_role roles; /* FCS port roles */
58 u8 tag[16]; /* opaque tag from application */
59};
60
61/**
62 * FCS port states
63 */
64enum bfa_port_state {
65 BFA_PORT_UNINIT = 0, /* PORT is not yet initialized */
66 BFA_PORT_FDISC = 1, /* FDISC is in progress */
67 BFA_PORT_ONLINE = 2, /* login to fabric is complete */
68 BFA_PORT_OFFLINE = 3, /* No login to fabric */
69};
70
71/**
72 * FCS port type. Required for VmWare.
73 */
74enum bfa_port_type {
75 BFA_PORT_TYPE_PHYSICAL = 0,
76 BFA_PORT_TYPE_VIRTUAL,
77};
78
79/**
80 * FCS port offline reason. Required for VmWare.
81 */
82enum bfa_port_offline_reason {
83 BFA_PORT_OFFLINE_UNKNOWN = 0,
84 BFA_PORT_OFFLINE_LINKDOWN,
85 BFA_PORT_OFFLINE_FAB_UNSUPPORTED, /* NPIV not supported by the
86 * fabric */
87 BFA_PORT_OFFLINE_FAB_NORESOURCES,
88 BFA_PORT_OFFLINE_FAB_LOGOUT,
89};
90
91/**
92 * FCS lport info. Required for VmWare.
93 */
94struct bfa_port_info_s {
95 u8 port_type; /* bfa_port_type_t : physical or
96 * virtual */
97 u8 port_state; /* one of bfa_port_state values */
98 u8 offline_reason; /* one of bfa_port_offline_reason_t
99 * values */
100 wwn_t port_wwn;
101 wwn_t node_wwn;
102
103 /*
104 * following 4 feilds are valid for Physical Ports only
105 */
106 u32 max_vports_supp; /* Max supported vports */
107 u32 num_vports_inuse; /* Num of in use vports */
108 u32 max_rports_supp; /* Max supported rports */
109 u32 num_rports_inuse; /* Num of doscovered rports */
110
111};
112
113/**
114 * FCS port statistics
115 */
116struct bfa_port_stats_s {
117 u32 ns_plogi_sent;
118 u32 ns_plogi_rsp_err;
119 u32 ns_plogi_acc_err;
120 u32 ns_plogi_accepts;
121 u32 ns_rejects; /* NS command rejects */
122 u32 ns_plogi_unknown_rsp;
123 u32 ns_plogi_alloc_wait;
124
125 u32 ns_retries; /* NS command retries */
126 u32 ns_timeouts; /* NS command timeouts */
127
128 u32 ns_rspnid_sent;
129 u32 ns_rspnid_accepts;
130 u32 ns_rspnid_rsp_err;
131 u32 ns_rspnid_rejects;
132 u32 ns_rspnid_alloc_wait;
133
134 u32 ns_rftid_sent;
135 u32 ns_rftid_accepts;
136 u32 ns_rftid_rsp_err;
137 u32 ns_rftid_rejects;
138 u32 ns_rftid_alloc_wait;
139
140 u32 ns_rffid_sent;
141 u32 ns_rffid_accepts;
142 u32 ns_rffid_rsp_err;
143 u32 ns_rffid_rejects;
144 u32 ns_rffid_alloc_wait;
145
146 u32 ns_gidft_sent;
147 u32 ns_gidft_accepts;
148 u32 ns_gidft_rsp_err;
149 u32 ns_gidft_rejects;
150 u32 ns_gidft_unknown_rsp;
151 u32 ns_gidft_alloc_wait;
152
153 /*
154 * Mgmt Server stats
155 */
156 u32 ms_retries; /* MS command retries */
157 u32 ms_timeouts; /* MS command timeouts */
158 u32 ms_plogi_sent;
159 u32 ms_plogi_rsp_err;
160 u32 ms_plogi_acc_err;
161 u32 ms_plogi_accepts;
162 u32 ms_rejects; /* MS command rejects */
163 u32 ms_plogi_unknown_rsp;
164 u32 ms_plogi_alloc_wait;
165
166 u32 num_rscn; /* Num of RSCN received */
167 u32 num_portid_rscn;/* Num portid format RSCN
168 * received */
169
170 u32 uf_recvs; /* unsolicited recv frames */
171 u32 uf_recv_drops; /* dropped received frames */
172
173 u32 rsvd; /* padding for 64 bit alignment */
174};
175
176/**
177 * BFA port attribute returned in queries
178 */
179struct bfa_port_attr_s {
180 enum bfa_port_state state; /* port state */
181 u32 pid; /* port ID */
182 struct bfa_port_cfg_s port_cfg; /* port configuration */
183 enum bfa_pport_type port_type; /* current topology */
184 u32 loopback; /* cable is externally looped back */
185 wwn_t fabric_name; /* attached switch's nwwn */
186 u8 fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ]; /* attached
187 * fabric's ip addr */
188 struct mac_s fpma_mac; /* Lport's FPMA Mac address */
189 u16 authfail; /* auth failed state */
190};
191
192/**
193 * BFA physical port Level events
194 * Arguments below are in BFAL context from Mgmt
195 * BFA_PORT_AEN_ONLINE: [in]: pwwn [out]: pwwn
196 * BFA_PORT_AEN_OFFLINE: [in]: pwwn [out]: pwwn
197 * BFA_PORT_AEN_RLIR: [in]: None [out]: pwwn, rlir_data, rlir_len
198 * BFA_PORT_AEN_SFP_INSERT: [in]: pwwn [out]: port_id, pwwn
199 * BFA_PORT_AEN_SFP_REMOVE: [in]: pwwn [out]: port_id, pwwn
200 * BFA_PORT_AEN_SFP_POM: [in]: pwwn [out]: level, port_id, pwwn
201 * BFA_PORT_AEN_ENABLE: [in]: pwwn [out]: pwwn
202 * BFA_PORT_AEN_DISABLE: [in]: pwwn [out]: pwwn
203 * BFA_PORT_AEN_AUTH_ON: [in]: pwwn [out]: pwwn
204 * BFA_PORT_AEN_AUTH_OFF: [in]: pwwn [out]: pwwn
205 * BFA_PORT_AEN_DISCONNECT: [in]: pwwn [out]: pwwn
206 * BFA_PORT_AEN_QOS_NEG: [in]: pwwn [out]: pwwn
207 * BFA_PORT_AEN_FABRIC_NAME_CHANGE: [in]: pwwn, [out]: pwwn, fwwn
208 *
209 */
210enum bfa_port_aen_event {
211 BFA_PORT_AEN_ONLINE = 1, /* Physical Port online event */
212 BFA_PORT_AEN_OFFLINE = 2, /* Physical Port offline event */
213 BFA_PORT_AEN_RLIR = 3, /* RLIR event, not supported */
214 BFA_PORT_AEN_SFP_INSERT = 4, /* SFP inserted event */
215 BFA_PORT_AEN_SFP_REMOVE = 5, /* SFP removed event */
216 BFA_PORT_AEN_SFP_POM = 6, /* SFP POM event */
217 BFA_PORT_AEN_ENABLE = 7, /* Physical Port enable event */
218 BFA_PORT_AEN_DISABLE = 8, /* Physical Port disable event */
219 BFA_PORT_AEN_AUTH_ON = 9, /* Physical Port auth success event */
220 BFA_PORT_AEN_AUTH_OFF = 10, /* Physical Port auth fail event */
221 BFA_PORT_AEN_DISCONNECT = 11, /* Physical Port disconnect event */
222 BFA_PORT_AEN_QOS_NEG = 12, /* Base Port QOS negotiation event */
223 BFA_PORT_AEN_FABRIC_NAME_CHANGE = 13, /* Fabric Name/WWN change
224 * event */
225 BFA_PORT_AEN_SFP_ACCESS_ERROR = 14, /* SFP read error event */
226 BFA_PORT_AEN_SFP_UNSUPPORT = 15, /* Unsupported SFP event */
227};
228
229enum bfa_port_aen_sfp_pom {
230 BFA_PORT_AEN_SFP_POM_GREEN = 1, /* Normal */
231 BFA_PORT_AEN_SFP_POM_AMBER = 2, /* Warning */
232 BFA_PORT_AEN_SFP_POM_RED = 3, /* Critical */
233 BFA_PORT_AEN_SFP_POM_MAX = BFA_PORT_AEN_SFP_POM_RED
234};
235
236struct bfa_port_aen_data_s {
237 wwn_t pwwn; /* WWN of the physical port */
238 wwn_t fwwn; /* WWN of the fabric port */
239 s32 phy_port_num; /*! For SFP related events */
240 s16 ioc_type;
241 s16 level; /* Only transitions will
242 * be informed */
243 struct mac_s mac; /* MAC address of the ethernet port,
244 * applicable to CNA port only */
245 s16 rsvd;
246};
247
248#endif /* __BFA_DEFS_PORT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_pport.h b/drivers/scsi/bfa/include/defs/bfa_defs_pport.h
deleted file mode 100644
index 2de675839c2f..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_pport.h
+++ /dev/null
@@ -1,393 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_PPORT_H__
19#define __BFA_DEFS_PPORT_H__
20
21#include <bfa_os_inc.h>
22#include <protocol/fc.h>
23#include <defs/bfa_defs_types.h>
24#include <defs/bfa_defs_qos.h>
25#include <cna/pstats/phyport_defs.h>
26
27/* Modify char* port_stt[] in bfal_port.c if a new state was added */
28enum bfa_pport_states {
29 BFA_PPORT_ST_UNINIT = 1,
30 BFA_PPORT_ST_ENABLING_QWAIT = 2,
31 BFA_PPORT_ST_ENABLING = 3,
32 BFA_PPORT_ST_LINKDOWN = 4,
33 BFA_PPORT_ST_LINKUP = 5,
34 BFA_PPORT_ST_DISABLING_QWAIT = 6,
35 BFA_PPORT_ST_DISABLING = 7,
36 BFA_PPORT_ST_DISABLED = 8,
37 BFA_PPORT_ST_STOPPED = 9,
38 BFA_PPORT_ST_IOCDOWN = 10,
39 BFA_PPORT_ST_IOCDIS = 11,
40 BFA_PPORT_ST_FWMISMATCH = 12,
41 BFA_PPORT_ST_PREBOOT_DISABLED = 13,
42 BFA_PPORT_ST_MAX_STATE,
43};
44
45/**
46 * Port speed settings. Each specific speed is a bit field. Use multiple
47 * bits to specify speeds to be selected for auto-negotiation.
48 */
49enum bfa_pport_speed {
50 BFA_PPORT_SPEED_UNKNOWN = 0,
51 BFA_PPORT_SPEED_1GBPS = 1,
52 BFA_PPORT_SPEED_2GBPS = 2,
53 BFA_PPORT_SPEED_4GBPS = 4,
54 BFA_PPORT_SPEED_8GBPS = 8,
55 BFA_PPORT_SPEED_10GBPS = 10,
56 BFA_PPORT_SPEED_AUTO =
57 (BFA_PPORT_SPEED_1GBPS | BFA_PPORT_SPEED_2GBPS |
58 BFA_PPORT_SPEED_4GBPS | BFA_PPORT_SPEED_8GBPS),
59};
60
61/**
62 * Port operational type (in sync with SNIA port type).
63 */
64enum bfa_pport_type {
65 BFA_PPORT_TYPE_UNKNOWN = 1, /* port type is unknown */
66 BFA_PPORT_TYPE_TRUNKED = 2, /* Trunked mode */
67 BFA_PPORT_TYPE_NPORT = 5, /* P2P with switched fabric */
68 BFA_PPORT_TYPE_NLPORT = 6, /* public loop */
69 BFA_PPORT_TYPE_LPORT = 20, /* private loop */
70 BFA_PPORT_TYPE_P2P = 21, /* P2P with no switched fabric */
71 BFA_PPORT_TYPE_VPORT = 22, /* NPIV - virtual port */
72};
73
74/**
75 * Port topology setting. A port's topology and fabric login status
76 * determine its operational type.
77 */
78enum bfa_pport_topology {
79 BFA_PPORT_TOPOLOGY_NONE = 0, /* No valid topology */
80 BFA_PPORT_TOPOLOGY_P2P = 1, /* P2P only */
81 BFA_PPORT_TOPOLOGY_LOOP = 2, /* LOOP topology */
82 BFA_PPORT_TOPOLOGY_AUTO = 3, /* auto topology selection */
83};
84
85/**
86 * Physical port loopback types.
87 */
88enum bfa_pport_opmode {
89 BFA_PPORT_OPMODE_NORMAL = 0x00, /* normal non-loopback mode */
90 BFA_PPORT_OPMODE_LB_INT = 0x01, /* internal loop back */
91 BFA_PPORT_OPMODE_LB_SLW = 0x02, /* serial link wrapback (serdes) */
92 BFA_PPORT_OPMODE_LB_EXT = 0x04, /* external loop back (serdes) */
93 BFA_PPORT_OPMODE_LB_CBL = 0x08, /* cabled loop back */
94 BFA_PPORT_OPMODE_LB_NLINT = 0x20, /* NL_Port internal loopback */
95};
96
97#define BFA_PPORT_OPMODE_LB_HARD(_mode) \
98 ((_mode == BFA_PPORT_OPMODE_LB_INT) || \
99 (_mode == BFA_PPORT_OPMODE_LB_SLW) || \
100 (_mode == BFA_PPORT_OPMODE_LB_EXT))
101
102/**
103 Port State (in sync with SNIA port state).
104 */
105enum bfa_pport_snia_state {
106 BFA_PPORT_STATE_UNKNOWN = 1, /* port is not initialized */
107 BFA_PPORT_STATE_ONLINE = 2, /* port is ONLINE */
108 BFA_PPORT_STATE_DISABLED = 3, /* port is disabled by user */
109 BFA_PPORT_STATE_BYPASSED = 4, /* port is bypassed (in LOOP) */
110 BFA_PPORT_STATE_DIAG = 5, /* port diagnostics is active */
111 BFA_PPORT_STATE_LINKDOWN = 6, /* link is down */
112 BFA_PPORT_STATE_LOOPBACK = 8, /* port is looped back */
113};
114
115/**
116 * Port link state
117 */
118enum bfa_pport_linkstate {
119 BFA_PPORT_LINKUP = 1, /* Physical port/Trunk link up */
120 BFA_PPORT_LINKDOWN = 2, /* Physical port/Trunk link down */
121 BFA_PPORT_TRUNK_LINKDOWN = 3, /* Trunk link down (new tmaster) */
122};
123
124/**
125 * Port link state event
126 */
127#define bfa_pport_event_t enum bfa_pport_linkstate
128
129/**
130 * Port link state reason code
131 */
132enum bfa_pport_linkstate_rsn {
133 BFA_PPORT_LINKSTATE_RSN_NONE = 0,
134 BFA_PPORT_LINKSTATE_RSN_DISABLED = 1,
135 BFA_PPORT_LINKSTATE_RSN_RX_NOS = 2,
136 BFA_PPORT_LINKSTATE_RSN_RX_OLS = 3,
137 BFA_PPORT_LINKSTATE_RSN_RX_LIP = 4,
138 BFA_PPORT_LINKSTATE_RSN_RX_LIPF7 = 5,
139 BFA_PPORT_LINKSTATE_RSN_SFP_REMOVED = 6,
140 BFA_PPORT_LINKSTATE_RSN_PORT_FAULT = 7,
141 BFA_PPORT_LINKSTATE_RSN_RX_LOS = 8,
142 BFA_PPORT_LINKSTATE_RSN_LOCAL_FAULT = 9,
143 BFA_PPORT_LINKSTATE_RSN_REMOTE_FAULT = 10,
144 BFA_PPORT_LINKSTATE_RSN_TIMEOUT = 11,
145
146
147
148 /* CEE related reason codes/errors */
149 CEE_LLDP_INFO_AGED_OUT = 20,
150 CEE_LLDP_SHUTDOWN_TLV_RCVD = 21,
151 CEE_PEER_NOT_ADVERTISE_DCBX = 22,
152 CEE_PEER_NOT_ADVERTISE_PG = 23,
153 CEE_PEER_NOT_ADVERTISE_PFC = 24,
154 CEE_PEER_NOT_ADVERTISE_FCOE = 25,
155 CEE_PG_NOT_COMPATIBLE = 26,
156 CEE_PFC_NOT_COMPATIBLE = 27,
157 CEE_FCOE_NOT_COMPATIBLE = 28,
158 CEE_BAD_PG_RCVD = 29,
159 CEE_BAD_BW_RCVD = 30,
160 CEE_BAD_PFC_RCVD = 31,
161 CEE_BAD_FCOE_PRI_RCVD = 32,
162 CEE_FCOE_PRI_PFC_OFF = 33,
163 CEE_DUP_CONTROL_TLV_RCVD = 34,
164 CEE_DUP_FEAT_TLV_RCVD = 35,
165 CEE_APPLY_NEW_CFG = 36, /* reason, not an error */
166 CEE_PROTOCOL_INIT = 37, /* reason, not an error */
167 CEE_PHY_LINK_DOWN = 38,
168 CEE_LLS_FCOE_ABSENT = 39,
169 CEE_LLS_FCOE_DOWN = 40
170};
171
172/**
173 * Default Target Rate Limiting Speed.
174 */
175#define BFA_PPORT_DEF_TRL_SPEED BFA_PPORT_SPEED_1GBPS
176
177/**
178 * Physical port configuration
179 */
180struct bfa_pport_cfg_s {
181 u8 topology; /* bfa_pport_topology */
182 u8 speed; /* enum bfa_pport_speed */
183 u8 trunked; /* trunked or not */
184 u8 qos_enabled; /* qos enabled or not */
185 u8 trunk_ports; /* bitmap of trunked ports */
186 u8 cfg_hardalpa; /* is hard alpa configured */
187 u16 maxfrsize; /* maximum frame size */
188 u8 hardalpa; /* configured hard alpa */
189 u8 rx_bbcredit; /* receive buffer credits */
190 u8 tx_bbcredit; /* transmit buffer credits */
191 u8 ratelimit; /* ratelimit enabled or not */
192 u8 trl_def_speed; /* ratelimit default speed */
193 u8 rsvd[3];
194 u16 path_tov; /* device path timeout */
195 u16 q_depth; /* SCSI Queue depth */
196};
197
198/**
199 * Port attribute values.
200 */
201struct bfa_pport_attr_s {
202 /*
203 * Static fields
204 */
205 wwn_t nwwn; /* node wwn */
206 wwn_t pwwn; /* port wwn */
207 wwn_t factorynwwn; /* factory node wwn */
208 wwn_t factorypwwn; /* factory port wwn */
209 enum fc_cos cos_supported; /* supported class of services */
210 u32 rsvd;
211 struct fc_symname_s port_symname; /* port symbolic name */
212 enum bfa_pport_speed speed_supported; /* supported speeds */
213 bfa_boolean_t pbind_enabled; /* Will be set if Persistent binding
214 * enabled. Relevant only in Windows
215 */
216
217 /*
218 * Configured values
219 */
220 struct bfa_pport_cfg_s pport_cfg; /* pport cfg */
221
222 /*
223 * Dynamic field - info from BFA
224 */
225 enum bfa_pport_states port_state; /* current port state */
226 enum bfa_pport_speed speed; /* current speed */
227 enum bfa_pport_topology topology; /* current topology */
228 bfa_boolean_t beacon; /* current beacon status */
229 bfa_boolean_t link_e2e_beacon;/* set if link beacon on */
230 bfa_boolean_t plog_enabled; /* set if portlog is enabled*/
231
232 /*
233 * Dynamic field - info from FCS
234 */
235 u32 pid; /* port ID */
236 enum bfa_pport_type port_type; /* current topology */
237 u32 loopback; /* external loopback */
238 u32 authfail; /* auth fail state */
239 u32 rsvd2; /* padding for 64 bit */
240};
241
242/**
243 * FC Port statistics.
244 */
245struct bfa_pport_fc_stats_s {
246 u64 secs_reset; /* Seconds since stats is reset */
247 u64 tx_frames; /* Tx frames */
248 u64 tx_words; /* Tx words */
249 u64 tx_lip; /* Tx LIP */
250 u64 tx_nos; /* Tx NOS */
251 u64 tx_ols; /* Tx OLS */
252 u64 tx_lr; /* Tx LR */
253 u64 tx_lrr; /* Tx LRR */
254 u64 rx_frames; /* Rx frames */
255 u64 rx_words; /* Rx words */
256 u64 lip_count; /* Rx LIP */
257 u64 nos_count; /* Rx NOS */
258 u64 ols_count; /* Rx OLS */
259 u64 lr_count; /* Rx LR */
260 u64 lrr_count; /* Rx LRR */
261 u64 invalid_crcs; /* Rx CRC err frames */
262 u64 invalid_crc_gd_eof; /* Rx CRC err good EOF frames */
263 u64 undersized_frm; /* Rx undersized frames */
264 u64 oversized_frm; /* Rx oversized frames */
265 u64 bad_eof_frm; /* Rx frames with bad EOF */
266 u64 error_frames; /* Errored frames */
267 u64 dropped_frames; /* Dropped frames */
268 u64 link_failures; /* Link Failure (LF) count */
269 u64 loss_of_syncs; /* Loss of sync count */
270 u64 loss_of_signals;/* Loss of signal count */
271 u64 primseq_errs; /* Primitive sequence protocol err. */
272 u64 bad_os_count; /* Invalid ordered sets */
273 u64 err_enc_out; /* Encoding err nonframe_8b10b */
274 u64 err_enc; /* Encoding err frame_8b10b */
275};
276
277/**
278 * Eth Port statistics.
279 */
280struct bfa_pport_eth_stats_s {
281 u64 secs_reset; /* Seconds since stats is reset */
282 u64 frame_64; /* Frames 64 bytes */
283 u64 frame_65_127; /* Frames 65-127 bytes */
284 u64 frame_128_255; /* Frames 128-255 bytes */
285 u64 frame_256_511; /* Frames 256-511 bytes */
286 u64 frame_512_1023; /* Frames 512-1023 bytes */
287 u64 frame_1024_1518; /* Frames 1024-1518 bytes */
288 u64 frame_1519_1522; /* Frames 1519-1522 bytes */
289 u64 tx_bytes; /* Tx bytes */
290 u64 tx_packets; /* Tx packets */
291 u64 tx_mcast_packets; /* Tx multicast packets */
292 u64 tx_bcast_packets; /* Tx broadcast packets */
293 u64 tx_control_frame; /* Tx control frame */
294 u64 tx_drop; /* Tx drops */
295 u64 tx_jabber; /* Tx jabber */
296 u64 tx_fcs_error; /* Tx FCS error */
297 u64 tx_fragments; /* Tx fragments */
298 u64 rx_bytes; /* Rx bytes */
299 u64 rx_packets; /* Rx packets */
300 u64 rx_mcast_packets; /* Rx multicast packets */
301 u64 rx_bcast_packets; /* Rx broadcast packets */
302 u64 rx_control_frames; /* Rx control frames */
303 u64 rx_unknown_opcode; /* Rx unknown opcode */
304 u64 rx_drop; /* Rx drops */
305 u64 rx_jabber; /* Rx jabber */
306 u64 rx_fcs_error; /* Rx FCS errors */
307 u64 rx_alignment_error; /* Rx alignment errors */
308 u64 rx_frame_length_error; /* Rx frame len errors */
309 u64 rx_code_error; /* Rx code errors */
310 u64 rx_fragments; /* Rx fragments */
311 u64 rx_pause; /* Rx pause */
312 u64 rx_zero_pause; /* Rx zero pause */
313 u64 tx_pause; /* Tx pause */
314 u64 tx_zero_pause; /* Tx zero pause */
315 u64 rx_fcoe_pause; /* Rx FCoE pause */
316 u64 rx_fcoe_zero_pause; /* Rx FCoE zero pause */
317 u64 tx_fcoe_pause; /* Tx FCoE pause */
318 u64 tx_fcoe_zero_pause; /* Tx FCoE zero pause */
319};
320
321/**
322 * Port statistics.
323 */
324union bfa_pport_stats_u {
325 struct bfa_pport_fc_stats_s fc;
326 struct bfa_pport_eth_stats_s eth;
327};
328
329/**
330 * Port FCP mappings.
331 */
332struct bfa_pport_fcpmap_s {
333 char osdevname[256];
334 u32 bus;
335 u32 target;
336 u32 oslun;
337 u32 fcid;
338 wwn_t nwwn;
339 wwn_t pwwn;
340 u64 fcplun;
341 char luid[256];
342};
343
344/**
345 * Port RNI */
346struct bfa_pport_rnid_s {
347 wwn_t wwn;
348 u32 unittype;
349 u32 portid;
350 u32 attached_nodes_num;
351 u16 ip_version;
352 u16 udp_port;
353 u8 ipaddr[16];
354 u16 rsvd;
355 u16 topologydiscoveryflags;
356};
357
358struct bfa_fcport_fcf_s {
359 wwn_t name; /* FCF name */
360 wwn_t fabric_name; /* Fabric Name */
361 u8 fipenabled; /* FIP enabled or not */
362 u8 fipfailed; /* FIP failed or not */
363 u8 resv[2];
364 u8 pri; /* FCF priority */
365 u8 version; /* FIP version used */
366 u8 available; /* Available for login */
367 u8 fka_disabled; /* FKA is disabled */
368 u8 maxsz_verified; /* FCoE max size verified */
369 u8 fc_map[3]; /* FC map */
370 u16 vlan; /* FCoE vlan tag/priority */
371 u32 fka_adv_per; /* FIP ka advert. period */
372 struct mac_s mac; /* FCF mac */
373};
374
375/**
376 * Link state information
377 */
378struct bfa_pport_link_s {
379 u8 linkstate; /* Link state bfa_pport_linkstate */
380 u8 linkstate_rsn; /* bfa_pport_linkstate_rsn_t */
381 u8 topology; /* P2P/LOOP bfa_pport_topology */
382 u8 speed; /* Link speed (1/2/4/8 G) */
383 u32 linkstate_opt; /* Linkstate optional data (debug) */
384 u8 trunked; /* Trunked or not (1 or 0) */
385 u8 resvd[3];
386 struct bfa_qos_attr_s qos_attr; /* QoS Attributes */
387 union {
388 struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */
389 struct bfa_fcport_fcf_s fcf; /* FCF information (for FCoE) */
390 } vc_fcf;
391};
392
393#endif /* __BFA_DEFS_PPORT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_qos.h b/drivers/scsi/bfa/include/defs/bfa_defs_qos.h
deleted file mode 100644
index aadbacd1d2d7..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_qos.h
+++ /dev/null
@@ -1,99 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_QOS_H__
19#define __BFA_DEFS_QOS_H__
20
21/**
22 * QoS states
 *
 * Current operational state of QoS, reported via bfa_qos_attr_s.state.
23 */
24enum bfa_qos_state {
25	BFA_QOS_ONLINE = 1,		/* QoS is online */
26	BFA_QOS_OFFLINE = 2,		/* QoS is offline */
27};
28
29
30/**
31 * QoS  Priority levels.
 *
 * BFA_QOS_UNKNOWN (0) is the default/unassigned priority; also used for
 * an rport's priority in struct bfa_rport_qos_attr_s.
32 */
33enum bfa_qos_priority {
34	BFA_QOS_UNKNOWN = 0,
35	BFA_QOS_HIGH  = 1,	/* QoS Priority Level High */
36	BFA_QOS_MED  =  2,	/* QoS Priority Level Medium */
37	BFA_QOS_LOW  =  3,	/* QoS Priority Level Low */
38};
39
40
41/**
42 * QoS  bandwidth allocation for each priority level
 *
 * NOTE(review): the three values sum to 100, so these are presumably
 * percentages of link bandwidth -- confirm.
43 */
44enum bfa_qos_bw_alloc {
45	BFA_QOS_BW_HIGH  = 60,	/* bandwidth allocation for High */
46	BFA_QOS_BW_MED  =  30,	/* bandwidth allocation for Medium */
47	BFA_QOS_BW_LOW  =  10,	/* bandwidth allocation for Low */
48};
49
50/**
51 * QoS attribute returned in QoS Query
 *
 * Embedded in struct bfa_pport_link_s (qos_attr).
52 */
53struct bfa_qos_attr_s {
54	enum bfa_qos_state state;	/* QoS current state */
55	u32  total_bb_cr;		/* Total BB Credits */
56};
57
58/**
59 * These fields should be displayed only from the CLI.
60 * There will be a separate BFAL API (get_qos_vc_attr ?)
61 * to retrieve this.
62 *
63 */
64#define  BFA_QOS_MAX_VC  16
65
/*
 * Per-VC (virtual circuit) QoS information.
 * NOTE(review): field semantics inferred from names -- confirm.
 */
66struct bfa_qos_vc_info_s {
67	u8 vc_credit;		/* credit assigned to this VC */
68	u8 borrow_credit;	/* credit borrowed by this VC */
69	u8 priority;		/* QoS priority of this VC */
70	u8 resvd;		/* reserved/padding */
71};
72
/*
 * QoS virtual-circuit attributes (VC info exchanged via ELP).
 */
73struct bfa_qos_vc_attr_s {
74	u16  total_vc_count;                    /* Total VC Count */
75	u16  shared_credit;			/* shared credit pool -- confirm */
76	u32  elp_opmode_flags;			/* ELP operating-mode flags -- confirm */
77	struct bfa_qos_vc_info_s vc_info[BFA_QOS_MAX_VC]; /*   as many as
78							    * total_vc_count */
79};
80
81/**
82 * QoS statistics
 *
 * Counters for QoS-related FLOGI/ELP/RSCN traffic.
83 */
84struct bfa_qos_stats_s {
85	u32	flogi_sent; 		/* QoS Flogi sent */
86	u32	flogi_acc_recvd;	/* QoS Flogi Acc received */
87	u32	flogi_rjt_recvd; /* QoS Flogi rejects received */
88	u32	flogi_retries;		/* QoS Flogi retries */
89
90	u32	elp_recvd; 	   	/* QoS ELP received */
91	u32	elp_accepted;       /* QoS ELP Accepted */
92	u32	elp_rejected;       /* QoS ELP rejected */
93	u32	elp_dropped;        /* QoS ELP dropped  */
94
95	u32	qos_rscn_recvd;     /* QoS RSCN received */
96	u32	rsvd; 		/* padding for 64 bit alignment */
97};
98
99#endif /* __BFA_DEFS_QOS_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_rport.h b/drivers/scsi/bfa/include/defs/bfa_defs_rport.h
deleted file mode 100644
index e0af59d6d2f6..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_rport.h
+++ /dev/null
@@ -1,199 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_RPORT_H__
19#define __BFA_DEFS_RPORT_H__
20
21#include <bfa_os_inc.h>
22#include <protocol/types.h>
23#include <defs/bfa_defs_pport.h>
24#include <defs/bfa_defs_port.h>
25#include <defs/bfa_defs_qos.h>
26
27/**
28 * FCS remote port states
 *
 * State-machine states of an FCS rport; reported via
 * bfa_rport_attr_s.state.
29 */
30enum bfa_rport_state {
31	BFA_RPORT_UNINIT 	= 0, 	/* PORT is not yet initialized */
32	BFA_RPORT_OFFLINE 	= 1,	/* rport is offline */
33	BFA_RPORT_PLOGI 	= 2,	/* PLOGI to rport is in progress */
34	BFA_RPORT_ONLINE 	= 3,	/* login to rport is complete */
35	BFA_RPORT_PLOGI_RETRY 	= 4,	/* retrying login to rport */
36	BFA_RPORT_NSQUERY 	= 5, 	/* nameserver query */
37	BFA_RPORT_ADISC 	= 6, 	/* ADISC authentication */
38	BFA_RPORT_LOGO 		= 7, 	/* logging out with rport */
39	BFA_RPORT_LOGORCV 	= 8, 	/* handling LOGO from rport */
40	BFA_RPORT_NSDISC 	= 9,	/* re-discover rport */
41};
42
43/**
44 *  Rport Scsi Function : Initiator/Target.
 *
 * NOTE(review): values 0x01/0x02 look like OR-able flags (a port may be
 * both initiator and target) -- confirm.
45 */
46enum bfa_rport_function {
47	BFA_RPORT_INITIATOR 	= 0x01,	/* SCSI Initiator	*/
48	BFA_RPORT_TARGET 	= 0x02,	/* SCSI Target	*/
49};
50
51/**
52 * port/node symbolic names for rport
 *
 * NOTE(review): MAXLEN is 255; whether this includes a NUL terminator
 * is not shown here -- confirm before assuming C-string semantics.
53 */
54#define BFA_RPORT_SYMNAME_MAXLEN	255
55struct bfa_rport_symname_s {
56	char            symname[BFA_RPORT_SYMNAME_MAXLEN];
57};
58
/**
 * BFA rport state-machine event counters, one counter per
 * (state, event) pair of the HAL rport state machine.
 */
59struct bfa_rport_hal_stats_s {
60	u32        sm_un_cr;	    /* uninit: create events      */
61	u32        sm_un_unexp;	    /* uninit: exception events   */
62	u32        sm_cr_on;	    /* created: online events     */
63	u32        sm_cr_del;	    /* created: delete events     */
64	u32        sm_cr_hwf;	    /* created: IOC down          */
65	u32        sm_cr_unexp;	    /* created: exception events  */
66	u32        sm_fwc_rsp;	    /* fw create: f/w responses   */
67	u32        sm_fwc_del;	    /* fw create: delete events   */
68	u32        sm_fwc_off;	    /* fw create: offline events  */
69	u32        sm_fwc_hwf;	    /* fw create: IOC down        */
70	u32        sm_fwc_unexp;    /* fw create: exception events*/
71	u32        sm_on_off;	    /* online: offline events     */
72	u32        sm_on_del;	    /* online: delete events      */
73	u32        sm_on_hwf;	    /* online: IOC down events    */
74	u32        sm_on_unexp;	    /* online: exception events   */
75	u32        sm_fwd_rsp;	    /* fw delete: fw responses    */
76	u32        sm_fwd_del;	    /* fw delete: delete events   */
77	u32        sm_fwd_hwf;	    /* fw delete: IOC down events */
78	u32        sm_fwd_unexp;    /* fw delete: exception events*/
79	u32        sm_off_del;	    /* offline: delete events     */
80	u32        sm_off_on;	    /* offline: online events     */
81	u32        sm_off_hwf;	    /* offline: IOC down events   */
82	u32        sm_off_unexp;    /* offline: exception events  */
83	u32        sm_del_fwrsp;    /* delete: fw responses       */
84	u32        sm_del_hwf;	    /* delete: IOC down events    */
85	u32        sm_del_unexp;    /* delete: exception events   */
86	u32        sm_delp_fwrsp;   /* delete pend: fw responses  */
87	u32        sm_delp_hwf;	    /* delete pend: IOC downs     */
88	u32        sm_delp_unexp;   /* delete pend: exceptions    */
89	u32        sm_offp_fwrsp;   /* off-pending: fw responses  */
90	u32        sm_offp_del;	    /* off-pending: deletes       */
91	u32        sm_offp_hwf;	    /* off-pending: IOC downs     */
92	u32        sm_offp_unexp;   /* off-pending: exceptions    */
93	u32        sm_iocd_off;	    /* IOC down: offline events   */
94	u32        sm_iocd_del;	    /* IOC down: delete events    */
95	u32        sm_iocd_on;	    /* IOC down: online events    */
96	u32        sm_iocd_unexp;   /* IOC down: exceptions       */
97	u32        rsvd;	    /* reserved/padding           */
98};
99
100/**
101 * FCS remote port statistics
 *
 * ELS/discovery counters for a remote port, plus the embedded BFA HAL
 * state-machine counters.
102 */
103struct bfa_rport_stats_s {
104	u32        offlines;           /* remote port offline count  */
105	u32        onlines;            /* remote port online count   */
106	u32        rscns;              /* RSCN affecting rport       */
107	u32        plogis;		    /* plogis sent                */
108	u32        plogi_accs;	    /* plogi accepts              */
109	u32        plogi_timeouts;	    /* plogi timeouts             */
110	u32        plogi_rejects;	    /* rcvd plogi rejects         */
111	u32        plogi_failed;	    /* local failure              */
112	u32        plogi_rcvd;	    /* plogis rcvd                */
113	u32        prli_rcvd;          /* inbound PRLIs              */
114	u32        adisc_rcvd;         /* ADISCs received            */
115	u32        adisc_rejects;      /* recvd  ADISC rejects       */
116	u32        adisc_sent;         /* ADISC requests sent        */
117	u32        adisc_accs;         /* ADISC accepted by rport    */
118	u32        adisc_failed;       /* ADISC failed (no response) */
119	u32        adisc_rejected;     /* ADISC rejected by us       */
120	u32        logos;              /* logos sent                 */
121	u32        logo_accs;          /* LOGO accepts from rport    */
122	u32        logo_failed;        /* LOGO failures              */
123	u32        logo_rejected;      /* LOGO rejects from rport    */
124	u32        logo_rcvd;          /* LOGO from remote port      */
125
126	u32        rpsc_rcvd;         /* RPSC received              */
127	u32        rpsc_rejects;      /* recvd  RPSC rejects        */
128	u32        rpsc_sent;         /* RPSC requests sent         */
129	u32        rpsc_accs;         /* RPSC accepted by rport     */
130	u32        rpsc_failed;       /* RPSC failed (no response)  */
131	u32        rpsc_rejected;     /* RPSC rejected by us        */
132
133	u32        rsvd;	      /* reserved/padding           */
134	struct bfa_rport_hal_stats_s	hal_stats;  /* BFA rport stats    */
135};
136
137/**
138 * Rport's QoS attributes
 *
 * Also aliased as bfa_rport_aen_qos_data_t for AEN payloads.
139 */
140struct bfa_rport_qos_attr_s {
141	enum bfa_qos_priority qos_priority;  /* rport's QoS priority   */
142	u32	       qos_flow_id;	  /* QoS flow Id          */
143};
144
145/**
146 * FCS remote port attributes returned in queries
 *
 * Snapshot of a remote port's identity, state and capabilities.
147 */
148struct bfa_rport_attr_s {
149	wwn_t           	nwwn;	/* node wwn */
150	wwn_t           	pwwn;	/* port wwn */
151	enum fc_cos cos_supported;	/* supported class of services */
152	u32        	pid; 	/* port ID */
153	u32        	df_sz;	/* Max payload size */
154	enum bfa_rport_state 	state;	/* Rport State machine state */
155	enum fc_cos        	fc_cos;	/* FC classes of services */
156	bfa_boolean_t   	cisc;	/* CISC capable device */
157	struct bfa_rport_symname_s symname; /* Symbolic Name */
158	enum bfa_rport_function	scsi_function; /* Initiator/Target */
159	struct bfa_rport_qos_attr_s qos_attr; /* qos attributes  */
160	enum bfa_pport_speed curr_speed;   /* operating speed got from
161					    * RPSC ELS. UNKNOWN, if RPSC
162					    * is not supported */
163	bfa_boolean_t 	trl_enforced;	/* TRL enforced ? TRUE/FALSE */
164	enum bfa_pport_speed	assigned_speed;	/* Speed assigned by the user.
165						 * will be used if RPSC is not
166						 * supported by the rport */
167};
168
/* AEN payload type for rport QoS events (see bfa_rport_aen_data_s.priv). */
169#define bfa_rport_aen_qos_data_t struct bfa_rport_qos_attr_s
170
171/**
172 * BFA remote port events
173 * Arguments below are in BFAL context from Mgmt
174 * BFA_RPORT_AEN_ONLINE:    [in]: lpwwn	[out]: vf_id, lpwwn, rpwwn
175 * BFA_RPORT_AEN_OFFLINE:   [in]: lpwwn [out]: vf_id, lpwwn, rpwwn
176 * BFA_RPORT_AEN_DISCONNECT:[in]: lpwwn [out]: vf_id, lpwwn, rpwwn
177 * BFA_RPORT_AEN_QOS_PRIO:  [in]: lpwwn [out]: vf_id, lpwwn, rpwwn, prio
178 * BFA_RPORT_AEN_QOS_FLOWID:[in]: lpwwn [out]: vf_id, lpwwn, rpwwn, flow_id
179 */
180enum bfa_rport_aen_event {
181	BFA_RPORT_AEN_ONLINE     = 1,   /* RPort online event */
182	BFA_RPORT_AEN_OFFLINE    = 2,   /* RPort offline event */
183	BFA_RPORT_AEN_DISCONNECT = 3,   /* RPort disconnect event */
184	BFA_RPORT_AEN_QOS_PRIO   = 4,   /* QOS priority change event */
185	BFA_RPORT_AEN_QOS_FLOWID = 5,   /* QOS flow Id change event */
186};
187
/**
 * Payload delivered with a bfa_rport_aen_event notification.
 */
188struct bfa_rport_aen_data_s {
189	u16        vf_id;	/* vf_id of this logical port */
190	u16        rsvd[3];	/* reserved/padding */
191	wwn_t           ppwwn;	/* WWN of its physical port */
192	wwn_t           lpwwn;	/* WWN of this logical port */
193	wwn_t           rpwwn;	/* WWN of this remote port */
194	union {
195		bfa_rport_aen_qos_data_t qos;	/* valid for QOS_* events */
196	} priv;
197};
198
199#endif /* __BFA_DEFS_RPORT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_status.h b/drivers/scsi/bfa/include/defs/bfa_defs_status.h
deleted file mode 100644
index 6eb4e62096fc..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_status.h
+++ /dev/null
@@ -1,282 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_DEFS_STATUS_H__
18#define __BFA_DEFS_STATUS_H__
19
20/**
21 * API status return values
22 *
23 * NOTE: The error msgs are auto generated from the comments. Only single line
24 * comments are supported
25 */
26enum bfa_status {
27	BFA_STATUS_OK 		= 0,	/* Success */
28	BFA_STATUS_FAILED 	= 1,	/* Operation failed */
29	BFA_STATUS_EINVAL 	= 2,	/* Invalid params Check input
30					 * parameters */
31	BFA_STATUS_ENOMEM 	= 3,	/* Out of resources */
32	BFA_STATUS_ENOSYS 	= 4,	/* Function not implemented */
33	BFA_STATUS_ETIMER 	= 5,	/* Timer expired - Retry, if
34					 * persists, contact support */
35	BFA_STATUS_EPROTOCOL 	= 6,	/* Protocol error */
36	BFA_STATUS_ENOFCPORTS 	= 7,	/* No FC ports resources */
37	BFA_STATUS_NOFLASH 	= 8,	/* Flash not present */
38	BFA_STATUS_BADFLASH 	= 9,	/* Flash is corrupted or bad */
39	BFA_STATUS_SFP_UNSUPP 	= 10,	/* Unsupported SFP - Replace SFP */
40	BFA_STATUS_UNKNOWN_VFID = 11,	/* VF_ID not found */
41	BFA_STATUS_DATACORRUPTED = 12,	/* Diag returned data corrupted
42					 * contact support */
43	BFA_STATUS_DEVBUSY 	= 13,	/* Device busy - Retry operation */
44	BFA_STATUS_ABORTED 	= 14,	/* Operation aborted */
45	BFA_STATUS_NODEV 	= 15,	/* Dev is not present */
46	BFA_STATUS_HDMA_FAILED 	= 16,	/* Host dma failed contact support */
47	BFA_STATUS_FLASH_BAD_LEN = 17,	/* Flash bad length */
48	BFA_STATUS_UNKNOWN_LWWN = 18,	/* LPORT PWWN not found */
49	BFA_STATUS_UNKNOWN_RWWN = 19,	/* RPORT PWWN not found */
50	BFA_STATUS_FCPT_LS_RJT 	= 20,	/* Got LS_RJT for FC Pass
51					 * through Req */
52	BFA_STATUS_VPORT_EXISTS = 21,	/* VPORT already exists */
53	BFA_STATUS_VPORT_MAX 	= 22,	/* Reached max VPORT supported
54					 * limit */
55	BFA_STATUS_UNSUPP_SPEED = 23,	/* Invalid Speed Check speed
56					 * setting */
57	BFA_STATUS_INVLD_DFSZ 	= 24,	/* Invalid Max data field size */
58	BFA_STATUS_CNFG_FAILED 	= 25,	/* Setting can not be persisted */
59	BFA_STATUS_CMD_NOTSUPP 	= 26,	/* Command/API not supported */
60	BFA_STATUS_NO_ADAPTER 	= 27,	/* No Brocade Adapter Found */
61	BFA_STATUS_LINKDOWN 	= 28,	/* Link is down - Check or replace
62					 * SFP/cable */
63	BFA_STATUS_FABRIC_RJT 	= 29,	/* Reject from attached fabric */
64	BFA_STATUS_UNKNOWN_VWWN = 30,	/* VPORT PWWN not found */
65	BFA_STATUS_NSLOGIN_FAILED = 31,	/* Nameserver login failed */
66	BFA_STATUS_NO_RPORTS 	= 32,	/* No remote ports found */
67	BFA_STATUS_NSQUERY_FAILED = 33,	/* Nameserver query failed */
68	BFA_STATUS_PORT_OFFLINE = 34,	/* Port is not online */
69	BFA_STATUS_RPORT_OFFLINE = 35,	/* RPORT is not online */
70	BFA_STATUS_TGTOPEN_FAILED = 36,	/* Remote SCSI target open failed */
71	BFA_STATUS_BAD_LUNS 	= 37,	/* No valid LUNs found */
72	BFA_STATUS_IO_FAILURE 	= 38,	/* SCSI target IO failure */
73	BFA_STATUS_NO_FABRIC 	= 39,	/* No switched fabric present */
74	BFA_STATUS_EBADF 	= 40,	/* Bad file descriptor */
75	BFA_STATUS_EINTR 	= 41,	/* A signal was caught during ioctl */
76	BFA_STATUS_EIO 		= 42,	/* I/O error */
77	BFA_STATUS_ENOTTY 	= 43,	/* Inappropriate I/O control
78					 * operation */
79	BFA_STATUS_ENXIO 	= 44,	/* No such device or address */
80	BFA_STATUS_EFOPEN 	= 45,	/* Failed to open file */
81	BFA_STATUS_VPORT_WWN_BP = 46,	/* WWN is same as base port's WWN */
82	BFA_STATUS_PORT_NOT_DISABLED = 47, /* Port not disabled disable port
83					    * first */
84	BFA_STATUS_BADFRMHDR 	= 48,	/* Bad frame header */
85	BFA_STATUS_BADFRMSZ 	= 49,	/* Bad frame size check and replace
86					 * SFP/cable */
87	BFA_STATUS_MISSINGFRM 	= 50,	/* Missing frame check and replace
88					 * SFP/cable or for Mezz card check and
89					 * replace pass through module */
90	BFA_STATUS_LINKTIMEOUT 	= 51,	/* Link timeout check and replace
91					 * SFP/cable */
92	BFA_STATUS_NO_FCPIM_NEXUS = 52,	/* No FCP Nexus exists with the
93					 * rport */
94	BFA_STATUS_CHECKSUM_FAIL = 53,	/* checksum failure */
95	BFA_STATUS_GZME_FAILED 	= 54,	/* Get zone member query failed */
96	BFA_STATUS_SCSISTART_REQD = 55,	/* SCSI disk require START command */
97	BFA_STATUS_IOC_FAILURE 	= 56,	/* IOC failure - Retry, if persists
98					 * contact support */
99	BFA_STATUS_INVALID_WWN 	= 57,	/* Invalid WWN */
100	BFA_STATUS_MISMATCH 	= 58,	/* Version mismatch */
101	BFA_STATUS_IOC_ENABLED 	= 59,	/* IOC is already enabled */
102	BFA_STATUS_ADAPTER_ENABLED = 60, /* Adapter is not disabled disable
103					  * adapter first */
104	BFA_STATUS_IOC_NON_OP 	= 61,	/* IOC is not operational. Enable IOC
105					 * and if it still fails,
106					 * contact support */
107	BFA_STATUS_ADDR_MAP_FAILURE = 62, /* PCI base address not mapped
108					   * in OS */
109	BFA_STATUS_SAME_NAME 	= 63,	/* Name exists! use a different
110					 * name */
111	BFA_STATUS_PENDING      = 64,   /* API completes asynchronously */
112	BFA_STATUS_8G_SPD	= 65,	/* Speed setting not valid for
113					 * 8G HBA */
114	BFA_STATUS_4G_SPD	= 66,	/* Speed setting not valid for
115					 * 4G HBA */
116	BFA_STATUS_AD_IS_ENABLE = 67,	/* Adapter is already enabled */
117	BFA_STATUS_EINVAL_TOV 	= 68,	/* Invalid path failover TOV */
118	BFA_STATUS_EINVAL_QDEPTH = 69,	/* Invalid queue depth value */
119	BFA_STATUS_VERSION_FAIL = 70,	/* Application/Driver version
120					 * mismatch */
121	BFA_STATUS_DIAG_BUSY    = 71,	/* diag busy */
122	BFA_STATUS_BEACON_ON	= 72,	/* Port Beacon already on */
123	BFA_STATUS_BEACON_OFF	= 73,	/* Port Beacon already off */
124	BFA_STATUS_LBEACON_ON   = 74,	/* Link End-to-End Beacon already
125					 * on */
126	BFA_STATUS_LBEACON_OFF	= 75,	/* Link End-to-End Beacon already
127					 * off */
128	BFA_STATUS_PORT_NOT_INITED = 76, /* Port not initialized */
129	BFA_STATUS_RPSC_ENABLED = 77, /* Target has a valid speed */
130	BFA_STATUS_ENOFSAVE 	= 78,	/* No saved firmware trace */
131	BFA_STATUS_BAD_FILE	= 79,	/* Not a valid Brocade Boot Code
132					 * file */
133	BFA_STATUS_RLIM_EN	= 80,	/* Target rate limiting is already
134					 * enabled */
135	BFA_STATUS_RLIM_DIS	= 81,  /* Target rate limiting is already
136					* disabled */
137	BFA_STATUS_IOC_DISABLED = 82,  /* IOC is already disabled */
138	BFA_STATUS_ADAPTER_DISABLED = 83,  /* Adapter is already disabled */
139	BFA_STATUS_BIOS_DISABLED = 84,  /* Bios is already disabled */
140	BFA_STATUS_AUTH_ENABLED = 85,  /* Authentication is already
141					* enabled */
142	BFA_STATUS_AUTH_DISABLED = 86,  /* Authentication is already
143					* disabled */
144	BFA_STATUS_ERROR_TRL_ENABLED = 87,  /* Target rate limiting is
145					     * enabled */
146	BFA_STATUS_ERROR_QOS_ENABLED = 88,  /* QoS is enabled */
147	BFA_STATUS_NO_SFP_DEV = 89, /* No SFP device check or replace SFP */
148	BFA_STATUS_MEMTEST_FAILED = 90,	/* Memory test failed contact
149					 * support */
150	BFA_STATUS_INVALID_DEVID = 91,	/* Invalid device id provided */
151	BFA_STATUS_QOS_ENABLED = 92, /* QOS is already enabled */
152	BFA_STATUS_QOS_DISABLED = 93, /* QOS is already disabled */
153	BFA_STATUS_INCORRECT_DRV_CONFIG = 94, /* Check configuration
154					       * key/value pair */
155	BFA_STATUS_REG_FAIL = 95, /* Can't read windows registry */
156	BFA_STATUS_IM_INV_CODE = 96, /* Invalid IOCTL code */
157	BFA_STATUS_IM_INV_VLAN = 97, /* Invalid VLAN ID */
158	BFA_STATUS_IM_INV_ADAPT_NAME = 98, /* Invalid adapter name */
159	BFA_STATUS_IM_LOW_RESOURCES = 99, /* Memory allocation failure in
160					   * driver */
161	BFA_STATUS_IM_VLANID_IS_PVID = 100, /* Given VLAN id same as PVID */
162	BFA_STATUS_IM_VLANID_EXISTS = 101, /* Given VLAN id already exists */
163	BFA_STATUS_IM_FW_UPDATE_FAIL = 102, /* Updating firmware with new
164					     * VLAN ID failed */
165	BFA_STATUS_PORTLOG_ENABLED = 103, /* Port Log is already enabled */
166	BFA_STATUS_PORTLOG_DISABLED = 104, /* Port Log is already disabled */
167	BFA_STATUS_FILE_NOT_FOUND = 105, /* Specified file could not be
168					  * found */
169	BFA_STATUS_QOS_FC_ONLY = 106, /* QOS can be enabled for FC mode
170				       * only */
171	BFA_STATUS_RLIM_FC_ONLY = 107, /* RATELIM can be enabled for FC mode
172					* only */
173	BFA_STATUS_CT_SPD = 108, /* Invalid speed selection for Catapult. */
174	BFA_STATUS_LEDTEST_OP = 109, /* LED test is operating */
175	BFA_STATUS_CEE_NOT_DN = 110, /* eth port is not at down state, please
176				      * bring down first */
177	BFA_STATUS_10G_SPD = 111, /* Speed setting not valid for 10G CNA */
178	BFA_STATUS_IM_INV_TEAM_NAME = 112, /* Invalid team name */
179	BFA_STATUS_IM_DUP_TEAM_NAME = 113, /* Given team name already
180					    * exists */
181	BFA_STATUS_IM_ADAPT_ALREADY_IN_TEAM = 114, /* Given adapter is part
182						    * of another team */
183	BFA_STATUS_IM_ADAPT_HAS_VLANS = 115, /* Adapter has VLANs configured.
184					      * Delete all VLANs to become
185					      * part of the team */
186	BFA_STATUS_IM_PVID_MISMATCH = 116, /* Mismatching PVIDs configured
187					    * for adapters */
188	BFA_STATUS_IM_LINK_SPEED_MISMATCH = 117, /* Mismatching link speeds
189						  * configured for adapters */
190	BFA_STATUS_IM_MTU_MISMATCH = 118, /* Mismatching MTUs configured for
191					   * adapters */
192	BFA_STATUS_IM_RSS_MISMATCH = 119, /* Mismatching RSS parameters
193					   * configured for adapters */
194	BFA_STATUS_IM_HDS_MISMATCH = 120, /* Mismatching HDS parameters
195					   * configured for adapters */
196	BFA_STATUS_IM_OFFLOAD_MISMATCH = 121, /* Mismatching offload
197					       * parameters configured for
198					       * adapters */
199	BFA_STATUS_IM_PORT_PARAMS = 122, /* Error setting port parameters */
200	BFA_STATUS_IM_PORT_NOT_IN_TEAM = 123, /* Port is not part of team */
201	BFA_STATUS_IM_CANNOT_REM_PRI = 124, /* Primary adapter cannot be
202					     * removed. Change primary before
203					     * removing */
204	BFA_STATUS_IM_MAX_PORTS_REACHED = 125, /* Exceeding maximum ports
205						* per team */
206	BFA_STATUS_IM_LAST_PORT_DELETE = 126, /* Last port in team being
207					       * deleted */
208	BFA_STATUS_IM_NO_DRIVER = 127, /* IM driver is not installed */
209	BFA_STATUS_IM_MAX_VLANS_REACHED = 128, /* Exceeding maximum VLANs
210						* per port */
211	BFA_STATUS_TOMCAT_SPD_NOT_ALLOWED = 129, /* Bios speed config not
212						  * allowed for CNA */
213	BFA_STATUS_NO_MINPORT_DRIVER = 130, /* Miniport driver is not
214					     * loaded */
215	BFA_STATUS_CARD_TYPE_MISMATCH = 131, /* Card type mismatch */
216	BFA_STATUS_BAD_ASICBLK = 132, /* Bad ASIC block */
217	BFA_STATUS_NO_DRIVER = 133, /* Brocade adapter/driver not installed
218				     * or loaded */
219	BFA_STATUS_INVALID_MAC = 134, /* Invalid MAC address */
220	BFA_STATUS_IM_NO_VLAN = 135, /* No VLANs configured on the adapter */
221	BFA_STATUS_IM_ETH_LB_FAILED = 136, /* Ethernet loopback test failed */
222	BFA_STATUS_IM_PVID_REMOVE = 137, /* Cannot remove port VLAN (PVID) */
223	BFA_STATUS_IM_PVID_EDIT = 138, /* Cannot edit port VLAN (PVID) */
224	BFA_STATUS_CNA_NO_BOOT = 139, /* Boot upload not allowed for CNA */
225	BFA_STATUS_IM_PVID_NON_ZERO = 140, /* Port VLAN ID (PVID) is Set to
226					    * Non-Zero Value */
227	BFA_STATUS_IM_INETCFG_LOCK_FAILED = 141, /* Acquiring Network
228						  * Subsystem Lock Failed.Please
229						  * try after some time */
230	BFA_STATUS_IM_GET_INETCFG_FAILED = 142, /* Acquiring Network Subsystem
231						 * handle Failed. Please try
232						 * after some time */
233	BFA_STATUS_IM_NOT_BOUND = 143, /* IM driver is not active */
234	BFA_STATUS_INSUFFICIENT_PERMS = 144, /* User doesn't have sufficient
235					      * permissions to execute the BCU
236					      * application */
237	BFA_STATUS_IM_INV_VLAN_NAME = 145, /* Invalid/Reserved VLAN name
238					    * string. The name is not allowed
239					    * for the normal VLAN */
240	BFA_STATUS_CMD_NOTSUPP_CNA = 146, /* Command not supported for CNA */
241	BFA_STATUS_IM_PASSTHRU_EDIT = 147, /* Can not edit passthrough VLAN
242					    * id */
243	BFA_STATUS_IM_BIND_FAILED = 148, /* IM Driver bind operation
244					  * failed */
245	BFA_STATUS_IM_UNBIND_FAILED = 149, /* IM Driver unbind operation
246					    * failed */
247	BFA_STATUS_IM_PORT_IN_TEAM = 150, /* Port is already part of the
248					   * team */
249	BFA_STATUS_IM_VLAN_NOT_FOUND = 151, /* VLAN ID doesn't exists */
250	BFA_STATUS_IM_TEAM_NOT_FOUND = 152, /* Teaming configuration doesn't
251					     * exists */
252	BFA_STATUS_IM_TEAM_CFG_NOT_ALLOWED = 153, /* Given settings are not
253						   * allowed for the current
254						   * Teaming mode */
255	BFA_STATUS_PBC = 154, /* Operation not allowed for pre-boot
256			       * configuration */
257	BFA_STATUS_DEVID_MISSING = 155, /* Boot image is not for the adapter(s)
258					 * installed */
259	BFA_STATUS_BAD_FWCFG = 156, /* Bad firmware configuration */
260	BFA_STATUS_CREATE_FILE = 157, /* Failed to create temporary file */
261	BFA_STATUS_INVALID_VENDOR = 158, /* Invalid switch vendor */
262	BFA_STATUS_SFP_NOT_READY = 159, /* SFP info is not ready. Retry */
263	BFA_STATUS_NO_TOPOLOGY_FOR_CNA = 160, /* Topology command not
264					       * applicable to CNA */
265	BFA_STATUS_BOOT_CODE_UPDATED = 161, /* reboot -- -r is needed after
266					     * boot code updated */
267	BFA_STATUS_BOOT_VERSION = 162, /* Boot code version not compatible with
268					* the driver installed */
269	BFA_STATUS_CARDTYPE_MISSING = 163, /* Boot image is not for the
270					    * adapter(s) installed */
271	BFA_STATUS_INVALID_CARDTYPE = 164, /* Invalid card type provided */
272	BFA_STATUS_MAX_VAL /* Unknown error code */
273};
274#define bfa_status_t enum bfa_status
275
/*
 * ELS protocol error sub-status.
 * NOTE(review): value meanings inferred from names -- confirm:
 * BAD_ACCEPT = malformed/unexpected accept, UNKNOWN_RSP = unrecognized
 * response.
 */
276enum bfa_eproto_status {
277	BFA_EPROTO_BAD_ACCEPT = 0,
278	BFA_EPROTO_UNKNOWN_RSP = 1
279};
280#define bfa_eproto_status_t enum bfa_eproto_status
281
282#endif /* __BFA_DEFS_STATUS_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_tin.h b/drivers/scsi/bfa/include/defs/bfa_defs_tin.h
deleted file mode 100644
index e05a2db7abed..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_tin.h
+++ /dev/null
@@ -1,118 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_TIN_H__
19#define __BFA_DEFS_TIN_H__
20
21#include <protocol/types.h>
22#include <protocol/fc.h>
23
24/**
25 * FCS tin states
 *
 * NOTE(review): the comments for values 5 (WIO_LOGOUT) and 6 (WOS_LOGOUT)
 * are identical in the original; WOS presumably means the OS/ULP side of
 * the logout -- confirm before relying on either.
26 */
27enum bfa_tin_state_e {
28	BFA_TIN_SM_OFFLINE = 0,		/*  tin is offline */
29	BFA_TIN_SM_WOS_LOGIN = 1,	/*  Waiting PRLI ACC/RJT from ULP */
30	BFA_TIN_SM_WFW_ONLINE = 2,	/*  Waiting ACK to PRLI ACC from FW */
31	BFA_TIN_SM_ONLINE = 3,		/*  tin login is complete */
32	BFA_TIN_SM_WIO_RELOGIN = 4,	/*  tin relogin is in progress */
33	BFA_TIN_SM_WIO_LOGOUT = 5,	/*  Processing of PRLO req from
34					 *   Initiator is in progress
35					 */
36	BFA_TIN_SM_WOS_LOGOUT = 6,	/*  Processing of PRLO req from
37					 *   Initiator is in progress
38					 */
39	BFA_TIN_SM_WIO_CLEAN = 7,	/*  Waiting for IO cleanup before tin
40					 *   is offline. This can be triggered
41					 *   by RPORT LOGO (rcvd/sent) or by
42					 *   PRLO (rcvd/sent)
43					 */
44};
45
/* Received PRLI request: FC frame header plus PRLI payload. */
46struct bfa_prli_req_s {
47	struct fchs_s fchs;		/* FC frame header */
48	struct fc_prli_s prli_payload;	/* PRLI ELS payload */
49};
50
/* Received PRLO request: FC frame header plus PRLO payload. */
51struct bfa_prlo_req_s {
52	struct fchs_s fchs;		/* FC frame header */
53	struct fc_prlo_s prlo_payload;	/* PRLO ELS payload */
54};
55
/*
 * ULP -> TIN responses to PRLI (login) / PRLO (logout) requests.
 * NOTE(review): rjt_payload is passed by value (whole struct copy);
 * it is the LS_RJT payload used when the response is a reject -- confirm.
 */
56void bfa_tin_send_login_rsp(void *bfa_tin, u32 login_rsp,
57				struct fc_ls_rjt_s rjt_payload);
58void bfa_tin_send_logout_rsp(void *bfa_tin, u32 logout_rsp,
59				struct fc_ls_rjt_s rjt_payload);
60/**
61 * FCS target port statistics
 *
 * Counters for ITN nexus lifecycle and PRLI handling.
62 */
63struct bfa_tin_stats_s {
64	u32 onlines;	/* ITN nexus onlines (PRLI done) */
65	u32 offlines;	/* ITN Nexus offlines */
66	u32 prli_req_parse_err;	/* prli req parsing errors */
67	u32 prli_rsp_rjt;	/* num prli rsp rejects sent */
68	u32 prli_rsp_acc;	/* num prli rsp accepts sent */
69	u32 cleanup_comps;	/* ITN cleanup completions */
70};
71
72/**
73 * FCS tin attributes returned in queries
74 */
75struct bfa_tin_attr_s {
76	enum bfa_tin_state_e state;	/* current tin state-machine state */
77	u8	seq_retry;	    /* Sequence retry supported      */
78	u8	rsvd[3];	    /* reserved/padding              */
79};
80
81/**
82 * BFA TIN async event data structure for BFAL
 *
 * Event codes delivered with struct bfa_tin_aen_data_s.
83 */
84enum bfa_tin_aen_event {
85	BFA_TIN_AEN_ONLINE 	= 1,	/* Target online */
86	BFA_TIN_AEN_OFFLINE 	= 2,	/* Target offline */
87	BFA_TIN_AEN_DISCONNECT	= 3,	/* Target disconnected */
88};
89
90/**
91 * BFA TIN event data structure.
 *
 * Payload delivered with a bfa_tin_aen_event notification.
92 */
93struct bfa_tin_aen_data_s {
94	u16 vf_id;	/* vf_id of the IT nexus */
95	u16 rsvd[3];	/* reserved/padding */
96	wwn_t lpwwn;	/* WWN of logical port */
97	wwn_t rpwwn;	/* WWN of remote(target) port */
98};
99
100/**
101 * Below APIs are needed from BFA driver
102 * Move these to BFA driver public header file?
 *
 * NOTE(review): prli_req/prlo_req are passed by value (whole struct
 * copy, including the FC header) -- confirm this is intentional.
103 */
104/* TIN rcvd new PRLI & gets bfad_tin_t ptr from driver this callback */
105void *bfad_tin_rcvd_login_req(void *bfad_tm_port, void *bfa_tin,
106				wwn_t rp_wwn, u32 rp_fcid,
107				struct bfa_prli_req_s prli_req);
108/* TIN rcvd new PRLO */
109void bfad_tin_rcvd_logout_req(void *bfad_tin, wwn_t rp_wwn, u32 rp_fcid,
110				struct bfa_prlo_req_s prlo_req);
111/* TIN is online and ready for IO */
112void bfad_tin_online(void *bfad_tin);
113/* TIN is offline and BFA driver can shutdown its upper stack */
114void bfad_tin_offline(void *bfad_tin);
115/* TIN does not need this BFA driver tin tag anymore, so can be freed */
116void bfad_tin_res_free(void *bfad_tin);
117
118#endif /* __BFA_DEFS_TIN_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_tsensor.h b/drivers/scsi/bfa/include/defs/bfa_defs_tsensor.h
deleted file mode 100644
index ade763dbc8ce..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_tsensor.h
+++ /dev/null
@@ -1,43 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_TSENSOR_H__
19#define __BFA_DEFS_TSENSOR_H__
20
21#include <bfa_os_inc.h>
22#include <defs/bfa_defs_types.h>
23
24/**
25 * Temperature sensor status values
26 */
27enum bfa_tsensor_status {
28	BFA_TSENSOR_STATUS_UNKNOWN   = 1,   /* unknown status */
29	BFA_TSENSOR_STATUS_FAULTY    = 2,   /* sensor is faulty */
30	BFA_TSENSOR_STATUS_BELOW_MIN = 3,   /* temperature below minimum */
31	BFA_TSENSOR_STATUS_NOMINAL   = 4,   /* normal temperature */
32	BFA_TSENSOR_STATUS_ABOVE_MAX = 5,   /* temperature above maximum */
33};
34
35/**
36 * Temperature sensor attribute
 *
 * Current reading and validity status of the on-board sensor.
37 */
38struct bfa_tsensor_attr_s {
39	enum bfa_tsensor_status status;	/* temperature sensor status */
40	u32        	value;	/* current temperature in celsius */
41};
42
43#endif /* __BFA_DEFS_TSENSOR_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_types.h b/drivers/scsi/bfa/include/defs/bfa_defs_types.h
deleted file mode 100644
index 4348332b107a..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_types.h
+++ /dev/null
@@ -1,30 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_DEFS_TYPES_H__
18#define __BFA_DEFS_TYPES_H__
19
20#include <bfa_os_inc.h>
21
/* Driver-wide boolean type (bfa_boolean_t). */
22enum bfa_boolean {
23	BFA_FALSE = 0,
24	BFA_TRUE  = 1
25};
26#define bfa_boolean_t enum bfa_boolean
27
28#define BFA_STRING_32	32	/* common 32-byte string length -- assumed; confirm */
29
30#endif /* __BFA_DEFS_TYPES_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_version.h b/drivers/scsi/bfa/include/defs/bfa_defs_version.h
deleted file mode 100644
index f8902a2c9aad..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_version.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_DEFS_VERSION_H__
18#define __BFA_DEFS_VERSION_H__
19
20#define BFA_VERSION_LEN 64
21
22#endif
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_vf.h b/drivers/scsi/bfa/include/defs/bfa_defs_vf.h
deleted file mode 100644
index 3235be5e9423..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_vf.h
+++ /dev/null
@@ -1,74 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_VF_H__
19#define __BFA_DEFS_VF_H__
20
21#include <bfa_os_inc.h>
22#include <defs/bfa_defs_port.h>
23#include <protocol/types.h>
24
25/**
26 * VF states
27 */
28enum bfa_vf_state {
29 BFA_VF_UNINIT = 0, /* fabric is not yet initialized */
30 BFA_VF_LINK_DOWN = 1, /* link is down */
31 BFA_VF_FLOGI = 2, /* flogi is in progress */
32 BFA_VF_AUTH = 3, /* authentication in progress */
33 BFA_VF_NOFABRIC = 4, /* fabric is not present */
34 BFA_VF_ONLINE = 5, /* login to fabric is complete */
35 BFA_VF_EVFP = 6, /* EVFP is in progress */
36 BFA_VF_ISOLATED = 7, /* port isolated due to vf_id mismatch */
37};
38
39/**
40 * VF statistics
41 */
42struct bfa_vf_stats_s {
43 u32 flogi_sent; /* Num FLOGIs sent */
44 u32 flogi_rsp_err; /* FLOGI response errors */
45 u32 flogi_acc_err; /* FLOGI accept errors */
46 u32 flogi_accepts; /* FLOGI accepts received */
47 u32 flogi_rejects; /* FLOGI rejects received */
48 u32 flogi_unknown_rsp; /* Unknown responses for FLOGI */
49 u32 flogi_alloc_wait; /* Allocation waits prior to
50 * sending FLOGI
51 */
52 u32 flogi_rcvd; /* FLOGIs received */
53 u32 flogi_rejected; /* Incoming FLOGIs rejected */
54 u32 fabric_onlines; /* Internal fabric online
55 * notification sent to other
56 * modules
57 */
58 u32 fabric_offlines; /* Internal fabric offline
59 * notification sent to other
60 * modules
61 */
62 u32 resvd;
63};
64
65/**
66 * VF attributes returned in queries
67 */
68struct bfa_vf_attr_s {
69 enum bfa_vf_state state; /* VF state */
70 u32 rsvd;
71 wwn_t fabric_name; /* fabric name */
72};
73
74#endif /* __BFA_DEFS_VF_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_vport.h b/drivers/scsi/bfa/include/defs/bfa_defs_vport.h
deleted file mode 100644
index 9f021f43b3b4..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_vport.h
+++ /dev/null
@@ -1,91 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_VPORT_H__
19#define __BFA_DEFS_VPORT_H__
20
21#include <bfa_os_inc.h>
22#include <defs/bfa_defs_port.h>
23#include <protocol/types.h>
24
25/**
26 * VPORT states
27 */
28enum bfa_vport_state {
29 BFA_FCS_VPORT_UNINIT = 0,
30 BFA_FCS_VPORT_CREATED = 1,
31 BFA_FCS_VPORT_OFFLINE = 1,
32 BFA_FCS_VPORT_FDISC_SEND = 2,
33 BFA_FCS_VPORT_FDISC = 3,
34 BFA_FCS_VPORT_FDISC_RETRY = 4,
35 BFA_FCS_VPORT_ONLINE = 5,
36 BFA_FCS_VPORT_DELETING = 6,
37 BFA_FCS_VPORT_CLEANUP = 6,
38 BFA_FCS_VPORT_LOGO_SEND = 7,
39 BFA_FCS_VPORT_LOGO = 8,
40 BFA_FCS_VPORT_ERROR = 9,
41 BFA_FCS_VPORT_MAX_STATE,
42};
43
44/**
45 * vport statistics
46 */
47struct bfa_vport_stats_s {
48 struct bfa_port_stats_s port_stats; /* base class (port) stats */
49 /*
50 * TODO - remove
51 */
52
53 u32 fdisc_sent; /* num fdisc sent */
54 u32 fdisc_accepts; /* fdisc accepts */
55 u32 fdisc_retries; /* fdisc retries */
56 u32 fdisc_timeouts; /* fdisc timeouts */
57 u32 fdisc_rsp_err; /* fdisc response error */
58 u32 fdisc_acc_bad; /* bad fdisc accepts */
59 u32 fdisc_rejects; /* fdisc rejects */
60 u32 fdisc_unknown_rsp;
61 /*
62 *!< fdisc rsp unknown error
63 */
64 u32 fdisc_alloc_wait;/* fdisc req (fcxp)alloc wait */
65
66 u32 logo_alloc_wait;/* logo req (fcxp) alloc wait */
67 u32 logo_sent; /* logo sent */
68 u32 logo_accepts; /* logo accepts */
69 u32 logo_rejects; /* logo rejects */
70 u32 logo_rsp_err; /* logo rsp errors */
71 u32 logo_unknown_rsp;
72 /* logo rsp unknown errors */
73
74 u32 fab_no_npiv; /* fabric does not support npiv */
75
76 u32 fab_offline; /* offline events from fab SM */
77 u32 fab_online; /* online events from fab SM */
78 u32 fab_cleanup; /* cleanup request from fab SM */
79 u32 rsvd;
80};
81
82/**
83 * BFA vport attribute returned in queries
84 */
85struct bfa_vport_attr_s {
86 struct bfa_port_attr_s port_attr; /* base class (port) attributes */
87 enum bfa_vport_state vport_state; /* vport state */
88 u32 rsvd;
89};
90
91#endif /* __BFA_DEFS_VPORT_H__ */
diff --git a/drivers/scsi/bfa/include/fcb/bfa_fcb.h b/drivers/scsi/bfa/include/fcb/bfa_fcb.h
deleted file mode 100644
index 2963b0bc30e7..000000000000
--- a/drivers/scsi/bfa/include/fcb/bfa_fcb.h
+++ /dev/null
@@ -1,33 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcb.h BFA FCS callback interfaces
20 */
21
22#ifndef __BFA_FCB_H__
23#define __BFA_FCB_H__
24
25/**
26 * fcb Main fcs callbacks
27 */
28
29void bfa_fcb_exit(struct bfad_s *bfad);
30
31
32
33#endif /* __BFA_FCB_H__ */
diff --git a/drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h b/drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h
deleted file mode 100644
index 52585d3dd891..000000000000
--- a/drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h
+++ /dev/null
@@ -1,75 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19* : bfad_fcpim.h - BFA FCS initiator mode remote port callbacks
20 */
21
22#ifndef __BFAD_FCB_FCPIM_H__
23#define __BFAD_FCB_FCPIM_H__
24
25struct bfad_itnim_s;
26
27/*
28 * RPIM callbacks
29 */
30
31/**
32 * Memory allocation for remote port instance. Called before PRLI is
33 * initiated to the remote target port.
34 *
35 * @param[in] bfad - driver instance
36 * @param[out] itnim - FCS remote port (IM) instance
37 * @param[out] itnim_drv - driver remote port (IM) instance
38 *
39 * @return None
40 */
41void bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
42 struct bfad_itnim_s **itnim_drv);
43
44/**
45 * Free remote port (IM) instance.
46 *
47 * @param[in] bfad - driver instance
48 * @param[in] itnim_drv - driver remote port instance
49 *
50 * @return None
51 */
52void bfa_fcb_itnim_free(struct bfad_s *bfad,
53 struct bfad_itnim_s *itnim_drv);
54
55/**
56 * Notification of when login with a remote target device is complete.
57 *
58 * @param[in] itnim_drv - driver remote port instance
59 *
60 * @return None
61 */
62void bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv);
63
64/**
65 * Notification when login with the remote device is severed.
66 *
67 * @param[in] itnim_drv - driver remote port instance
68 *
69 * @return None
70 */
71void bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv);
72
73void bfa_fcb_itnim_tov(struct bfad_itnim_s *itnim_drv);
74
75#endif /* __BFAD_FCB_FCPIM_H__ */
diff --git a/drivers/scsi/bfa/include/fcb/bfa_fcb_port.h b/drivers/scsi/bfa/include/fcb/bfa_fcb_port.h
deleted file mode 100644
index 5fd7f986fa32..000000000000
--- a/drivers/scsi/bfa/include/fcb/bfa_fcb_port.h
+++ /dev/null
@@ -1,113 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcb_port.h BFA FCS virtual port driver interfaces
20 */
21
22#ifndef __BFA_FCB_PORT_H__
23#define __BFA_FCB_PORT_H__
24
25#include <fcb/bfa_fcb_vport.h>
26/**
27 * fcs_port_fcb FCS port driver interfaces
28 */
29
30/*
31 * Forward declarations
32 */
33struct bfad_port_s;
34
35/*
36 * Callback functions from BFA FCS to driver
37 */
38
39/**
40 * Call from FCS to driver module when a port is instantiated. The port
41 * can be a base port or a virtual port with in the base fabric or
42 * a virtual fabric.
43 *
44 * On this callback, driver is supposed to create scsi_host, scsi_tgt or
45 * network interfaces bases on ports personality/roles.
46 *
47 * base port of base fabric: vf_drv == NULL && vp_drv == NULL
48 * vport of base fabric: vf_drv == NULL && vp_drv != NULL
49 * base port of VF: vf_drv != NULL && vp_drv == NULL
50 * vport of VF: vf_drv != NULL && vp_drv != NULL
51 *
52 * @param[in] bfad - driver instance
53 * @param[in] port - FCS port instance
54 * @param[in] roles - port roles: IM, TM, IP
55 * @param[in] vf_drv - VF driver instance, NULL if base fabric (no VF)
56 * @param[in] vp_drv - vport driver instance, NULL if base port
57 *
58 * @return None
59 */
60struct bfad_port_s *bfa_fcb_port_new(struct bfad_s *bfad,
61 struct bfa_fcs_port_s *port,
62 enum bfa_port_role roles, struct bfad_vf_s *vf_drv,
63 struct bfad_vport_s *vp_drv);
64
65/**
66 * Call from FCS to driver module when a port is deleted. The port
67 * can be a base port or a virtual port with in the base fabric or
68 * a virtual fabric.
69 *
70 * @param[in] bfad - driver instance
71 * @param[in] roles - port roles: IM, TM, IP
72 * @param[in] vf_drv - VF driver instance, NULL if base fabric (no VF)
73 * @param[in] vp_drv - vport driver instance, NULL if base port
74 *
75 * @return None
76 */
77void bfa_fcb_port_delete(struct bfad_s *bfad, enum bfa_port_role roles,
78 struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv);
79
80/**
81 * Notification when port transitions to ONLINE state.
82 *
83 * Online notification is a logical link up for the local port. This
84 * notification is sent after a successfull FLOGI, or a successful
85 * link initialization in proviate-loop or N2N topologies.
86 *
87 * @param[in] bfad - driver instance
88 * @param[in] roles - port roles: IM, TM, IP
89 * @param[in] vf_drv - VF driver instance, NULL if base fabric (no VF)
90 * @param[in] vp_drv - vport driver instance, NULL if base port
91 *
92 * @return None
93 */
94void bfa_fcb_port_online(struct bfad_s *bfad, enum bfa_port_role roles,
95 struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv);
96
97/**
98 * Notification when port transitions to OFFLINE state.
99 *
100 * Offline notification is a logical link down for the local port.
101 *
102 * @param[in] bfad - driver instance
103 * @param[in] roles - port roles: IM, TM, IP
104 * @param[in] vf_drv - VF driver instance, NULL if base fabric (no VF)
105 * @param[in] vp_drv - vport driver instance, NULL if base port
106 *
107 * @return None
108 */
109void bfa_fcb_port_offline(struct bfad_s *bfad, enum bfa_port_role roles,
110 struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv);
111
112
113#endif /* __BFA_FCB_PORT_H__ */
diff --git a/drivers/scsi/bfa/include/fcb/bfa_fcb_rport.h b/drivers/scsi/bfa/include/fcb/bfa_fcb_rport.h
deleted file mode 100644
index e0261bb6d1c1..000000000000
--- a/drivers/scsi/bfa/include/fcb/bfa_fcb_rport.h
+++ /dev/null
@@ -1,80 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcb_rport.h BFA FCS rport driver interfaces
20 */
21
22#ifndef __BFA_FCB_RPORT_H__
23#define __BFA_FCB_RPORT_H__
24
25/**
26 * fcs_rport_fcb Remote port driver interfaces
27 */
28
29
30struct bfad_rport_s;
31
32/*
33 * Callback functions from BFA FCS to driver
34 */
35
36/**
37 * Completion callback for bfa_fcs_rport_add().
38 *
39 * @param[in] rport_drv - driver instance of rport
40 *
41 * @return None
42 */
43void bfa_fcb_rport_add(struct bfad_rport_s *rport_drv);
44
45/**
46 * Completion callback for bfa_fcs_rport_remove().
47 *
48 * @param[in] rport_drv - driver instance of rport
49 *
50 * @return None
51 */
52void bfa_fcb_rport_remove(struct bfad_rport_s *rport_drv);
53
54/**
55 * Call to allocate a rport instance.
56 *
57 * @param[in] bfad - driver instance
58 * @param[out] rport - BFA FCS instance of rport
59 * @param[out] rport_drv - driver instance of rport
60 *
61 * @retval BFA_STATUS_OK - successfully allocated
62 * @retval BFA_STATUS_ENOMEM - cannot allocate
63 */
64bfa_status_t bfa_fcb_rport_alloc(struct bfad_s *bfad,
65 struct bfa_fcs_rport_s **rport,
66 struct bfad_rport_s **rport_drv);
67
68/**
69 * Call to free rport memory resources.
70 *
71 * @param[in] bfad - driver instance
72 * @param[in] rport_drv - driver instance of rport
73 *
74 * @return None
75 */
76void bfa_fcb_rport_free(struct bfad_s *bfad, struct bfad_rport_s **rport_drv);
77
78
79
80#endif /* __BFA_FCB_RPORT_H__ */
diff --git a/drivers/scsi/bfa/include/fcb/bfa_fcb_vf.h b/drivers/scsi/bfa/include/fcb/bfa_fcb_vf.h
deleted file mode 100644
index cfd3fac0a4e2..000000000000
--- a/drivers/scsi/bfa/include/fcb/bfa_fcb_vf.h
+++ /dev/null
@@ -1,47 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcb_vf.h BFA FCS virtual fabric driver interfaces
20 */
21
22#ifndef __BFA_FCB_VF_H__
23#define __BFA_FCB_VF_H__
24
25/**
26 * fcs_vf_fcb Virtual fabric driver intrefaces
27 */
28
29
30struct bfad_vf_s;
31
32/*
33 * Callback functions from BFA FCS to driver
34 */
35
36/**
37 * Completion callback for bfa_fcs_vf_stop().
38 *
39 * @param[in] vf_drv - driver instance of vf
40 *
41 * @return None
42 */
43void bfa_fcb_vf_stop(struct bfad_vf_s *vf_drv);
44
45
46
47#endif /* __BFA_FCB_VF_H__ */
diff --git a/drivers/scsi/bfa/include/fcb/bfa_fcb_vport.h b/drivers/scsi/bfa/include/fcb/bfa_fcb_vport.h
deleted file mode 100644
index cfd6ba7c47ec..000000000000
--- a/drivers/scsi/bfa/include/fcb/bfa_fcb_vport.h
+++ /dev/null
@@ -1,48 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcb_vport.h BFA FCS virtual port driver interfaces
20 */
21
22#ifndef __BFA_FCB_VPORT_H__
23#define __BFA_FCB_VPORT_H__
24
25/**
26 * fcs_vport_fcb Virtual port driver interfaces
27 */
28
29
30struct bfad_vport_s;
31
32/*
33 * Callback functions from BFA FCS to driver
34 */
35
36/**
37 * Completion callback for bfa_fcs_vport_delete().
38 *
39 * @param[in] vport_drv - driver instance of vport
40 *
41 * @return None
42 */
43void bfa_fcb_vport_delete(struct bfad_vport_s *vport_drv);
44void bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s);
45
46
47
48#endif /* __BFA_FCB_VPORT_H__ */
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs.h b/drivers/scsi/bfa/include/fcs/bfa_fcs.h
deleted file mode 100644
index 54e5b81ab2a3..000000000000
--- a/drivers/scsi/bfa/include/fcs/bfa_fcs.h
+++ /dev/null
@@ -1,76 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_FCS_H__
19#define __BFA_FCS_H__
20
21#include <cs/bfa_debug.h>
22#include <defs/bfa_defs_status.h>
23#include <defs/bfa_defs_version.h>
24#include <bfa.h>
25#include <fcs/bfa_fcs_fabric.h>
26
27#define BFA_FCS_OS_STR_LEN 64
28
29struct bfa_fcs_stats_s {
30 struct {
31 u32 untagged; /* untagged receive frames */
32 u32 tagged; /* tagged receive frames */
33 u32 vfid_unknown; /* VF id is unknown */
34 } uf;
35};
36
37struct bfa_fcs_driver_info_s {
38 u8 version[BFA_VERSION_LEN]; /* Driver Version */
39 u8 host_machine_name[BFA_FCS_OS_STR_LEN];
40 u8 host_os_name[BFA_FCS_OS_STR_LEN]; /* OS name and version */
41 u8 host_os_patch[BFA_FCS_OS_STR_LEN];/* patch or service pack */
42 u8 os_device_name[BFA_FCS_OS_STR_LEN]; /* Driver Device Name */
43};
44
45struct bfa_fcs_s {
46 struct bfa_s *bfa; /* corresponding BFA bfa instance */
47 struct bfad_s *bfad; /* corresponding BDA driver instance */
48 struct bfa_log_mod_s *logm; /* driver logging module instance */
49 struct bfa_trc_mod_s *trcmod; /* tracing module */
50 struct bfa_aen_s *aen; /* aen component */
51 bfa_boolean_t vf_enabled; /* VF mode is enabled */
52 bfa_boolean_t fdmi_enabled; /*!< FDMI is enabled */
53 bfa_boolean_t min_cfg; /* min cfg enabled/disabled */
54 u16 port_vfid; /* port default VF ID */
55 struct bfa_fcs_driver_info_s driver_info;
56 struct bfa_fcs_fabric_s fabric; /* base fabric state machine */
57 struct bfa_fcs_stats_s stats; /* FCS statistics */
58 struct bfa_wc_s wc; /* waiting counter */
59};
60
61/*
62 * bfa fcs API functions
63 */
64void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa,
65 struct bfad_s *bfad, bfa_boolean_t min_cfg);
66void bfa_fcs_init(struct bfa_fcs_s *fcs);
67void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
68 struct bfa_fcs_driver_info_s *driver_info);
69void bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable);
70void bfa_fcs_exit(struct bfa_fcs_s *fcs);
71void bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod);
72void bfa_fcs_log_init(struct bfa_fcs_s *fcs, struct bfa_log_mod_s *logmod);
73void bfa_fcs_aen_init(struct bfa_fcs_s *fcs, struct bfa_aen_s *aen);
74void bfa_fcs_start(struct bfa_fcs_s *fcs);
75
76#endif /* __BFA_FCS_H__ */
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_auth.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_auth.h
deleted file mode 100644
index 28c4c9ff08b3..000000000000
--- a/drivers/scsi/bfa/include/fcs/bfa_fcs_auth.h
+++ /dev/null
@@ -1,82 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_FCS_AUTH_H__
19#define __BFA_FCS_AUTH_H__
20
21struct bfa_fcs_s;
22
23#include <defs/bfa_defs_status.h>
24#include <defs/bfa_defs_auth.h>
25#include <defs/bfa_defs_vf.h>
26#include <cs/bfa_q.h>
27#include <cs/bfa_sm.h>
28#include <defs/bfa_defs_pport.h>
29#include <fcs/bfa_fcs_lport.h>
30#include <protocol/fc_sp.h>
31
32struct bfa_fcs_fabric_s;
33
34
35
36struct bfa_fcs_auth_s {
37 bfa_sm_t sm; /* state machine */
38 bfa_boolean_t policy; /* authentication enabled/disabled */
39 enum bfa_auth_status status; /* authentication status */
40 enum auth_rjt_codes rjt_code; /* auth reject status */
41 enum auth_rjt_code_exps rjt_code_exp; /* auth reject reason */
42 enum bfa_auth_algo algo; /* Authentication algorithm */
43 struct bfa_auth_stats_s stats; /* Statistics */
44 enum auth_dh_gid group; /* DH(diffie-hellman) Group */
45 enum bfa_auth_secretsource source; /* Secret source */
46 char secret[BFA_AUTH_SECRET_STRING_LEN];
47 /* secret string */
48 u8 secret_len;
49 /* secret string length */
50 u8 nretries;
51 /* number of retries */
52 struct bfa_fcs_fabric_s *fabric;/* pointer to fabric */
53 u8 sentcode; /* pointer to response data */
54 u8 *response; /* pointer to response data */
55 struct bfa_timer_s delay_timer; /* delay timer */
56 struct bfa_fcxp_s *fcxp; /* pointer to fcxp */
57 struct bfa_fcxp_wqe_s fcxp_wqe;
58};
59
60/**
61 * bfa fcs authentication public functions
62 */
63bfa_status_t bfa_fcs_auth_get_attr(struct bfa_fcs_s *port,
64 struct bfa_auth_attr_s *attr);
65bfa_status_t bfa_fcs_auth_set_policy(struct bfa_fcs_s *port,
66 bfa_boolean_t policy);
67enum bfa_auth_status bfa_fcs_auth_get_status(struct bfa_fcs_s *port);
68bfa_status_t bfa_fcs_auth_set_algo(struct bfa_fcs_s *port,
69 enum bfa_auth_algo algo);
70bfa_status_t bfa_fcs_auth_get_stats(struct bfa_fcs_s *port,
71 struct bfa_auth_stats_s *stats);
72bfa_status_t bfa_fcs_auth_set_dh_group(struct bfa_fcs_s *port, int group);
73bfa_status_t bfa_fcs_auth_set_secretstring(struct bfa_fcs_s *port,
74 char *secret);
75bfa_status_t bfa_fcs_auth_set_secretstring_encrypt(struct bfa_fcs_s *port,
76 u32 secret[], u32 len);
77bfa_status_t bfa_fcs_auth_set_secretsource(struct bfa_fcs_s *port,
78 enum bfa_auth_secretsource src);
79bfa_status_t bfa_fcs_auth_reset_stats(struct bfa_fcs_s *port);
80bfa_status_t bfa_fcs_auth_reinit(struct bfa_fcs_s *port);
81
82#endif /* __BFA_FCS_AUTH_H__ */
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h
deleted file mode 100644
index 08b79d5e46f3..000000000000
--- a/drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h
+++ /dev/null
@@ -1,112 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_FCS_FABRIC_H__
19#define __BFA_FCS_FABRIC_H__
20
21struct bfa_fcs_s;
22
23#include <defs/bfa_defs_status.h>
24#include <defs/bfa_defs_vf.h>
25#include <cs/bfa_q.h>
26#include <cs/bfa_sm.h>
27#include <defs/bfa_defs_pport.h>
28#include <fcs/bfa_fcs_lport.h>
29#include <protocol/fc_sp.h>
30#include <fcs/bfa_fcs_auth.h>
31
32/*
33 * forward declaration
34 */
35struct bfad_vf_s;
36
37enum bfa_fcs_fabric_type {
38 BFA_FCS_FABRIC_UNKNOWN = 0,
39 BFA_FCS_FABRIC_SWITCHED = 1,
40 BFA_FCS_FABRIC_PLOOP = 2,
41 BFA_FCS_FABRIC_N2N = 3,
42};
43
44
45struct bfa_fcs_fabric_s {
46 struct list_head qe; /* queue element */
47 bfa_sm_t sm; /* state machine */
48 struct bfa_fcs_s *fcs; /* FCS instance */
49 struct bfa_fcs_port_s bport; /* base logical port */
50 enum bfa_fcs_fabric_type fab_type; /* fabric type */
51 enum bfa_pport_type oper_type; /* current link topology */
52 u8 is_vf; /* is virtual fabric? */
53 u8 is_npiv; /* is NPIV supported ? */
54 u8 is_auth; /* is Security/Auth supported ? */
55 u16 bb_credit; /* BB credit from fabric */
56 u16 vf_id; /* virtual fabric ID */
57 u16 num_vports; /* num vports */
58 u16 rsvd;
59 struct list_head vport_q; /* queue of virtual ports */
60 struct list_head vf_q; /* queue of virtual fabrics */
61 struct bfad_vf_s *vf_drv; /* driver vf structure */
62 struct bfa_timer_s link_timer; /* Link Failure timer. Vport */
63 wwn_t fabric_name; /* attached fabric name */
64 bfa_boolean_t auth_reqd; /* authentication required */
65 struct bfa_timer_s delay_timer; /* delay timer */
66 union {
67 u16 swp_vfid;/* switch port VF id */
68 } event_arg;
69 struct bfa_fcs_auth_s auth; /* authentication config */
70 struct bfa_wc_s wc; /* wait counter for delete */
71 struct bfa_vf_stats_s stats; /* fabric/vf stats */
72 struct bfa_lps_s *lps; /* lport login services */
73 u8 fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ]; /* attached
74 * fabric's ip addr
75 */
76};
77
78#define bfa_fcs_fabric_npiv_capable(__f) ((__f)->is_npiv)
79#define bfa_fcs_fabric_is_switched(__f) \
80 ((__f)->fab_type == BFA_FCS_FABRIC_SWITCHED)
81
82/**
83 * The design calls for a single implementation of base fabric and vf.
84 */
85#define bfa_fcs_vf_t struct bfa_fcs_fabric_s
86
87struct bfa_vf_event_s {
88 u32 undefined;
89};
90
91/**
92 * bfa fcs vf public functions
93 */
94bfa_status_t bfa_fcs_vf_mode_enable(struct bfa_fcs_s *fcs, u16 vf_id);
95bfa_status_t bfa_fcs_vf_mode_disable(struct bfa_fcs_s *fcs);
96bfa_status_t bfa_fcs_vf_create(bfa_fcs_vf_t *vf, struct bfa_fcs_s *fcs,
97 u16 vf_id, struct bfa_port_cfg_s *port_cfg,
98 struct bfad_vf_s *vf_drv);
99bfa_status_t bfa_fcs_vf_delete(bfa_fcs_vf_t *vf);
100void bfa_fcs_vf_start(bfa_fcs_vf_t *vf);
101bfa_status_t bfa_fcs_vf_stop(bfa_fcs_vf_t *vf);
102void bfa_fcs_vf_list(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs);
103void bfa_fcs_vf_list_all(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs);
104void bfa_fcs_vf_get_attr(bfa_fcs_vf_t *vf, struct bfa_vf_attr_s *vf_attr);
105void bfa_fcs_vf_get_stats(bfa_fcs_vf_t *vf,
106 struct bfa_vf_stats_s *vf_stats);
107void bfa_fcs_vf_clear_stats(bfa_fcs_vf_t *vf);
108void bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t vpwwn[], int *nports);
109bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id);
110struct bfad_vf_s *bfa_fcs_vf_get_drv_vf(bfa_fcs_vf_t *vf);
111
112#endif /* __BFA_FCS_FABRIC_H__ */
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_fcpim.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_fcpim.h
deleted file mode 100644
index 9a35ecf5cdf0..000000000000
--- a/drivers/scsi/bfa/include/fcs/bfa_fcs_fcpim.h
+++ /dev/null
@@ -1,132 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcs_fcpim.h BFA FCS FCP Initiator Mode interfaces/defines.
20 */
21
22#ifndef __BFA_FCS_FCPIM_H__
23#define __BFA_FCS_FCPIM_H__
24
25#include <defs/bfa_defs_status.h>
26#include <defs/bfa_defs_itnim.h>
27#include <fcs/bfa_fcs.h>
28#include <fcs/bfa_fcs_rport.h>
29#include <fcs/bfa_fcs_lport.h>
30#include <bfa_fcpim.h>
31
32/*
33 * forward declarations
34 */
35struct bfad_itnim_s;
36
37struct bfa_fcs_itnim_s {
38 bfa_sm_t sm; /* state machine */
39 struct bfa_fcs_rport_s *rport; /* parent remote rport */
40 struct bfad_itnim_s *itnim_drv; /* driver peer instance */
41 struct bfa_fcs_s *fcs; /* fcs instance */
42 struct bfa_timer_s timer; /* timer functions */
43 struct bfa_itnim_s *bfa_itnim; /* BFA itnim struct */
44 u32 prli_retries; /* max prli retry attempts */
45 bfa_boolean_t seq_rec; /* seq recovery support */
46 bfa_boolean_t rec_support; /* REC supported */
47 bfa_boolean_t conf_comp; /* FCP_CONF support */
48 bfa_boolean_t task_retry_id; /* task retry id supp */
49 struct bfa_fcxp_wqe_s fcxp_wqe; /* wait qelem for fcxp */
50 struct bfa_fcxp_s *fcxp; /* FCXP in use */
51 struct bfa_itnim_stats_s stats; /* itn statistics */
52};
53
54
55static inline struct bfad_port_s *
56bfa_fcs_itnim_get_drvport(struct bfa_fcs_itnim_s *itnim)
57{
58 return itnim->rport->port->bfad_port;
59}
60
61
62static inline struct bfa_fcs_port_s *
63bfa_fcs_itnim_get_port(struct bfa_fcs_itnim_s *itnim)
64{
65 return itnim->rport->port;
66}
67
68
69static inline wwn_t
70bfa_fcs_itnim_get_nwwn(struct bfa_fcs_itnim_s *itnim)
71{
72 return itnim->rport->nwwn;
73}
74
75
76static inline wwn_t
77bfa_fcs_itnim_get_pwwn(struct bfa_fcs_itnim_s *itnim)
78{
79 return itnim->rport->pwwn;
80}
81
82
83static inline u32
84bfa_fcs_itnim_get_fcid(struct bfa_fcs_itnim_s *itnim)
85{
86 return itnim->rport->pid;
87}
88
89
90static inline u32
91bfa_fcs_itnim_get_maxfrsize(struct bfa_fcs_itnim_s *itnim)
92{
93 return itnim->rport->maxfrsize;
94}
95
96
97static inline enum fc_cos
98bfa_fcs_itnim_get_cos(struct bfa_fcs_itnim_s *itnim)
99{
100 return itnim->rport->fc_cos;
101}
102
103
104static inline struct bfad_itnim_s *
105bfa_fcs_itnim_get_drvitn(struct bfa_fcs_itnim_s *itnim)
106{
107 return itnim->itnim_drv;
108}
109
110
111static inline struct bfa_itnim_s *
112bfa_fcs_itnim_get_halitn(struct bfa_fcs_itnim_s *itnim)
113{
114 return itnim->bfa_itnim;
115}
116
117/**
118 * bfa fcs FCP Initiator mode API functions
119 */
120void bfa_fcs_itnim_get_attr(struct bfa_fcs_itnim_s *itnim,
121 struct bfa_itnim_attr_s *attr);
122void bfa_fcs_itnim_get_stats(struct bfa_fcs_itnim_s *itnim,
123 struct bfa_itnim_stats_s *stats);
124struct bfa_fcs_itnim_s *bfa_fcs_itnim_lookup(struct bfa_fcs_port_s *port,
125 wwn_t rpwwn);
126bfa_status_t bfa_fcs_itnim_attr_get(struct bfa_fcs_port_s *port, wwn_t rpwwn,
127 struct bfa_itnim_attr_s *attr);
128bfa_status_t bfa_fcs_itnim_stats_get(struct bfa_fcs_port_s *port, wwn_t rpwwn,
129 struct bfa_itnim_stats_s *stats);
130bfa_status_t bfa_fcs_itnim_stats_clear(struct bfa_fcs_port_s *port,
131 wwn_t rpwwn);
132#endif /* __BFA_FCS_FCPIM_H__ */
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_fdmi.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_fdmi.h
deleted file mode 100644
index 4441fffc9c82..000000000000
--- a/drivers/scsi/bfa/include/fcs/bfa_fcs_fdmi.h
+++ /dev/null
@@ -1,63 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcs_fdmi.h BFA fcs fdmi module public interface
20 */
21
22#ifndef __BFA_FCS_FDMI_H__
23#define __BFA_FCS_FDMI_H__
24#include <bfa_os_inc.h>
25#include <protocol/fdmi.h>
26
27#define BFA_FCS_FDMI_SUPORTED_SPEEDS (FDMI_TRANS_SPEED_1G | \
28 FDMI_TRANS_SPEED_2G | \
29 FDMI_TRANS_SPEED_4G | \
30 FDMI_TRANS_SPEED_8G)
31
32/*
33* HBA Attribute Block : BFA internal representation. Note : Some variable
34* sizes have been trimmed to suit BFA For Ex : Model will be "Brocade". Based
35 * on this the size has been reduced to 16 bytes from the standard's 64 bytes.
36 */
37struct bfa_fcs_fdmi_hba_attr_s {
38 wwn_t node_name;
39 u8 manufacturer[64];
40 u8 serial_num[64];
41 u8 model[16];
42 u8 model_desc[256];
43 u8 hw_version[8];
44 u8 driver_version[8];
45 u8 option_rom_ver[BFA_VERSION_LEN];
46 u8 fw_version[8];
47 u8 os_name[256];
48 u32 max_ct_pyld;
49};
50
51/*
52 * Port Attribute Block
53 */
54struct bfa_fcs_fdmi_port_attr_s {
55 u8 supp_fc4_types[32]; /* supported FC4 types */
56 u32 supp_speed; /* supported speed */
57 u32 curr_speed; /* current Speed */
58 u32 max_frm_size; /* max frame size */
59 u8 os_device_name[256]; /* OS device Name */
60 u8 host_name[256]; /* host name */
61};
62
63#endif /* __BFA_FCS_FDMI_H__ */
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h
deleted file mode 100644
index ceaefd3060f4..000000000000
--- a/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h
+++ /dev/null
@@ -1,219 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcs_port.h BFA fcs port module public interface
20 */
21
22#ifndef __BFA_FCS_PORT_H__
23#define __BFA_FCS_PORT_H__
24
25#include <defs/bfa_defs_status.h>
26#include <defs/bfa_defs_port.h>
27#include <defs/bfa_defs_pport.h>
28#include <defs/bfa_defs_rport.h>
29#include <cs/bfa_q.h>
30#include <bfa_svc.h>
31#include <cs/bfa_wc.h>
32
33struct bfa_fcs_s;
34struct bfa_fcs_fabric_s;
35
36/*
37 * Maximum Rports supported per port (physical/logical).
38 */
39#define BFA_FCS_MAX_RPORTS_SUPP 256 /* @todo : tentative value */
40
41
42struct bfa_fcs_port_ns_s {
43 bfa_sm_t sm; /* state machine */
44 struct bfa_timer_s timer;
45 struct bfa_fcs_port_s *port; /* parent port */
46 struct bfa_fcxp_s *fcxp;
47 struct bfa_fcxp_wqe_s fcxp_wqe;
48};
49
50
51struct bfa_fcs_port_scn_s {
52 bfa_sm_t sm; /* state machine */
53 struct bfa_timer_s timer;
54 struct bfa_fcs_port_s *port; /* parent port */
55 struct bfa_fcxp_s *fcxp;
56 struct bfa_fcxp_wqe_s fcxp_wqe;
57};
58
59
60struct bfa_fcs_port_fdmi_s {
61 bfa_sm_t sm; /* state machine */
62 struct bfa_timer_s timer;
63 struct bfa_fcs_port_ms_s *ms; /* parent ms */
64 struct bfa_fcxp_s *fcxp;
65 struct bfa_fcxp_wqe_s fcxp_wqe;
66 u8 retry_cnt; /* retry count */
67 u8 rsvd[3];
68};
69
70
71struct bfa_fcs_port_ms_s {
72 bfa_sm_t sm; /* state machine */
73 struct bfa_timer_s timer;
74 struct bfa_fcs_port_s *port; /* parent port */
75 struct bfa_fcxp_s *fcxp;
76 struct bfa_fcxp_wqe_s fcxp_wqe;
77 struct bfa_fcs_port_fdmi_s fdmi; /* FDMI component of MS */
78 u8 retry_cnt; /* retry count */
79 u8 rsvd[3];
80};
81
82
83struct bfa_fcs_port_fab_s {
84 struct bfa_fcs_port_ns_s ns; /* NS component of port */
85 struct bfa_fcs_port_scn_s scn; /* scn component of port */
86 struct bfa_fcs_port_ms_s ms; /* MS component of port */
87};
88
89
90
91#define MAX_ALPA_COUNT 127
92
93struct bfa_fcs_port_loop_s {
94 u8 num_alpa; /* Num of ALPA entries in the map */
95 u8 alpa_pos_map[MAX_ALPA_COUNT]; /* ALPA Positional
96 *Map */
97 struct bfa_fcs_port_s *port; /* parent port */
98};
99
100
101
102struct bfa_fcs_port_n2n_s {
103 u32 rsvd;
104 u16 reply_oxid; /* ox_id from the req flogi to be
105 *used in flogi acc */
106 wwn_t rem_port_wwn; /* Attached port's wwn */
107};
108
109
110union bfa_fcs_port_topo_u {
111 struct bfa_fcs_port_fab_s pfab;
112 struct bfa_fcs_port_loop_s ploop;
113 struct bfa_fcs_port_n2n_s pn2n;
114};
115
116
117struct bfa_fcs_port_s {
118 struct list_head qe; /* used by port/vport */
119 bfa_sm_t sm; /* state machine */
120 struct bfa_fcs_fabric_s *fabric;/* parent fabric */
121 struct bfa_port_cfg_s port_cfg;/* port configuration */
122 struct bfa_timer_s link_timer; /* timer for link offline */
123 u32 pid:24; /* FC address */
124 u8 lp_tag; /* lport tag */
125 u16 num_rports; /* Num of r-ports */
126 struct list_head rport_q; /* queue of discovered r-ports */
127 struct bfa_fcs_s *fcs; /* FCS instance */
128 union bfa_fcs_port_topo_u port_topo; /* fabric/loop/n2n details */
129 struct bfad_port_s *bfad_port; /* driver peer instance */
130 struct bfa_fcs_vport_s *vport; /* NULL for base ports */
131 struct bfa_fcxp_s *fcxp;
132 struct bfa_fcxp_wqe_s fcxp_wqe;
133 struct bfa_port_stats_s stats;
134 struct bfa_wc_s wc; /* waiting counter for events */
135};
136
137#define bfa_fcs_lport_t struct bfa_fcs_port_s
138
139/**
140 * Symbolic Name related defines
141 * Total bytes 255.
142 * Physical Port's symbolic name 128 bytes.
143 * For Vports, Vport's symbolic name is appended to the Physical port's
144 * Symbolic Name.
145 *
146 * Physical Port's symbolic name Format : (Total 128 bytes)
147 * Adapter Model number/name : 12 bytes
148 * Driver Version : 10 bytes
149 * Host Machine Name : 30 bytes
150 * Host OS Info : 48 bytes
151 * Host OS PATCH Info : 16 bytes
152 * ( remaining 12 bytes reserved to be used for separator)
153 */
154#define BFA_FCS_PORT_SYMBNAME_SEPARATOR " | "
155
156#define BFA_FCS_PORT_SYMBNAME_MODEL_SZ 12
157#define BFA_FCS_PORT_SYMBNAME_VERSION_SZ 10
158#define BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ 30
159#define BFA_FCS_PORT_SYMBNAME_OSINFO_SZ 48
160#define BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ 16
161
162/**
163 * Get FC port ID for a logical port.
164 */
165#define bfa_fcs_port_get_fcid(_lport) ((_lport)->pid)
166#define bfa_fcs_port_get_pwwn(_lport) ((_lport)->port_cfg.pwwn)
167#define bfa_fcs_port_get_nwwn(_lport) ((_lport)->port_cfg.nwwn)
168#define bfa_fcs_port_get_psym_name(_lport) ((_lport)->port_cfg.sym_name)
169#define bfa_fcs_port_is_initiator(_lport) \
170 ((_lport)->port_cfg.roles & BFA_PORT_ROLE_FCP_IM)
171#define bfa_fcs_port_is_target(_lport) \
172 ((_lport)->port_cfg.roles & BFA_PORT_ROLE_FCP_TM)
173#define bfa_fcs_port_get_nrports(_lport) \
174 ((_lport) ? (_lport)->num_rports : 0)
175
176static inline struct bfad_port_s *
177bfa_fcs_port_get_drvport(struct bfa_fcs_port_s *port)
178{
179 return port->bfad_port;
180}
181
182
183#define bfa_fcs_port_get_opertype(_lport) ((_lport)->fabric->oper_type)
184
185
186#define bfa_fcs_port_get_fabric_name(_lport) ((_lport)->fabric->fabric_name)
187
188
189#define bfa_fcs_port_get_fabric_ipaddr(_lport) \
190 ((_lport)->fabric->fabric_ip_addr)
191
192/**
193 * bfa fcs port public functions
194 */
195void bfa_fcs_cfg_base_port(struct bfa_fcs_s *fcs,
196 struct bfa_port_cfg_s *port_cfg);
197struct bfa_fcs_port_s *bfa_fcs_get_base_port(struct bfa_fcs_s *fcs);
198void bfa_fcs_port_get_rports(struct bfa_fcs_port_s *port,
199 wwn_t rport_wwns[], int *nrports);
200
201wwn_t bfa_fcs_port_get_rport(struct bfa_fcs_port_s *port, wwn_t wwn,
202 int index, int nrports, bfa_boolean_t bwwn);
203
204struct bfa_fcs_port_s *bfa_fcs_lookup_port(struct bfa_fcs_s *fcs,
205 u16 vf_id, wwn_t lpwwn);
206
207void bfa_fcs_port_get_info(struct bfa_fcs_port_s *port,
208 struct bfa_port_info_s *port_info);
209void bfa_fcs_port_get_attr(struct bfa_fcs_port_s *port,
210 struct bfa_port_attr_s *port_attr);
211void bfa_fcs_port_get_stats(struct bfa_fcs_port_s *fcs_port,
212 struct bfa_port_stats_s *port_stats);
213void bfa_fcs_port_clear_stats(struct bfa_fcs_port_s *fcs_port);
214enum bfa_pport_speed bfa_fcs_port_get_rport_max_speed(
215 struct bfa_fcs_port_s *port);
216void bfa_fcs_port_enable_ipfc_roles(struct bfa_fcs_port_s *fcs_port);
217void bfa_fcs_port_disable_ipfc_roles(struct bfa_fcs_port_s *fcs_port);
218
219#endif /* __BFA_FCS_PORT_H__ */
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_rport.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_rport.h
deleted file mode 100644
index 3027fc6c7722..000000000000
--- a/drivers/scsi/bfa/include/fcs/bfa_fcs_rport.h
+++ /dev/null
@@ -1,105 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_FCS_RPORT_H__
19#define __BFA_FCS_RPORT_H__
20
21#include <defs/bfa_defs_status.h>
22#include <cs/bfa_q.h>
23#include <fcs/bfa_fcs.h>
24#include <defs/bfa_defs_rport.h>
25
26#define BFA_FCS_RPORT_DEF_DEL_TIMEOUT 90 /* in secs */
27/*
28 * forward declarations
29 */
30struct bfad_rport_s;
31
32struct bfa_fcs_itnim_s;
33struct bfa_fcs_tin_s;
34struct bfa_fcs_iprp_s;
35
36/* Rport Features (RPF) */
37struct bfa_fcs_rpf_s {
38 bfa_sm_t sm; /* state machine */
39 struct bfa_fcs_rport_s *rport; /* parent rport */
40 struct bfa_timer_s timer; /* general purpose timer */
41 struct bfa_fcxp_s *fcxp; /* FCXP needed for discarding */
42 struct bfa_fcxp_wqe_s fcxp_wqe; /* fcxp wait queue element */
43 int rpsc_retries; /* max RPSC retry attempts */
44 enum bfa_pport_speed rpsc_speed; /* Current Speed from RPSC.
45 * O if RPSC fails */
46 enum bfa_pport_speed assigned_speed; /* Speed assigned by the user.
47 * will be used if RPSC is not
48 * supported by the rport */
49};
50
51struct bfa_fcs_rport_s {
52 struct list_head qe; /* used by port/vport */
53 struct bfa_fcs_port_s *port; /* parent FCS port */
54 struct bfa_fcs_s *fcs; /* fcs instance */
55 struct bfad_rport_s *rp_drv; /* driver peer instance */
56 u32 pid; /* port ID of rport */
57 u16 maxfrsize; /* maximum frame size */
58 u16 reply_oxid; /* OX_ID of inbound requests */
59 enum fc_cos fc_cos; /* FC classes of service supp */
60 bfa_boolean_t cisc; /* CISC capable device */
61 bfa_boolean_t prlo; /* processing prlo or LOGO */
62 wwn_t pwwn; /* port wwn of rport */
63 wwn_t nwwn; /* node wwn of rport */
64 struct bfa_rport_symname_s psym_name; /* port symbolic name */
65 bfa_sm_t sm; /* state machine */
66 struct bfa_timer_s timer; /* general purpose timer */
67 struct bfa_fcs_itnim_s *itnim; /* ITN initiator mode role */
68 struct bfa_fcs_tin_s *tin; /* ITN initiator mode role */
69 struct bfa_fcs_iprp_s *iprp; /* IP/FC role */
70 struct bfa_rport_s *bfa_rport; /* BFA Rport */
71 struct bfa_fcxp_s *fcxp; /* FCXP needed for discarding */
72 int plogi_retries; /* max plogi retry attempts */
73 int ns_retries; /* max NS query retry attempts */
74 struct bfa_fcxp_wqe_s fcxp_wqe; /* fcxp wait queue element */
75 struct bfa_rport_stats_s stats; /* rport stats */
76 enum bfa_rport_function scsi_function; /* Initiator/Target */
77 struct bfa_fcs_rpf_s rpf; /* Rport features module */
78};
79
80static inline struct bfa_rport_s *
81bfa_fcs_rport_get_halrport(struct bfa_fcs_rport_s *rport)
82{
83 return rport->bfa_rport;
84}
85
86/**
87 * bfa fcs rport API functions
88 */
89bfa_status_t bfa_fcs_rport_add(struct bfa_fcs_port_s *port, wwn_t *pwwn,
90 struct bfa_fcs_rport_s *rport,
91 struct bfad_rport_s *rport_drv);
92bfa_status_t bfa_fcs_rport_remove(struct bfa_fcs_rport_s *rport);
93void bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
94 struct bfa_rport_attr_s *attr);
95void bfa_fcs_rport_get_stats(struct bfa_fcs_rport_s *rport,
96 struct bfa_rport_stats_s *stats);
97void bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport);
98struct bfa_fcs_rport_s *bfa_fcs_rport_lookup(struct bfa_fcs_port_s *port,
99 wwn_t rpwwn);
100struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn(
101 struct bfa_fcs_port_s *port, wwn_t rnwwn);
102void bfa_fcs_rport_set_del_timeout(u8 rport_tmo);
103void bfa_fcs_rport_set_speed(struct bfa_fcs_rport_s *rport,
104 enum bfa_pport_speed speed);
105#endif /* __BFA_FCS_RPORT_H__ */
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_vport.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_vport.h
deleted file mode 100644
index 0af262430860..000000000000
--- a/drivers/scsi/bfa/include/fcs/bfa_fcs_vport.h
+++ /dev/null
@@ -1,67 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcs_vport.h BFA fcs vport module public interface
20 */
21
22#ifndef __BFA_FCS_VPORT_H__
23#define __BFA_FCS_VPORT_H__
24
25#include <defs/bfa_defs_status.h>
26#include <defs/bfa_defs_port.h>
27#include <defs/bfa_defs_vport.h>
28#include <fcs/bfa_fcs.h>
29#include <fcb/bfa_fcb_vport.h>
30
31struct bfa_fcs_vport_s {
32 struct list_head qe; /* queue elem */
33 bfa_sm_t sm; /* state machine */
34 bfa_fcs_lport_t lport; /* logical port */
35 struct bfa_timer_s timer; /* general purpose timer */
36 struct bfad_vport_s *vport_drv; /* Driver private */
37 struct bfa_vport_stats_s vport_stats; /* vport statistics */
38 struct bfa_lps_s *lps; /* Lport login service */
39 int fdisc_retries;
40};
41
42#define bfa_fcs_vport_get_port(vport) \
43 ((struct bfa_fcs_port_s *)(&vport->port))
44
45/**
46 * bfa fcs vport public functions
47 */
48bfa_status_t bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport,
49 struct bfa_fcs_s *fcs, u16 vf_id,
50 struct bfa_port_cfg_s *port_cfg,
51 struct bfad_vport_s *vport_drv);
52bfa_status_t bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport,
53 struct bfa_fcs_s *fcs, uint16_t vf_id,
54 struct bfa_port_cfg_s *port_cfg,
55 struct bfad_vport_s *vport_drv);
56bfa_status_t bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport);
57bfa_status_t bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport);
58bfa_status_t bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport);
59void bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport,
60 struct bfa_vport_attr_s *vport_attr);
61void bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport,
62 struct bfa_vport_stats_s *vport_stats);
63void bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport);
64struct bfa_fcs_vport_s *bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs,
65 u16 vf_id, wwn_t vpwwn);
66
67#endif /* __BFA_FCS_VPORT_H__ */
diff --git a/drivers/scsi/bfa/include/log/bfa_log_fcs.h b/drivers/scsi/bfa/include/log/bfa_log_fcs.h
deleted file mode 100644
index b6f5df8827f8..000000000000
--- a/drivers/scsi/bfa/include/log/bfa_log_fcs.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/*
19 * messages define for FCS Module
20 */
21#ifndef __BFA_LOG_FCS_H__
22#define __BFA_LOG_FCS_H__
23#include <cs/bfa_log.h>
24#define BFA_LOG_FCS_FABRIC_NOSWITCH \
25 (((u32) BFA_LOG_FCS_ID << BFA_LOG_MODID_OFFSET) | 1)
26#define BFA_LOG_FCS_FABRIC_ISOLATED \
27 (((u32) BFA_LOG_FCS_ID << BFA_LOG_MODID_OFFSET) | 2)
28#endif
diff --git a/drivers/scsi/bfa/include/log/bfa_log_hal.h b/drivers/scsi/bfa/include/log/bfa_log_hal.h
deleted file mode 100644
index 5f8f5e30b9e8..000000000000
--- a/drivers/scsi/bfa/include/log/bfa_log_hal.h
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/* messages define for HAL Module */
19#ifndef __BFA_LOG_HAL_H__
20#define __BFA_LOG_HAL_H__
21#include <cs/bfa_log.h>
22#define BFA_LOG_HAL_ASSERT \
23 (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 1)
24#define BFA_LOG_HAL_HEARTBEAT_FAILURE \
25 (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 2)
26#define BFA_LOG_HAL_FCPIM_PARM_INVALID \
27 (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 3)
28#define BFA_LOG_HAL_SM_ASSERT \
29 (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 4)
30#define BFA_LOG_HAL_DRIVER_ERROR \
31 (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 5)
32#define BFA_LOG_HAL_DRIVER_CONFIG_ERROR \
33 (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 6)
34#define BFA_LOG_HAL_MBOX_ERROR \
35 (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 7)
36#endif
diff --git a/drivers/scsi/bfa/include/log/bfa_log_linux.h b/drivers/scsi/bfa/include/log/bfa_log_linux.h
deleted file mode 100644
index 44bc89768bda..000000000000
--- a/drivers/scsi/bfa/include/log/bfa_log_linux.h
+++ /dev/null
@@ -1,62 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/* messages define for LINUX Module */
19#ifndef __BFA_LOG_LINUX_H__
20#define __BFA_LOG_LINUX_H__
21#include <cs/bfa_log.h>
22#define BFA_LOG_LINUX_DEVICE_CLAIMED \
23 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 1)
24#define BFA_LOG_LINUX_HASH_INIT_FAILED \
25 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 2)
26#define BFA_LOG_LINUX_SYSFS_FAILED \
27 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 3)
28#define BFA_LOG_LINUX_MEM_ALLOC_FAILED \
29 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 4)
30#define BFA_LOG_LINUX_DRIVER_REGISTRATION_FAILED \
31 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 5)
32#define BFA_LOG_LINUX_ITNIM_FREE \
33 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 6)
34#define BFA_LOG_LINUX_ITNIM_ONLINE \
35 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 7)
36#define BFA_LOG_LINUX_ITNIM_OFFLINE \
37 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 8)
38#define BFA_LOG_LINUX_SCSI_HOST_FREE \
39 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 9)
40#define BFA_LOG_LINUX_SCSI_ABORT \
41 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 10)
42#define BFA_LOG_LINUX_SCSI_ABORT_COMP \
43 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 11)
44#define BFA_LOG_LINUX_DRIVER_CONFIG_ERROR \
45 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 12)
46#define BFA_LOG_LINUX_BNA_STATE_MACHINE \
47 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 13)
48#define BFA_LOG_LINUX_IOC_ERROR \
49 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 14)
50#define BFA_LOG_LINUX_RESOURCE_ALLOC_ERROR \
51 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 15)
52#define BFA_LOG_LINUX_RING_BUFFER_ERROR \
53 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 16)
54#define BFA_LOG_LINUX_DRIVER_ERROR \
55 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 17)
56#define BFA_LOG_LINUX_DRIVER_INFO \
57 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 18)
58#define BFA_LOG_LINUX_DRIVER_DIAG \
59 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 19)
60#define BFA_LOG_LINUX_DRIVER_AEN \
61 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 20)
62#endif
diff --git a/drivers/scsi/bfa/include/log/bfa_log_wdrv.h b/drivers/scsi/bfa/include/log/bfa_log_wdrv.h
deleted file mode 100644
index 809a95f7afe2..000000000000
--- a/drivers/scsi/bfa/include/log/bfa_log_wdrv.h
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/*
19 * messages define for WDRV Module
20 */
21#ifndef __BFA_LOG_WDRV_H__
22#define __BFA_LOG_WDRV_H__
23#include <cs/bfa_log.h>
24#define BFA_LOG_WDRV_IOC_INIT_ERROR \
25 (((u32) BFA_LOG_WDRV_ID << BFA_LOG_MODID_OFFSET) | 1)
26#define BFA_LOG_WDRV_IOC_INTERNAL_ERROR \
27 (((u32) BFA_LOG_WDRV_ID << BFA_LOG_MODID_OFFSET) | 2)
28#define BFA_LOG_WDRV_IOC_START_ERROR \
29 (((u32) BFA_LOG_WDRV_ID << BFA_LOG_MODID_OFFSET) | 3)
30#define BFA_LOG_WDRV_IOC_STOP_ERROR \
31 (((u32) BFA_LOG_WDRV_ID << BFA_LOG_MODID_OFFSET) | 4)
32#define BFA_LOG_WDRV_INSUFFICIENT_RESOURCES \
33 (((u32) BFA_LOG_WDRV_ID << BFA_LOG_MODID_OFFSET) | 5)
34#define BFA_LOG_WDRV_BASE_ADDRESS_MAP_ERROR \
35 (((u32) BFA_LOG_WDRV_ID << BFA_LOG_MODID_OFFSET) | 6)
36#endif
diff --git a/drivers/scsi/bfa/include/protocol/ct.h b/drivers/scsi/bfa/include/protocol/ct.h
deleted file mode 100644
index b82540a230c4..000000000000
--- a/drivers/scsi/bfa/include/protocol/ct.h
+++ /dev/null
@@ -1,492 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __CT_H__
19#define __CT_H__
20
21#include <protocol/types.h>
22
23#pragma pack(1)
24
25struct ct_hdr_s{
26 u32 rev_id:8; /* Revision of the CT */
27 u32 in_id:24; /* Initiator Id */
28 u32 gs_type:8; /* Generic service Type */
29 u32 gs_sub_type:8; /* Generic service sub type */
30 u32 options:8; /* options */
31 u32 rsvrd:8; /* reserved */
32 u32 cmd_rsp_code:16;/* ct command/response code */
33 u32 max_res_size:16;/* maximum/residual size */
34 u32 frag_id:8; /* fragment ID */
35 u32 reason_code:8; /* reason code */
36 u32 exp_code:8; /* explanation code */
37 u32 vendor_unq:8; /* vendor unique */
38};
39
40/*
41 * defines for the Revision
42 */
43enum {
44 CT_GS3_REVISION = 0x01,
45};
46
47/*
48 * defines for gs_type
49 */
50enum {
51 CT_GSTYPE_KEYSERVICE = 0xF7,
52 CT_GSTYPE_ALIASSERVICE = 0xF8,
53 CT_GSTYPE_MGMTSERVICE = 0xFA,
54 CT_GSTYPE_TIMESERVICE = 0xFB,
55 CT_GSTYPE_DIRSERVICE = 0xFC,
56};
57
58/*
59 * defines for gs_sub_type for gs type directory service
60 */
61enum {
62 CT_GSSUBTYPE_NAMESERVER = 0x02,
63};
64
65/*
66 * defines for gs_sub_type for gs type management service
67 */
68enum {
69 CT_GSSUBTYPE_CFGSERVER = 0x01,
70 CT_GSSUBTYPE_UNZONED_NS = 0x02,
71 CT_GSSUBTYPE_ZONESERVER = 0x03,
72 CT_GSSUBTYPE_LOCKSERVER = 0x04,
73 CT_GSSUBTYPE_HBA_MGMTSERVER = 0x10, /* for FDMI */
74};
75
76/*
77 * defines for CT response code field
78 */
79enum {
80 CT_RSP_REJECT = 0x8001,
81 CT_RSP_ACCEPT = 0x8002,
82};
83
84/*
85 * definitions for CT reason code
86 */
87enum {
88 CT_RSN_INV_CMD = 0x01,
89 CT_RSN_INV_VER = 0x02,
90 CT_RSN_LOGIC_ERR = 0x03,
91 CT_RSN_INV_SIZE = 0x04,
92 CT_RSN_LOGICAL_BUSY = 0x05,
93 CT_RSN_PROTO_ERR = 0x07,
94 CT_RSN_UNABLE_TO_PERF = 0x09,
95 CT_RSN_NOT_SUPP = 0x0B,
96 CT_RSN_SERVER_NOT_AVBL = 0x0D,
97 CT_RSN_SESSION_COULD_NOT_BE_ESTBD = 0x0E,
98 CT_RSN_VENDOR_SPECIFIC = 0xFF,
99
100};
101
102/*
103 * definitions for explanations code for Name server
104 */
105enum {
106 CT_NS_EXP_NOADDITIONAL = 0x00,
107 CT_NS_EXP_ID_NOT_REG = 0x01,
108 CT_NS_EXP_PN_NOT_REG = 0x02,
109 CT_NS_EXP_NN_NOT_REG = 0x03,
110 CT_NS_EXP_CS_NOT_REG = 0x04,
111 CT_NS_EXP_IPN_NOT_REG = 0x05,
112 CT_NS_EXP_IPA_NOT_REG = 0x06,
113 CT_NS_EXP_FT_NOT_REG = 0x07,
114 CT_NS_EXP_SPN_NOT_REG = 0x08,
115 CT_NS_EXP_SNN_NOT_REG = 0x09,
116 CT_NS_EXP_PT_NOT_REG = 0x0A,
117 CT_NS_EXP_IPP_NOT_REG = 0x0B,
118 CT_NS_EXP_FPN_NOT_REG = 0x0C,
119 CT_NS_EXP_HA_NOT_REG = 0x0D,
120 CT_NS_EXP_FD_NOT_REG = 0x0E,
121 CT_NS_EXP_FF_NOT_REG = 0x0F,
122 CT_NS_EXP_ACCESSDENIED = 0x10,
123 CT_NS_EXP_UNACCEPTABLE_ID = 0x11,
124 CT_NS_EXP_DATABASEEMPTY = 0x12,
125 CT_NS_EXP_NOT_REG_IN_SCOPE = 0x13,
126 CT_NS_EXP_DOM_ID_NOT_PRESENT = 0x14,
127 CT_NS_EXP_PORT_NUM_NOT_PRESENT = 0x15,
128 CT_NS_EXP_NO_DEVICE_ATTACHED = 0x16
129};
130
131/*
132 * definitions for the explanation code for all servers
133 */
134enum {
135 CT_EXP_AUTH_EXCEPTION = 0xF1,
136 CT_EXP_DB_FULL = 0xF2,
137 CT_EXP_DB_EMPTY = 0xF3,
138 CT_EXP_PROCESSING_REQ = 0xF4,
139 CT_EXP_UNABLE_TO_VERIFY_CONN = 0xF5,
140 CT_EXP_DEVICES_NOT_IN_CMN_ZONE = 0xF6
141};
142
143/*
144 * Command codes for Name server
145 */
146enum {
147 GS_GID_PN = 0x0121, /* Get Id on port name */
148 GS_GPN_ID = 0x0112, /* Get port name on ID */
149 GS_GNN_ID = 0x0113, /* Get node name on ID */
150 GS_GID_FT = 0x0171, /* Get Id on FC4 type */
151 GS_GSPN_ID = 0x0118, /* Get symbolic PN on ID */
152 GS_RFT_ID = 0x0217, /* Register fc4type on ID */
153 GS_RSPN_ID = 0x0218, /* Register symbolic PN on ID */
154 GS_RPN_ID = 0x0212, /* Register port name */
155 GS_RNN_ID = 0x0213, /* Register node name */
156 GS_RCS_ID = 0x0214, /* Register class of service */
157 GS_RPT_ID = 0x021A, /* Register port type */
158 GS_GA_NXT = 0x0100, /* Get all next */
159 GS_RFF_ID = 0x021F, /* Register FC4 Feature */
160};
161
162struct fcgs_id_req_s{
163 u32 rsvd:8;
164 u32 dap:24; /* port identifier */
165};
166#define fcgs_gpnid_req_t struct fcgs_id_req_s
167#define fcgs_gnnid_req_t struct fcgs_id_req_s
168#define fcgs_gspnid_req_t struct fcgs_id_req_s
169
170struct fcgs_gidpn_req_s{
171 wwn_t port_name; /* port wwn */
172};
173
174struct fcgs_gidpn_resp_s{
175 u32 rsvd:8;
176 u32 dap:24; /* port identifier */
177};
178
179/**
180 * RFT_ID
181 */
182struct fcgs_rftid_req_s {
183 u32 rsvd:8;
184 u32 dap:24; /* port identifier */
185 u32 fc4_type[8]; /* fc4 types */
186};
187
188/**
189 * RFF_ID : Register FC4 features.
190 */
191
192#define FC_GS_FCP_FC4_FEATURE_INITIATOR 0x02
193#define FC_GS_FCP_FC4_FEATURE_TARGET 0x01
194
195struct fcgs_rffid_req_s{
196 u32 rsvd:8;
197 u32 dap:24; /* port identifier */
198 u32 rsvd1:16;
199 u32 fc4ftr_bits:8; /* fc4 feature bits */
200 u32 fc4_type:8; /* corresponding FC4 Type */
201};
202
203/**
204 * GID_FT Request
205 */
206struct fcgs_gidft_req_s{
207 u8 reserved;
208 u8 domain_id; /* domain, 0 - all fabric */
209 u8 area_id; /* area, 0 - whole domain */
210 u8 fc4_type; /* FC_TYPE_FCP for SCSI devices */
211}; /* GID_FT Request */
212
213/**
214 * GID_FT Response
215 */
216struct fcgs_gidft_resp_s {
217 u8 last:1; /* last port identifier flag */
218 u8 reserved:7;
219 u32 pid:24; /* port identifier */
220}; /* GID_FT Response */
221
222/**
223 * RSPN_ID
224 */
225struct fcgs_rspnid_req_s{
226 u32 rsvd:8;
227 u32 dap:24; /* port identifier */
228 u8 spn_len; /* symbolic port name length */
229 u8 spn[256]; /* symbolic port name */
230};
231
232/**
233 * RPN_ID
234 */
235struct fcgs_rpnid_req_s{
236 u32 rsvd:8;
237 u32 port_id:24;
238 wwn_t port_name;
239};
240
241/**
242 * RNN_ID
243 */
244struct fcgs_rnnid_req_s{
245 u32 rsvd:8;
246 u32 port_id:24;
247 wwn_t node_name;
248};
249
250/**
251 * RCS_ID
252 */
253struct fcgs_rcsid_req_s{
254 u32 rsvd:8;
255 u32 port_id:24;
256 u32 cos;
257};
258
259/**
260 * RPT_ID
261 */
262struct fcgs_rptid_req_s{
263 u32 rsvd:8;
264 u32 port_id:24;
265 u32 port_type:8;
266 u32 rsvd1:24;
267};
268
269/**
270 * GA_NXT Request
271 */
272struct fcgs_ganxt_req_s{
273 u32 rsvd:8;
274 u32 port_id:24;
275};
276
277/**
278 * GA_NXT Response
279 */
280struct fcgs_ganxt_rsp_s{
281 u32 port_type:8; /* Port Type */
282 u32 port_id:24; /* Port Identifier */
283 wwn_t port_name; /* Port Name */
284 u8 spn_len; /* Length of Symbolic Port Name */
285 char spn[255]; /* Symbolic Port Name */
286 wwn_t node_name; /* Node Name */
287 u8 snn_len; /* Length of Symbolic Node Name */
288 char snn[255]; /* Symbolic Node Name */
289 u8 ipa[8]; /* Initial Process Associator */
290 u8 ip[16]; /* IP Address */
291 u32 cos; /* Class of Service */
292 u32 fc4types[8]; /* FC-4 TYPEs */
293 wwn_t fabric_port_name;
294 /* Fabric Port Name */
295 u32 rsvd:8; /* Reserved */
296 u32 hard_addr:24; /* Hard Address */
297};
298
299/*
300 * Fabric Config Server
301 */
302
303/*
304 * Command codes for Fabric Configuration Server
305 */
306enum {
307 GS_FC_GFN_CMD = 0x0114, /* GS FC Get Fabric Name */
308 GS_FC_GMAL_CMD = 0x0116, /* GS FC GMAL */
309 GS_FC_TRACE_CMD = 0x0400, /* GS FC Trace Route */
310 GS_FC_PING_CMD = 0x0401, /* GS FC Ping */
311};
312
313/*
314 * Source or Destination Port Tags.
315 */
316enum {
317 GS_FTRACE_TAG_NPORT_ID = 1,
318 GS_FTRACE_TAG_NPORT_NAME = 2,
319};
320
321/*
322* Port Value : Could be a Port id or wwn
323 */
324union fcgs_port_val_u{
325 u32 nport_id;
326 wwn_t nport_wwn;
327};
328
329#define GS_FTRACE_MAX_HOP_COUNT 20
330#define GS_FTRACE_REVISION 1
331
332/*
333 * Ftrace Related Structures.
334 */
335
336/*
337 * STR (Switch Trace) Reject Reason Codes. From FC-SW.
338 */
339enum {
340 GS_FTRACE_STR_CMD_COMPLETED_SUCC = 0,
341 GS_FTRACE_STR_CMD_NOT_SUPP_IN_NEXT_SWITCH,
342 GS_FTRACE_STR_NO_RESP_FROM_NEXT_SWITCH,
343 GS_FTRACE_STR_MAX_HOP_CNT_REACHED,
344 GS_FTRACE_STR_SRC_PORT_NOT_FOUND,
345 GS_FTRACE_STR_DST_PORT_NOT_FOUND,
346 GS_FTRACE_STR_DEVICES_NOT_IN_COMMON_ZONE,
347 GS_FTRACE_STR_NO_ROUTE_BW_PORTS,
348 GS_FTRACE_STR_NO_ADDL_EXPLN,
349 GS_FTRACE_STR_FABRIC_BUSY,
350 GS_FTRACE_STR_FABRIC_BUILD_IN_PROGRESS,
351 GS_FTRACE_STR_VENDOR_SPECIFIC_ERR_START = 0xf0,
352 GS_FTRACE_STR_VENDOR_SPECIFIC_ERR_END = 0xff,
353};
354
355/*
356 * Ftrace Request
357 */
358struct fcgs_ftrace_req_s{
359 u32 revision;
360 u16 src_port_tag; /* Source Port tag */
361 u16 src_port_len; /* Source Port len */
362 union fcgs_port_val_u src_port_val; /* Source Port value */
363 u16 dst_port_tag; /* Destination Port tag */
364 u16 dst_port_len; /* Destination Port len */
365 union fcgs_port_val_u dst_port_val; /* Destination Port value */
366 u32 token;
367 u8 vendor_id[8]; /* T10 Vendor Identifier */
368 u8 vendor_info[8]; /* Vendor specific Info */
369 u32 max_hop_cnt; /* Max Hop Count */
370};
371
372/*
373 * Path info structure
374 */
375struct fcgs_ftrace_path_info_s{
376 wwn_t switch_name; /* Switch WWN */
377 u32 domain_id;
378 wwn_t ingress_port_name; /* Ingress ports wwn */
379 u32 ingress_phys_port_num; /* Ingress ports physical port
380 * number
381 */
382 wwn_t egress_port_name; /* Ingress ports wwn */
383 u32 egress_phys_port_num; /* Ingress ports physical port
384 * number
385 */
386};
387
388/*
389 * Ftrace Acc Response
390 */
391struct fcgs_ftrace_resp_s{
392 u32 revision;
393 u32 token;
394 u8 vendor_id[8]; /* T10 Vendor Identifier */
395 u8 vendor_info[8]; /* Vendor specific Info */
396 u32 str_rej_reason_code; /* STR Reject Reason Code */
397 u32 num_path_info_entries; /* No. of path info entries */
398 /*
399 * path info entry/entries.
400 */
401 struct fcgs_ftrace_path_info_s path_info[1];
402
403};
404
405/*
406* Fabric Config Server : FCPing
407 */
408
409/*
410 * FC Ping Request
411 */
412struct fcgs_fcping_req_s{
413 u32 revision;
414 u16 port_tag;
415 u16 port_len; /* Port len */
416 union fcgs_port_val_u port_val; /* Port value */
417 u32 token;
418};
419
420/*
421 * FC Ping Response
422 */
423struct fcgs_fcping_resp_s{
424 u32 token;
425};
426
427/*
428 * Command codes for zone server query.
429 */
430enum {
431 ZS_GZME = 0x0124, /* Get zone member extended */
432};
433
434/*
435 * ZS GZME request
436 */
437#define ZS_GZME_ZNAMELEN 32
438struct zs_gzme_req_s{
439 u8 znamelen;
440 u8 rsvd[3];
441 u8 zname[ZS_GZME_ZNAMELEN];
442};
443
444enum zs_mbr_type{
445 ZS_MBR_TYPE_PWWN = 1,
446 ZS_MBR_TYPE_DOMPORT = 2,
447 ZS_MBR_TYPE_PORTID = 3,
448 ZS_MBR_TYPE_NWWN = 4,
449};
450
451struct zs_mbr_wwn_s{
452 u8 mbr_type;
453 u8 rsvd[3];
454 wwn_t wwn;
455};
456
457struct zs_query_resp_s{
458 u32 nmbrs; /* number of zone members */
459 struct zs_mbr_wwn_s mbr[1];
460};
461
462/*
463 * GMAL Command ( Get ( interconnect Element) Management Address List)
464 * To retrieve the IP Address of a Switch.
465 */
466
467#define CT_GMAL_RESP_PREFIX_TELNET "telnet://"
468#define CT_GMAL_RESP_PREFIX_HTTP "http://"
469
470/* GMAL/GFN request */
471struct fcgs_req_s {
472 wwn_t wwn; /* PWWN/NWWN */
473};
474
475#define fcgs_gmal_req_t struct fcgs_req_s
476#define fcgs_gfn_req_t struct fcgs_req_s
477
478/* Accept Response to GMAL */
479struct fcgs_gmal_resp_s {
480 u32 ms_len; /* Num of entries */
481 u8 ms_ma[256];
482};
483
484struct fc_gmal_entry_s {
485 u8 len;
486 u8 prefix[7]; /* like "http://" */
487 u8 ip_addr[248];
488};
489
490#pragma pack()
491
492#endif
diff --git a/drivers/scsi/bfa/include/protocol/fc_sp.h b/drivers/scsi/bfa/include/protocol/fc_sp.h
deleted file mode 100644
index 55bb0b31d04b..000000000000
--- a/drivers/scsi/bfa/include/protocol/fc_sp.h
+++ /dev/null
@@ -1,224 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __FC_SP_H__
19#define __FC_SP_H__
20
21#include <protocol/types.h>
22
23#pragma pack(1)
24
25enum auth_els_flags{
26 FC_AUTH_ELS_MORE_FRAGS_FLAG = 0x80, /*! bit-7. More Fragments
27 * Follow
28 */
29 FC_AUTH_ELS_CONCAT_FLAG = 0x40, /*! bit-6. Concatenation Flag */
30 FC_AUTH_ELS_SEQ_NUM_FLAG = 0x01 /*! bit-0. Sequence Number */
31};
32
33enum auth_msg_codes{
34 FC_AUTH_MC_AUTH_RJT = 0x0A, /*! Auth Reject */
35 FC_AUTH_MC_AUTH_NEG = 0x0B, /*! Auth Negotiate */
36 FC_AUTH_MC_AUTH_DONE = 0x0C, /*! Auth Done */
37
38 FC_AUTH_MC_DHCHAP_CHAL = 0x10, /*! DHCHAP Challenge */
39 FC_AUTH_MC_DHCHAP_REPLY = 0x11, /*! DHCHAP Reply */
40 FC_AUTH_MC_DHCHAP_SUCC = 0x12, /*! DHCHAP Success */
41
42 FC_AUTH_MC_FCAP_REQ = 0x13, /*! FCAP Request */
43 FC_AUTH_MC_FCAP_ACK = 0x14, /*! FCAP Acknowledge */
44 FC_AUTH_MC_FCAP_CONF = 0x15, /*! FCAP Confirm */
45
46 FC_AUTH_MC_FCPAP_INIT = 0x16, /*! FCPAP Init */
47 FC_AUTH_MC_FCPAP_ACC = 0x17, /*! FCPAP Accept */
48 FC_AUTH_MC_FCPAP_COMP = 0x18, /*! FCPAP Complete */
49
50 FC_AUTH_MC_IKE_SA_INIT = 0x22, /*! IKE SA INIT */
51 FC_AUTH_MC_IKE_SA_AUTH = 0x23, /*! IKE SA Auth */
52 FC_AUTH_MC_IKE_CREATE_CHILD_SA = 0x24, /*! IKE Create Child SA */
53 FC_AUTH_MC_IKE_INFO = 0x25, /*! IKE informational */
54};
55
56enum auth_proto_version{
57 FC_AUTH_PROTO_VER_1 = 1, /*! Protocol Version 1 */
58};
59
60enum {
61 FC_AUTH_ELS_COMMAND_CODE = 0x90,/*! Authentication ELS Command code */
62 FC_AUTH_PROTO_PARAM_LEN_SZ = 4, /*! Size of Proto Parameter Len Field */
63 FC_AUTH_PROTO_PARAM_VAL_SZ = 4, /*! Size of Proto Parameter Val Field */
64 FC_MAX_AUTH_SECRET_LEN = 256,
65 /*! Maximum secret string length */
66 FC_AUTH_NUM_USABLE_PROTO_LEN_SZ = 4,
67 /*! Size of usable protocols field */
68 FC_AUTH_RESP_VALUE_LEN_SZ = 4,
69 /*! Size of response value length */
70 FC_MAX_CHAP_KEY_LEN = 256, /*! Maximum md5 digest length */
71 FC_MAX_AUTH_RETRIES = 3, /*! Maximum number of retries */
72 FC_MD5_DIGEST_LEN = 16, /*! MD5 digest length */
73 FC_SHA1_DIGEST_LEN = 20, /*! SHA1 digest length */
74 FC_MAX_DHG_SUPPORTED = 1, /*! Maximum DH Groups supported */
75 FC_MAX_ALG_SUPPORTED = 1, /*! Maximum algorithms supported */
76 FC_MAX_PROTO_SUPPORTED = 1, /*! Maximum protocols supported */
77 FC_START_TXN_ID = 2, /*! Starting transaction ID */
78};
79
80enum auth_proto_id{
81 FC_AUTH_PROTO_DHCHAP = 0x00000001,
82 FC_AUTH_PROTO_FCAP = 0x00000002,
83 FC_AUTH_PROTO_FCPAP = 0x00000003,
84 FC_AUTH_PROTO_IKEv2 = 0x00000004,
85 FC_AUTH_PROTO_IKEv2_AUTH = 0x00000005,
86};
87
88struct auth_name_s{
89 u16 name_tag; /*! Name Tag = 1 for Authentication */
90 u16 name_len; /*! Name Length = 8 for Authentication
91 */
92 wwn_t name; /*! Name. TODO - is this PWWN */
93};
94
95
96enum auth_hash_func{
97 FC_AUTH_HASH_FUNC_MD5 = 0x00000005,
98 FC_AUTH_HASH_FUNC_SHA_1 = 0x00000006,
99};
100
101enum auth_dh_gid{
102 FC_AUTH_DH_GID_0_DHG_NULL = 0x00000000,
103 FC_AUTH_DH_GID_1_DHG_1024 = 0x00000001,
104 FC_AUTH_DH_GID_2_DHG_1280 = 0x00000002,
105 FC_AUTH_DH_GID_3_DHG_1536 = 0x00000003,
106 FC_AUTH_DH_GID_4_DHG_2048 = 0x00000004,
107 FC_AUTH_DH_GID_6_DHG_3072 = 0x00000006,
108 FC_AUTH_DH_GID_7_DHG_4096 = 0x00000007,
109 FC_AUTH_DH_GID_8_DHG_6144 = 0x00000008,
110 FC_AUTH_DH_GID_9_DHG_8192 = 0x00000009,
111};
112
113struct auth_els_msg_s {
114 u8 auth_els_code; /* Authentication ELS Code (0x90) */
115 u8 auth_els_flag; /* Authentication ELS Flags */
116 u8 auth_msg_code; /* Authentication Message Code */
117 u8 proto_version; /* Protocol Version */
118 u32 msg_len; /* Message Length */
119 u32 trans_id; /* Transaction Identifier (T_ID) */
120
121 /* Msg payload follows... */
122};
123
124
125enum auth_neg_param_tags {
126 FC_AUTH_NEG_DHCHAP_HASHLIST = 0x0001,
127 FC_AUTH_NEG_DHCHAP_DHG_ID_LIST = 0x0002,
128};
129
130
131struct dhchap_param_format_s {
132 u16 tag; /*! Parameter Tag. See
133 * auth_neg_param_tags_t
134 */
135 u16 word_cnt;
136
137 /* followed by variable length parameter value... */
138};
139
140struct auth_proto_params_s {
141 u32 proto_param_len;
142 u32 proto_id;
143
144 /*
145 * Followed by variable length Protocol specific parameters. DH-CHAP
146 * uses dhchap_param_format_t
147 */
148};
149
150struct auth_neg_msg_s {
151 struct auth_name_s auth_ini_name;
152 u32 usable_auth_protos;
153 struct auth_proto_params_s proto_params[1]; /*! (1..usable_auth_proto)
154 * protocol params
155 */
156};
157
158struct auth_dh_val_s {
159 u32 dh_val_len;
160 u32 dh_val[1];
161};
162
163struct auth_dhchap_chal_msg_s {
164 struct auth_els_msg_s hdr;
165 struct auth_name_s auth_responder_name; /* TODO VRK - is auth_name_t
166 * type OK?
167 */
168 u32 hash_id;
169 u32 dh_grp_id;
170 u32 chal_val_len;
171 char chal_val[1];
172
173 /* ...followed by variable Challenge length/value and DH length/value */
174};
175
176
177enum auth_rjt_codes {
178 FC_AUTH_RJT_CODE_AUTH_FAILURE = 0x01,
179 FC_AUTH_RJT_CODE_LOGICAL_ERR = 0x02,
180};
181
182enum auth_rjt_code_exps {
183 FC_AUTH_CEXP_AUTH_MECH_NOT_USABLE = 0x01,
184 FC_AUTH_CEXP_DH_GROUP_NOT_USABLE = 0x02,
185 FC_AUTH_CEXP_HASH_FUNC_NOT_USABLE = 0x03,
186 FC_AUTH_CEXP_AUTH_XACT_STARTED = 0x04,
187 FC_AUTH_CEXP_AUTH_FAILED = 0x05,
188 FC_AUTH_CEXP_INCORRECT_PLD = 0x06,
189 FC_AUTH_CEXP_INCORRECT_PROTO_MSG = 0x07,
190 FC_AUTH_CEXP_RESTART_AUTH_PROTO = 0x08,
191 FC_AUTH_CEXP_AUTH_CONCAT_NOT_SUPP = 0x09,
192 FC_AUTH_CEXP_PROTO_VER_NOT_SUPP = 0x0A,
193};
194
195enum auth_status {
196 FC_AUTH_STATE_INPROGRESS = 0, /*! authentication in progress */
197 FC_AUTH_STATE_FAILED = 1, /*! authentication failed */
198 FC_AUTH_STATE_SUCCESS = 2 /*! authentication successful */
199};
200
201struct auth_rjt_msg_s {
202 struct auth_els_msg_s hdr;
203 u8 reason_code;
204 u8 reason_code_exp;
205 u8 rsvd[2];
206};
207
208
209struct auth_dhchap_neg_msg_s {
210 struct auth_els_msg_s hdr;
211 struct auth_neg_msg_s nego;
212};
213
214struct auth_dhchap_reply_msg_s {
215 struct auth_els_msg_s hdr;
216
217 /*
218 * followed by response value length & Value + DH Value Length & Value
219 */
220};
221
222#pragma pack()
223
224#endif /* __FC_SP_H__ */
diff --git a/drivers/scsi/bfa/include/protocol/fcp.h b/drivers/scsi/bfa/include/protocol/fcp.h
deleted file mode 100644
index 74ea63ce84b7..000000000000
--- a/drivers/scsi/bfa/include/protocol/fcp.h
+++ /dev/null
@@ -1,184 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __FCPPROTO_H__
19#define __FCPPROTO_H__
20
21#include <linux/bitops.h>
22#include <protocol/scsi.h>
23
24#pragma pack(1)
25
26enum {
27 FCP_RJT = 0x01000000, /* SRR reject */
28 FCP_SRR_ACCEPT = 0x02000000, /* SRR accept */
29 FCP_SRR = 0x14000000, /* Sequence Retransmission Request */
30};
31
32/*
33 * SRR FC-4 LS payload
34 */
35struct fc_srr_s{
36 u32 ls_cmd;
37 u32 ox_id:16; /* ox-id */
38 u32 rx_id:16; /* rx-id */
39 u32 ro; /* relative offset */
40 u32 r_ctl:8; /* R_CTL for I.U. */
41 u32 res:24;
42};
43
44
45/*
46 * FCP_CMND definitions
47 */
48#define FCP_CMND_CDB_LEN 16
49#define FCP_CMND_LUN_LEN 8
50
51struct fcp_cmnd_s{
52 lun_t lun; /* 64-bit LU number */
53 u8 crn; /* command reference number */
54#ifdef __BIGENDIAN
55 u8 resvd:1,
56 priority:4, /* FCP-3: SAM-3 priority */
57 taskattr:3; /* scsi task attribute */
58#else
59 u8 taskattr:3, /* scsi task attribute */
60 priority:4, /* FCP-3: SAM-3 priority */
61 resvd:1;
62#endif
63 u8 tm_flags; /* task management flags */
64#ifdef __BIGENDIAN
65 u8 addl_cdb_len:6, /* additional CDB length words */
66 iodir:2; /* read/write FCP_DATA IUs */
67#else
68 u8 iodir:2, /* read/write FCP_DATA IUs */
69 addl_cdb_len:6; /* additional CDB length */
70#endif
71 struct scsi_cdb_s cdb;
72
73 /*
74 * !!! additional cdb bytes follows here!!!
75 */
76 u32 fcp_dl; /* bytes to be transferred */
77};
78
79#define fcp_cmnd_cdb_len(_cmnd) ((_cmnd)->addl_cdb_len * 4 + FCP_CMND_CDB_LEN)
80#define fcp_cmnd_fcpdl(_cmnd) ((&(_cmnd)->fcp_dl)[(_cmnd)->addl_cdb_len])
81
82/*
83 * fcp_cmnd_t.iodir field values
84 */
85enum fcp_iodir{
86 FCP_IODIR_NONE = 0,
87 FCP_IODIR_WRITE = 1,
88 FCP_IODIR_READ = 2,
89 FCP_IODIR_RW = 3,
90};
91
92/*
93 * Task attribute field
94 */
95enum {
96 FCP_TASK_ATTR_SIMPLE = 0,
97 FCP_TASK_ATTR_HOQ = 1,
98 FCP_TASK_ATTR_ORDERED = 2,
99 FCP_TASK_ATTR_ACA = 4,
100 FCP_TASK_ATTR_UNTAGGED = 5, /* obsolete in FCP-3 */
101};
102
103/*
104 * Task management flags field - only one bit shall be set
105 */
106enum fcp_tm_cmnd{
107 FCP_TM_ABORT_TASK_SET = BIT(1),
108 FCP_TM_CLEAR_TASK_SET = BIT(2),
109 FCP_TM_LUN_RESET = BIT(4),
110 FCP_TM_TARGET_RESET = BIT(5), /* obsolete in FCP-3 */
111 FCP_TM_CLEAR_ACA = BIT(6),
112};
113
114/*
115 * FCP_XFER_RDY IU defines
116 */
117struct fcp_xfer_rdy_s{
118 u32 data_ro;
119 u32 burst_len;
120 u32 reserved;
121};
122
123/*
124 * FCP_RSP residue flags
125 */
126enum fcp_residue{
127 FCP_NO_RESIDUE = 0, /* no residue */
128 FCP_RESID_OVER = 1, /* more data left that was not sent */
129 FCP_RESID_UNDER = 2, /* less data than requested */
130};
131
132enum {
133 FCP_RSPINFO_GOOD = 0,
134 FCP_RSPINFO_DATALEN_MISMATCH = 1,
135 FCP_RSPINFO_CMND_INVALID = 2,
136 FCP_RSPINFO_ROLEN_MISMATCH = 3,
137 FCP_RSPINFO_TM_NOT_SUPP = 4,
138 FCP_RSPINFO_TM_FAILED = 5,
139};
140
141struct fcp_rspinfo_s{
142 u32 res0:24;
143 u32 rsp_code:8; /* response code (as above) */
144 u32 res1;
145};
146
147struct fcp_resp_s{
148 u32 reserved[2]; /* 2 words reserved */
149 u16 reserved2;
150#ifdef __BIGENDIAN
151 u8 reserved3:3;
152 u8 fcp_conf_req:1; /* FCP_CONF is requested */
153 u8 resid_flags:2; /* underflow/overflow */
154 u8 sns_len_valid:1;/* sense len is valid */
155 u8 rsp_len_valid:1;/* response len is valid */
156#else
157 u8 rsp_len_valid:1;/* response len is valid */
158 u8 sns_len_valid:1;/* sense len is valid */
159 u8 resid_flags:2; /* underflow/overflow */
160 u8 fcp_conf_req:1; /* FCP_CONF is requested */
161 u8 reserved3:3;
162#endif
163 u8 scsi_status; /* one byte SCSI status */
164 u32 residue; /* residual data bytes */
165 u32 sns_len; /* length od sense info */
166 u32 rsp_len; /* length of response info */
167};
168
169#define fcp_snslen(__fcprsp) ((__fcprsp)->sns_len_valid ? \
170 (__fcprsp)->sns_len : 0)
171#define fcp_rsplen(__fcprsp) ((__fcprsp)->rsp_len_valid ? \
172 (__fcprsp)->rsp_len : 0)
173#define fcp_rspinfo(__fcprsp) ((struct fcp_rspinfo_s *)((__fcprsp) + 1))
174#define fcp_snsinfo(__fcprsp) (((u8 *)fcp_rspinfo(__fcprsp)) + \
175 fcp_rsplen(__fcprsp))
176
177struct fcp_cmnd_fr_s{
178 struct fchs_s fchs;
179 struct fcp_cmnd_s fcp;
180};
181
182#pragma pack()
183
184#endif
diff --git a/drivers/scsi/bfa/include/protocol/fdmi.h b/drivers/scsi/bfa/include/protocol/fdmi.h
deleted file mode 100644
index 6c05c268c71b..000000000000
--- a/drivers/scsi/bfa/include/protocol/fdmi.h
+++ /dev/null
@@ -1,163 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __FDMI_H__
19#define __FDMI_H__
20
21#include <protocol/types.h>
22#include <protocol/fc.h>
23#include <protocol/ct.h>
24
25#pragma pack(1)
26
27/*
28 * FDMI Command Codes
29 */
30#define FDMI_GRHL 0x0100
31#define FDMI_GHAT 0x0101
32#define FDMI_GRPL 0x0102
33#define FDMI_GPAT 0x0110
34#define FDMI_RHBA 0x0200
35#define FDMI_RHAT 0x0201
36#define FDMI_RPRT 0x0210
37#define FDMI_RPA 0x0211
38#define FDMI_DHBA 0x0300
39#define FDMI_DPRT 0x0310
40
41/*
42 * FDMI reason codes
43 */
44#define FDMI_NO_ADDITIONAL_EXP 0x00
45#define FDMI_HBA_ALREADY_REG 0x10
46#define FDMI_HBA_ATTRIB_NOT_REG 0x11
47#define FDMI_HBA_ATTRIB_MULTIPLE 0x12
48#define FDMI_HBA_ATTRIB_LENGTH_INVALID 0x13
49#define FDMI_HBA_ATTRIB_NOT_PRESENT 0x14
50#define FDMI_PORT_ORIG_NOT_IN_LIST 0x15
51#define FDMI_PORT_HBA_NOT_IN_LIST 0x16
52#define FDMI_PORT_ATTRIB_NOT_REG 0x20
53#define FDMI_PORT_NOT_REG 0x21
54#define FDMI_PORT_ATTRIB_MULTIPLE 0x22
55#define FDMI_PORT_ATTRIB_LENGTH_INVALID 0x23
56#define FDMI_PORT_ALREADY_REGISTEREED 0x24
57
58/*
59 * FDMI Transmission Speed Mask values
60 */
61#define FDMI_TRANS_SPEED_1G 0x00000001
62#define FDMI_TRANS_SPEED_2G 0x00000002
63#define FDMI_TRANS_SPEED_10G 0x00000004
64#define FDMI_TRANS_SPEED_4G 0x00000008
65#define FDMI_TRANS_SPEED_8G 0x00000010
66#define FDMI_TRANS_SPEED_16G 0x00000020
67#define FDMI_TRANS_SPEED_UNKNOWN 0x00008000
68
69/*
70 * FDMI HBA attribute types
71 */
72enum fdmi_hba_attribute_type {
73 FDMI_HBA_ATTRIB_NODENAME = 1, /* 0x0001 */
74 FDMI_HBA_ATTRIB_MANUFACTURER, /* 0x0002 */
75 FDMI_HBA_ATTRIB_SERIALNUM, /* 0x0003 */
76 FDMI_HBA_ATTRIB_MODEL, /* 0x0004 */
77 FDMI_HBA_ATTRIB_MODEL_DESC, /* 0x0005 */
78 FDMI_HBA_ATTRIB_HW_VERSION, /* 0x0006 */
79 FDMI_HBA_ATTRIB_DRIVER_VERSION, /* 0x0007 */
80 FDMI_HBA_ATTRIB_ROM_VERSION, /* 0x0008 */
81 FDMI_HBA_ATTRIB_FW_VERSION, /* 0x0009 */
82 FDMI_HBA_ATTRIB_OS_NAME, /* 0x000A */
83 FDMI_HBA_ATTRIB_MAX_CT, /* 0x000B */
84
85 FDMI_HBA_ATTRIB_MAX_TYPE
86};
87
88/*
89 * FDMI Port attribute types
90 */
91enum fdmi_port_attribute_type {
92 FDMI_PORT_ATTRIB_FC4_TYPES = 1, /* 0x0001 */
93 FDMI_PORT_ATTRIB_SUPP_SPEED, /* 0x0002 */
94 FDMI_PORT_ATTRIB_PORT_SPEED, /* 0x0003 */
95 FDMI_PORT_ATTRIB_FRAME_SIZE, /* 0x0004 */
96 FDMI_PORT_ATTRIB_DEV_NAME, /* 0x0005 */
97 FDMI_PORT_ATTRIB_HOST_NAME, /* 0x0006 */
98
99 FDMI_PORT_ATTR_MAX_TYPE
100};
101
102/*
103 * FDMI attribute
104 */
105struct fdmi_attr_s {
106 u16 type;
107 u16 len;
108 u8 value[1];
109};
110
111/*
112 * HBA Attribute Block
113 */
114struct fdmi_hba_attr_s {
115 u32 attr_count; /* # of attributes */
116 struct fdmi_attr_s hba_attr; /* n attributes */
117};
118
119/*
120 * Registered Port List
121 */
122struct fdmi_port_list_s {
123 u32 num_ports; /* number Of Port Entries */
124 wwn_t port_entry; /* one or more */
125};
126
127/*
128 * Port Attribute Block
129 */
130struct fdmi_port_attr_s {
131 u32 attr_count; /* # of attributes */
132 struct fdmi_attr_s port_attr; /* n attributes */
133};
134
135/*
136 * FDMI Register HBA Attributes
137 */
138struct fdmi_rhba_s {
139 wwn_t hba_id; /* HBA Identifier */
140 struct fdmi_port_list_s port_list; /* Registered Port List */
141 struct fdmi_hba_attr_s hba_attr_blk; /* HBA attribute block */
142};
143
144/*
145 * FDMI Register Port
146 */
147struct fdmi_rprt_s {
148 wwn_t hba_id; /* HBA Identifier */
149 wwn_t port_name; /* Port wwn */
150 struct fdmi_port_attr_s port_attr_blk; /* Port Attr Block */
151};
152
153/*
154 * FDMI Register Port Attributes
155 */
156struct fdmi_rpa_s {
157 wwn_t port_name; /* port wwn */
158 struct fdmi_port_attr_s port_attr_blk; /* Port Attr Block */
159};
160
161#pragma pack()
162
163#endif
diff --git a/drivers/scsi/bfa/include/protocol/scsi.h b/drivers/scsi/bfa/include/protocol/scsi.h
deleted file mode 100644
index b220e6b4f6e1..000000000000
--- a/drivers/scsi/bfa/include/protocol/scsi.h
+++ /dev/null
@@ -1,1648 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __SCSI_H__
19#define __SCSI_H__
20
21#include <protocol/types.h>
22
23#pragma pack(1)
24
25/*
26 * generic SCSI cdb definition
27 */
28#define SCSI_MAX_CDBLEN 16
29struct scsi_cdb_s{
30 u8 scsi_cdb[SCSI_MAX_CDBLEN];
31};
32
33/*
34 * scsi lun serial number definition
35 */
36#define SCSI_LUN_SN_LEN 32
37struct scsi_lun_sn_s{
38 u8 lun_sn[SCSI_LUN_SN_LEN];
39};
40
41/*
42 * SCSI Direct Access Commands
43 */
44enum {
45 SCSI_OP_TEST_UNIT_READY = 0x00,
46 SCSI_OP_REQUEST_SENSE = 0x03,
47 SCSI_OP_FORMAT_UNIT = 0x04,
48 SCSI_OP_READ6 = 0x08,
49 SCSI_OP_WRITE6 = 0x0A,
50 SCSI_OP_WRITE_FILEMARKS = 0x10,
51 SCSI_OP_INQUIRY = 0x12,
52 SCSI_OP_MODE_SELECT6 = 0x15,
53 SCSI_OP_RESERVE6 = 0x16,
54 SCSI_OP_RELEASE6 = 0x17,
55 SCSI_OP_MODE_SENSE6 = 0x1A,
56 SCSI_OP_START_STOP_UNIT = 0x1B,
57 SCSI_OP_SEND_DIAGNOSTIC = 0x1D,
58 SCSI_OP_READ_CAPACITY = 0x25,
59 SCSI_OP_READ10 = 0x28,
60 SCSI_OP_WRITE10 = 0x2A,
61 SCSI_OP_VERIFY10 = 0x2F,
62 SCSI_OP_READ_DEFECT_DATA = 0x37,
63 SCSI_OP_LOG_SELECT = 0x4C,
64 SCSI_OP_LOG_SENSE = 0x4D,
65 SCSI_OP_MODE_SELECT10 = 0x55,
66 SCSI_OP_RESERVE10 = 0x56,
67 SCSI_OP_RELEASE10 = 0x57,
68 SCSI_OP_MODE_SENSE10 = 0x5A,
69 SCSI_OP_PER_RESERVE_IN = 0x5E,
70 SCSI_OP_PER_RESERVE_OUR = 0x5E,
71 SCSI_OP_READ16 = 0x88,
72 SCSI_OP_WRITE16 = 0x8A,
73 SCSI_OP_VERIFY16 = 0x8F,
74 SCSI_OP_READ_CAPACITY16 = 0x9E,
75 SCSI_OP_REPORT_LUNS = 0xA0,
76 SCSI_OP_READ12 = 0xA8,
77 SCSI_OP_WRITE12 = 0xAA,
78 SCSI_OP_UNDEF = 0xFF,
79};
80
81/*
82 * SCSI START_STOP_UNIT command
83 */
84struct scsi_start_stop_unit_s{
85 u8 opcode;
86#ifdef __BIGENDIAN
87 u8 lun:3;
88 u8 reserved1:4;
89 u8 immed:1;
90#else
91 u8 immed:1;
92 u8 reserved1:4;
93 u8 lun:3;
94#endif
95 u8 reserved2;
96 u8 reserved3;
97#ifdef __BIGENDIAN
98 u8 power_conditions:4;
99 u8 reserved4:2;
100 u8 loEj:1;
101 u8 start:1;
102#else
103 u8 start:1;
104 u8 loEj:1;
105 u8 reserved4:2;
106 u8 power_conditions:4;
107#endif
108 u8 control;
109};
110
111/*
112 * SCSI SEND_DIAGNOSTIC command
113 */
114struct scsi_send_diagnostic_s{
115 u8 opcode;
116#ifdef __BIGENDIAN
117 u8 self_test_code:3;
118 u8 pf:1;
119 u8 reserved1:1;
120 u8 self_test:1;
121 u8 dev_offl:1;
122 u8 unit_offl:1;
123#else
124 u8 unit_offl:1;
125 u8 dev_offl:1;
126 u8 self_test:1;
127 u8 reserved1:1;
128 u8 pf:1;
129 u8 self_test_code:3;
130#endif
131 u8 reserved2;
132
133 u8 param_list_length[2]; /* MSB first */
134 u8 control;
135
136};
137
138/*
139 * SCSI READ10/WRITE10 commands
140 */
141struct scsi_rw10_s{
142 u8 opcode;
143#ifdef __BIGENDIAN
144 u8 lun:3;
145 u8 dpo:1; /* Disable Page Out */
146 u8 fua:1; /* Force Unit Access */
147 u8 reserved1:2;
148 u8 rel_adr:1; /* relative address */
149#else
150 u8 rel_adr:1;
151 u8 reserved1:2;
152 u8 fua:1;
153 u8 dpo:1;
154 u8 lun:3;
155#endif
156 u8 lba0; /* logical block address - MSB */
157 u8 lba1;
158 u8 lba2;
159 u8 lba3; /* LSB */
160 u8 reserved3;
161 u8 xfer_length0; /* transfer length in blocks - MSB */
162 u8 xfer_length1; /* LSB */
163 u8 control;
164};
165
166#define SCSI_CDB10_GET_LBA(cdb) \
167 (((cdb)->lba0 << 24) | ((cdb)->lba1 << 16) | \
168 ((cdb)->lba2 << 8) | (cdb)->lba3)
169
170#define SCSI_CDB10_SET_LBA(cdb, lba) { \
171 (cdb)->lba0 = lba >> 24; \
172 (cdb)->lba1 = (lba >> 16) & 0xFF; \
173 (cdb)->lba2 = (lba >> 8) & 0xFF; \
174 (cdb)->lba3 = lba & 0xFF; \
175}
176
177#define SCSI_CDB10_GET_TL(cdb) \
178 ((cdb)->xfer_length0 << 8 | (cdb)->xfer_length1)
179#define SCSI_CDB10_SET_TL(cdb, tl) { \
180 (cdb)->xfer_length0 = tl >> 8; \
181 (cdb)->xfer_length1 = tl & 0xFF; \
182}
183
184/*
185 * SCSI READ6/WRITE6 commands
186 */
187struct scsi_rw6_s{
188 u8 opcode;
189#ifdef __BIGENDIAN
190 u8 lun:3;
191 u8 lba0:5; /* MSb */
192#else
193 u8 lba0:5; /* MSb */
194 u8 lun:3;
195#endif
196 u8 lba1;
197 u8 lba2; /* LSB */
198 u8 xfer_length;
199 u8 control;
200};
201
202#define SCSI_TAPE_CDB6_GET_TL(cdb) \
203 (((cdb)->tl0 << 16) | ((cdb)->tl1 << 8) | (cdb)->tl2)
204
205#define SCSI_TAPE_CDB6_SET_TL(cdb, tl) { \
206 (cdb)->tl0 = tl >> 16; \
207 (cdb)->tl1 = (tl >> 8) & 0xFF; \
208 (cdb)->tl2 = tl & 0xFF; \
209}
210
211/*
212 * SCSI sequential (TAPE) wrtie command
213 */
214struct scsi_tape_wr_s{
215 u8 opcode;
216#ifdef __BIGENDIAN
217 u8 rsvd:7;
218 u8 fixed:1; /* MSb */
219#else
220 u8 fixed:1; /* MSb */
221 u8 rsvd:7;
222#endif
223 u8 tl0; /* Msb */
224 u8 tl1;
225 u8 tl2; /* Lsb */
226
227 u8 control;
228};
229
230#define SCSI_CDB6_GET_LBA(cdb) \
231 (((cdb)->lba0 << 16) | ((cdb)->lba1 << 8) | (cdb)->lba2)
232
233#define SCSI_CDB6_SET_LBA(cdb, lba) { \
234 (cdb)->lba0 = lba >> 16; \
235 (cdb)->lba1 = (lba >> 8) & 0xFF; \
236 (cdb)->lba2 = lba & 0xFF; \
237}
238
239#define SCSI_CDB6_GET_TL(cdb) ((cdb)->xfer_length)
240#define SCSI_CDB6_SET_TL(cdb, tl) { \
241 (cdb)->xfer_length = tl; \
242}
243
244/*
245 * SCSI sense data format
246 */
247struct scsi_sense_s{
248#ifdef __BIGENDIAN
249 u8 valid:1;
250 u8 rsp_code:7;
251#else
252 u8 rsp_code:7;
253 u8 valid:1;
254#endif
255 u8 seg_num;
256#ifdef __BIGENDIAN
257 u8 file_mark:1;
258 u8 eom:1; /* end of media */
259 u8 ili:1; /* incorrect length indicator */
260 u8 reserved:1;
261 u8 sense_key:4;
262#else
263 u8 sense_key:4;
264 u8 reserved:1;
265 u8 ili:1; /* incorrect length indicator */
266 u8 eom:1; /* end of media */
267 u8 file_mark:1;
268#endif
269 u8 information[4]; /* device-type or command specific info
270 */
271 u8 add_sense_length;
272 /* additional sense length */
273 u8 command_info[4];/* command specific information
274 */
275 u8 asc; /* additional sense code */
276 u8 ascq; /* additional sense code qualifier */
277 u8 fru_code; /* field replaceable unit code */
278#ifdef __BIGENDIAN
279 u8 sksv:1; /* sense key specific valid */
280 u8 c_d:1; /* command/data bit */
281 u8 res1:2;
282 u8 bpv:1; /* bit pointer valid */
283 u8 bpointer:3; /* bit pointer */
284#else
285 u8 bpointer:3; /* bit pointer */
286 u8 bpv:1; /* bit pointer valid */
287 u8 res1:2;
288 u8 c_d:1; /* command/data bit */
289 u8 sksv:1; /* sense key specific valid */
290#endif
291 u8 fpointer[2]; /* field pointer */
292};
293
294#define SCSI_SENSE_CUR_ERR 0x70
295#define SCSI_SENSE_DEF_ERR 0x71
296
297/*
298 * SCSI sense key values
299 */
300#define SCSI_SK_NO_SENSE 0x0
301#define SCSI_SK_REC_ERR 0x1 /* recovered error */
302#define SCSI_SK_NOT_READY 0x2
303#define SCSI_SK_MED_ERR 0x3 /* medium error */
304#define SCSI_SK_HW_ERR 0x4 /* hardware error */
305#define SCSI_SK_ILLEGAL_REQ 0x5
306#define SCSI_SK_UNIT_ATT 0x6 /* unit attention */
307#define SCSI_SK_DATA_PROTECT 0x7
308#define SCSI_SK_BLANK_CHECK 0x8
309#define SCSI_SK_VENDOR_SPEC 0x9
310#define SCSI_SK_COPY_ABORTED 0xA
311#define SCSI_SK_ABORTED_CMND 0xB
312#define SCSI_SK_VOL_OVERFLOW 0xD
313#define SCSI_SK_MISCOMPARE 0xE
314
315/*
316 * SCSI additional sense codes
317 */
318#define SCSI_ASC_NO_ADD_SENSE 0x00
319#define SCSI_ASC_LUN_NOT_READY 0x04
320#define SCSI_ASC_LUN_COMMUNICATION 0x08
321#define SCSI_ASC_WRITE_ERROR 0x0C
322#define SCSI_ASC_INVALID_CMND_CODE 0x20
323#define SCSI_ASC_BAD_LBA 0x21
324#define SCSI_ASC_INVALID_FIELD_IN_CDB 0x24
325#define SCSI_ASC_LUN_NOT_SUPPORTED 0x25
326#define SCSI_ASC_LUN_WRITE_PROTECT 0x27
327#define SCSI_ASC_POWERON_BDR 0x29 /* power on reset, bus reset,
328 * bus device reset
329 */
330#define SCSI_ASC_PARAMS_CHANGED 0x2A
331#define SCSI_ASC_CMND_CLEARED_BY_A_I 0x2F
332#define SCSI_ASC_SAVING_PARAM_NOTSUPP 0x39
333#define SCSI_ASC_TOCC 0x3F /* target operating condtions
334 * changed
335 */
336#define SCSI_ASC_PARITY_ERROR 0x47
337#define SCSI_ASC_CMND_PHASE_ERROR 0x4A
338#define SCSI_ASC_DATA_PHASE_ERROR 0x4B
339#define SCSI_ASC_VENDOR_SPEC 0x7F
340
341/*
342 * SCSI additional sense code qualifiers
343 */
344#define SCSI_ASCQ_CAUSE_NOT_REPORT 0x00
345#define SCSI_ASCQ_BECOMING_READY 0x01
346#define SCSI_ASCQ_INIT_CMD_REQ 0x02
347#define SCSI_ASCQ_FORMAT_IN_PROGRESS 0x04
348#define SCSI_ASCQ_OPERATION_IN_PROGRESS 0x07
349#define SCSI_ASCQ_SELF_TEST_IN_PROGRESS 0x09
350#define SCSI_ASCQ_WR_UNEXP_UNSOL_DATA 0x0C
351#define SCSI_ASCQ_WR_NOTENG_UNSOL_DATA 0x0D
352
353#define SCSI_ASCQ_LBA_OUT_OF_RANGE 0x00
354#define SCSI_ASCQ_INVALID_ELEMENT_ADDR 0x01
355
356#define SCSI_ASCQ_LUN_WRITE_PROTECTED 0x00
357#define SCSI_ASCQ_LUN_HW_WRITE_PROTECTED 0x01
358#define SCSI_ASCQ_LUN_SW_WRITE_PROTECTED 0x02
359
360#define SCSI_ASCQ_POR 0x01 /* power on reset */
361#define SCSI_ASCQ_SBR 0x02 /* scsi bus reset */
362#define SCSI_ASCQ_BDR 0x03 /* bus device reset */
363#define SCSI_ASCQ_DIR 0x04 /* device internal reset */
364
365#define SCSI_ASCQ_MODE_PARAMS_CHANGED 0x01
366#define SCSI_ASCQ_LOG_PARAMS_CHANGED 0x02
367#define SCSI_ASCQ_RESERVATIONS_PREEMPTED 0x03
368#define SCSI_ASCQ_RESERVATIONS_RELEASED 0x04
369#define SCSI_ASCQ_REGISTRATIONS_PREEMPTED 0x05
370
371#define SCSI_ASCQ_MICROCODE_CHANGED 0x01
372#define SCSI_ASCQ_CHANGED_OPER_COND 0x02
373#define SCSI_ASCQ_INQ_CHANGED 0x03 /* inquiry data changed */
374#define SCSI_ASCQ_DI_CHANGED 0x05 /* device id changed */
375#define SCSI_ASCQ_RL_DATA_CHANGED 0x0E /* report luns data changed */
376
377#define SCSI_ASCQ_DP_CRC_ERR 0x01 /* data phase crc error */
378#define SCSI_ASCQ_DP_SCSI_PARITY_ERR 0x02 /* data phase scsi parity error
379 */
380#define SCSI_ASCQ_IU_CRC_ERR 0x03 /* information unit crc error */
381#define SCSI_ASCQ_PROTO_SERV_CRC_ERR 0x05
382
383#define SCSI_ASCQ_LUN_TIME_OUT 0x01
384
385/* ------------------------------------------------------------
386 * SCSI INQUIRY
387 * ------------------------------------------------------------*/
388
389struct scsi_inquiry_s{
390 u8 opcode;
391#ifdef __BIGENDIAN
392 u8 lun:3;
393 u8 reserved1:3;
394 u8 cmd_dt:1;
395 u8 evpd:1;
396#else
397 u8 evpd:1;
398 u8 cmd_dt:1;
399 u8 reserved1:3;
400 u8 lun:3;
401#endif
402 u8 page_code;
403 u8 reserved2;
404 u8 alloc_length;
405 u8 control;
406};
407
408struct scsi_inquiry_vendor_s{
409 u8 vendor_id[8];
410};
411
412struct scsi_inquiry_prodid_s{
413 u8 product_id[16];
414};
415
416struct scsi_inquiry_prodrev_s{
417 u8 product_rev[4];
418};
419
420struct scsi_inquiry_data_s{
421#ifdef __BIGENDIAN
422 u8 peripheral_qual:3; /* peripheral qualifier */
423 u8 device_type:5; /* peripheral device type */
424
425 u8 rmb:1; /* removable medium bit */
426 u8 device_type_mod:7; /* device type modifier */
427
428 u8 version;
429
430 u8 aenc:1; /* async event notification capability
431 */
432 u8 trm_iop:1; /* terminate I/O process */
433 u8 norm_aca:1; /* normal ACA supported */
434 u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */
435 u8 rsp_data_format:4;
436
437 u8 additional_len;
438 u8 sccs:1;
439 u8 reserved1:7;
440
441 u8 reserved2:1;
442 u8 enc_serv:1; /* enclosure service component */
443 u8 reserved3:1;
444 u8 multi_port:1; /* multi-port device */
445 u8 m_chngr:1; /* device in medium transport element */
446 u8 ack_req_q:1; /* SIP specific bit */
447 u8 addr32:1; /* SIP specific bit */
448 u8 addr16:1; /* SIP specific bit */
449
450 u8 rel_adr:1; /* relative address */
451 u8 w_bus32:1;
452 u8 w_bus16:1;
453 u8 synchronous:1;
454 u8 linked_commands:1;
455 u8 trans_dis:1;
456 u8 cmd_queue:1; /* command queueing supported */
457 u8 soft_reset:1; /* soft reset alternative (VS) */
458#else
459 u8 device_type:5; /* peripheral device type */
460 u8 peripheral_qual:3;
461 /* peripheral qualifier */
462
463 u8 device_type_mod:7;
464 /* device type modifier */
465 u8 rmb:1; /* removable medium bit */
466
467 u8 version;
468
469 u8 rsp_data_format:4;
470 u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */
471 u8 norm_aca:1; /* normal ACA supported */
472 u8 terminate_iop:1;/* terminate I/O process */
473 u8 aenc:1; /* async event notification capability
474 */
475
476 u8 additional_len;
477 u8 reserved1:7;
478 u8 sccs:1;
479
480 u8 addr16:1; /* SIP specific bit */
481 u8 addr32:1; /* SIP specific bit */
482 u8 ack_req_q:1; /* SIP specific bit */
483 u8 m_chngr:1; /* device in medium transport element */
484 u8 multi_port:1; /* multi-port device */
485 u8 reserved3:1; /* TBD - Vendor Specific */
486 u8 enc_serv:1; /* enclosure service component */
487 u8 reserved2:1;
488
489 u8 soft_seset:1; /* soft reset alternative (VS) */
490 u8 cmd_queue:1; /* command queueing supported */
491 u8 trans_dis:1;
492 u8 linked_commands:1;
493 u8 synchronous:1;
494 u8 w_bus16:1;
495 u8 w_bus32:1;
496 u8 rel_adr:1; /* relative address */
497#endif
498 struct scsi_inquiry_vendor_s vendor_id;
499 struct scsi_inquiry_prodid_s product_id;
500 struct scsi_inquiry_prodrev_s product_rev;
501 u8 vendor_specific[20];
502 u8 reserved4[40];
503};
504
505/*
506 * inquiry.peripheral_qual field values
507 */
508#define SCSI_DEVQUAL_DEFAULT 0
509#define SCSI_DEVQUAL_NOT_CONNECTED 1
510#define SCSI_DEVQUAL_NOT_SUPPORTED 3
511
512/*
513 * inquiry.device_type field values
514 */
515#define SCSI_DEVICE_DIRECT_ACCESS 0x00
516#define SCSI_DEVICE_SEQ_ACCESS 0x01
517#define SCSI_DEVICE_ARRAY_CONTROLLER 0x0C
518#define SCSI_DEVICE_UNKNOWN 0x1F
519
520/*
521 * inquiry.version
522 */
523#define SCSI_VERSION_ANSI_X3131 2 /* ANSI X3.131 SCSI-2 */
524#define SCSI_VERSION_SPC 3 /* SPC (SCSI-3), ANSI X3.301:1997 */
525#define SCSI_VERSION_SPC_2 4 /* SPC-2 */
526
527/*
528 * response data format
529 */
530#define SCSI_RSP_DATA_FORMAT 2 /* SCSI-2 & SPC */
531
532/*
533 * SCSI inquiry page codes
534 */
535#define SCSI_INQ_PAGE_VPD_PAGES 0x00 /* supported vpd pages */
536#define SCSI_INQ_PAGE_USN_PAGE 0x80 /* unit serial number page */
537#define SCSI_INQ_PAGE_DEV_IDENT 0x83 /* device indentification page
538 */
539#define SCSI_INQ_PAGES_MAX 3
540
541/*
542 * supported vital product data pages
543 */
544struct scsi_inq_page_vpd_pages_s{
545#ifdef __BIGENDIAN
546 u8 peripheral_qual:3;
547 u8 device_type:5;
548#else
549 u8 device_type:5;
550 u8 peripheral_qual:3;
551#endif
552 u8 page_code;
553 u8 reserved;
554 u8 page_length;
555 u8 pages[SCSI_INQ_PAGES_MAX];
556};
557
558/*
559 * Unit serial number page
560 */
561#define SCSI_INQ_USN_LEN 32
562
563struct scsi_inq_usn_s{
564 char usn[SCSI_INQ_USN_LEN];
565};
566
567struct scsi_inq_page_usn_s{
568#ifdef __BIGENDIAN
569 u8 peripheral_qual:3;
570 u8 device_type:5;
571#else
572 u8 device_type:5;
573 u8 peripheral_qual:3;
574#endif
575 u8 page_code;
576 u8 reserved1;
577 u8 page_length;
578 struct scsi_inq_usn_s usn;
579};
580
581enum {
582 SCSI_INQ_DIP_CODE_BINARY = 1, /* identifier has binary value */
583 SCSI_INQ_DIP_CODE_ASCII = 2, /* identifier has ascii value */
584};
585
586enum {
587 SCSI_INQ_DIP_ASSOC_LUN = 0, /* id is associated with device */
588 SCSI_INQ_DIP_ASSOC_PORT = 1, /* id is associated with port that
589 * received the request
590 */
591};
592
593enum {
594 SCSI_INQ_ID_TYPE_VENDOR = 1,
595 SCSI_INQ_ID_TYPE_IEEE = 2,
596 SCSI_INQ_ID_TYPE_FC_FS = 3,
597 SCSI_INQ_ID_TYPE_OTHER = 4,
598};
599
600struct scsi_inq_dip_desc_s{
601#ifdef __BIGENDIAN
602 u8 res0:4;
603 u8 code_set:4;
604 u8 res1:2;
605 u8 association:2;
606 u8 id_type:4;
607#else
608 u8 code_set:4;
609 u8 res0:4;
610 u8 id_type:4;
611 u8 association:2;
612 u8 res1:2;
613#endif
614 u8 res2;
615 u8 id_len;
616 struct scsi_lun_sn_s id;
617};
618
619/*
620 * Device indentification page
621 */
622struct scsi_inq_page_dev_ident_s{
623#ifdef __BIGENDIAN
624 u8 peripheral_qual:3;
625 u8 device_type:5;
626#else
627 u8 device_type:5;
628 u8 peripheral_qual:3;
629#endif
630 u8 page_code;
631 u8 reserved1;
632 u8 page_length;
633 struct scsi_inq_dip_desc_s desc;
634};
635
636/* ------------------------------------------------------------
637 * READ CAPACITY
638 * ------------------------------------------------------------
639 */
640
641struct scsi_read_capacity_s{
642 u8 opcode;
643#ifdef __BIGENDIAN
644 u8 lun:3;
645 u8 reserved1:4;
646 u8 rel_adr:1;
647#else
648 u8 rel_adr:1;
649 u8 reserved1:4;
650 u8 lun:3;
651#endif
652 u8 lba0; /* MSB */
653 u8 lba1;
654 u8 lba2;
655 u8 lba3; /* LSB */
656 u8 reserved2;
657 u8 reserved3;
658#ifdef __BIGENDIAN
659 u8 reserved4:7;
660 u8 pmi:1; /* partial medium indicator */
661#else
662 u8 pmi:1; /* partial medium indicator */
663 u8 reserved4:7;
664#endif
665 u8 control;
666};
667
668struct scsi_read_capacity_data_s{
669 u32 max_lba; /* maximum LBA available */
670 u32 block_length; /* in bytes */
671};
672
673struct scsi_read_capacity16_data_s{
674 u64 lba; /* maximum LBA available */
675 u32 block_length; /* in bytes */
676#ifdef __BIGENDIAN
677 u8 reserved1:4,
678 p_type:3,
679 prot_en:1;
680 u8 reserved2:4,
681 lb_pbe:4; /* logical blocks per physical block
682 * exponent */
683 u16 reserved3:2,
684 lba_align:14; /* lowest aligned logical block
685 * address */
686#else
687 u16 lba_align:14, /* lowest aligned logical block
688 * address */
689 reserved3:2;
690 u8 lb_pbe:4, /* logical blocks per physical block
691 * exponent */
692 reserved2:4;
693 u8 prot_en:1,
694 p_type:3,
695 reserved1:4;
696#endif
697 u64 reserved4;
698 u64 reserved5;
699};
700
701/* ------------------------------------------------------------
702 * REPORT LUNS command
703 * ------------------------------------------------------------
704 */
705
706struct scsi_report_luns_s{
707 u8 opcode; /* A0h - REPORT LUNS opCode */
708 u8 reserved1[5];
709 u8 alloc_length[4];/* allocation length MSB first */
710 u8 reserved2;
711 u8 control;
712};
713
714#define SCSI_REPORT_LUN_ALLOC_LENGTH(rl) \
715 ((rl->alloc_length[0] << 24) | (rl->alloc_length[1] << 16) | \
716 (rl->alloc_length[2] << 8) | (rl->alloc_length[3]))
717
718#define SCSI_REPORT_LUNS_SET_ALLOCLEN(rl, alloc_len) { \
719 (rl)->alloc_length[0] = (alloc_len) >> 24; \
720 (rl)->alloc_length[1] = ((alloc_len) >> 16) & 0xFF; \
721 (rl)->alloc_length[2] = ((alloc_len) >> 8) & 0xFF; \
722 (rl)->alloc_length[3] = (alloc_len) & 0xFF; \
723}
724
725struct scsi_report_luns_data_s{
726 u32 lun_list_length; /* length of LUN list length */
727 u32 reserved;
728 lun_t lun[1]; /* first LUN in lun list */
729};
730
731/* -------------------------------------------------------------
732 * SCSI mode parameters
733 * -----------------------------------------------------------
734 */
735enum {
736 SCSI_DA_MEDIUM_DEF = 0, /* direct access default medium type */
737 SCSI_DA_MEDIUM_SS = 1, /* direct access single sided */
738 SCSI_DA_MEDIUM_DS = 2, /* direct access double sided */
739};
740
741/*
742 * SCSI Mode Select(6) cdb
743 */
744struct scsi_mode_select6_s{
745 u8 opcode;
746#ifdef __BIGENDIAN
747 u8 reserved1:3;
748 u8 pf:1; /* page format */
749 u8 reserved2:3;
750 u8 sp:1; /* save pages if set to 1 */
751#else
752 u8 sp:1; /* save pages if set to 1 */
753 u8 reserved2:3;
754 u8 pf:1; /* page format */
755 u8 reserved1:3;
756#endif
757 u8 reserved3[2];
758 u8 alloc_len;
759 u8 control;
760};
761
762/*
763 * SCSI Mode Select(10) cdb
764 */
765struct scsi_mode_select10_s{
766 u8 opcode;
767#ifdef __BIGENDIAN
768 u8 reserved1:3;
769 u8 pf:1; /* page format */
770 u8 reserved2:3;
771 u8 sp:1; /* save pages if set to 1 */
772#else
773 u8 sp:1; /* save pages if set to 1 */
774 u8 reserved2:3;
775 u8 pf:1; /* page format */
776 u8 reserved1:3;
777#endif
778 u8 reserved3[5];
779 u8 alloc_len_msb;
780 u8 alloc_len_lsb;
781 u8 control;
782};
783
784/*
785 * SCSI Mode Sense(6) cdb
786 */
787struct scsi_mode_sense6_s{
788 u8 opcode;
789#ifdef __BIGENDIAN
790 u8 reserved1:4;
791 u8 dbd:1; /* disable block discriptors if set to 1 */
792 u8 reserved2:3;
793
794 u8 pc:2; /* page control */
795 u8 page_code:6;
796#else
797 u8 reserved2:3;
798 u8 dbd:1; /* disable block descriptors if set to 1 */
799 u8 reserved1:4;
800
801 u8 page_code:6;
802 u8 pc:2; /* page control */
803#endif
804 u8 reserved3;
805 u8 alloc_len;
806 u8 control;
807};
808
809/*
810 * SCSI Mode Sense(10) cdb
811 */
812struct scsi_mode_sense10_s{
813 u8 opcode;
814#ifdef __BIGENDIAN
815 u8 reserved1:3;
816 u8 LLBAA:1; /* long LBA accepted if set to 1 */
817 u8 dbd:1; /* disable block descriptors if set
818 * to 1
819 */
820 u8 reserved2:3;
821
822 u8 pc:2; /* page control */
823 u8 page_code:6;
824#else
825 u8 reserved2:3;
826 u8 dbd:1; /* disable block descriptors if set to
827 * 1
828 */
829 u8 LLBAA:1; /* long LBA accepted if set to 1 */
830 u8 reserved1:3;
831
832 u8 page_code:6;
833 u8 pc:2; /* page control */
834#endif
835 u8 reserved3[4];
836 u8 alloc_len_msb;
837 u8 alloc_len_lsb;
838 u8 control;
839};
840
841#define SCSI_CDB10_GET_AL(cdb) \
842 ((cdb)->alloc_len_msb << 8 | (cdb)->alloc_len_lsb)
843
844#define SCSI_CDB10_SET_AL(cdb, al) { \
845 (cdb)->alloc_len_msb = al >> 8; \
846 (cdb)->alloc_len_lsb = al & 0xFF; \
847}
848
849#define SCSI_CDB6_GET_AL(cdb) ((cdb)->alloc_len)
850
851#define SCSI_CDB6_SET_AL(cdb, al) { \
852 (cdb)->alloc_len = al; \
853}
854
855/*
856 * page control field values
857 */
858#define SCSI_PC_CURRENT_VALUES 0x0
859#define SCSI_PC_CHANGEABLE_VALUES 0x1
860#define SCSI_PC_DEFAULT_VALUES 0x2
861#define SCSI_PC_SAVED_VALUES 0x3
862
863/*
864 * SCSI mode page codes
865 */
866#define SCSI_MP_VENDOR_SPEC 0x00
867#define SCSI_MP_DISC_RECN 0x02 /* disconnect-reconnect page */
868#define SCSI_MP_FORMAT_DEVICE 0x03
869#define SCSI_MP_RDG 0x04 /* rigid disk geometry page */
870#define SCSI_MP_FDP 0x05 /* flexible disk page */
871#define SCSI_MP_CACHING 0x08 /* caching page */
872#define SCSI_MP_CONTROL 0x0A /* control mode page */
873#define SCSI_MP_MED_TYPES_SUP 0x0B /* medium types supported page */
874#define SCSI_MP_INFO_EXCP_CNTL 0x1C /* informational exception control */
875#define SCSI_MP_ALL 0x3F /* return all pages - mode sense only */
876
877/*
878 * mode parameter header
879 */
880struct scsi_mode_param_header6_s{
881 u8 mode_datalen;
882 u8 medium_type;
883
884 /*
885 * device specific parameters expanded for direct access devices
886 */
887#ifdef __BIGENDIAN
888 u32 wp:1; /* write protected */
889 u32 reserved1:2;
890 u32 dpofua:1; /* disable page out + force unit access
891 */
892 u32 reserved2:4;
893#else
894 u32 reserved2:4;
895 u32 dpofua:1; /* disable page out + force unit access
896 */
897 u32 reserved1:2;
898 u32 wp:1; /* write protected */
899#endif
900
901 u8 block_desclen;
902};
903
904struct scsi_mode_param_header10_s{
905 u32 mode_datalen:16;
906 u32 medium_type:8;
907
908 /*
909 * device specific parameters expanded for direct access devices
910 */
911#ifdef __BIGENDIAN
912 u32 wp:1; /* write protected */
913 u32 reserved1:2;
914 u32 dpofua:1; /* disable page out + force unit access
915 */
916 u32 reserved2:4;
917#else
918 u32 reserved2:4;
919 u32 dpofua:1; /* disable page out + force unit access
920 */
921 u32 reserved1:2;
922 u32 wp:1; /* write protected */
923#endif
924
925#ifdef __BIGENDIAN
926 u32 reserved3:7;
927 u32 longlba:1;
928#else
929 u32 longlba:1;
930 u32 reserved3:7;
931#endif
932 u32 reserved4:8;
933 u32 block_desclen:16;
934};
935
936/*
937 * mode parameter block descriptor
938 */
939struct scsi_mode_param_desc_s{
940 u32 nblks;
941 u32 density_code:8;
942 u32 block_length:24;
943};
944
945/*
946 * Disconnect-reconnect mode page format
947 */
948struct scsi_mp_disc_recn_s{
949#ifdef __BIGENDIAN
950 u8 ps:1;
951 u8 reserved1:1;
952 u8 page_code:6;
953#else
954 u8 page_code:6;
955 u8 reserved1:1;
956 u8 ps:1;
957#endif
958 u8 page_len;
959 u8 buf_full_ratio;
960 u8 buf_empty_ratio;
961
962 u8 bil_msb; /* bus inactivity limit -MSB */
963 u8 bil_lsb; /* bus inactivity limit -LSB */
964
965 u8 dtl_msb; /* disconnect time limit - MSB */
966 u8 dtl_lsb; /* disconnect time limit - LSB */
967
968 u8 ctl_msb; /* connect time limit - MSB */
969 u8 ctl_lsb; /* connect time limit - LSB */
970
971 u8 max_burst_len_msb;
972 u8 max_burst_len_lsb;
973#ifdef __BIGENDIAN
974 u8 emdp:1; /* enable modify data pointers */
975 u8 fa:3; /* fair arbitration */
976 u8 dimm:1; /* disconnect immediate */
977 u8 dtdc:3; /* data transfer disconnect control */
978#else
979 u8 dtdc:3; /* data transfer disconnect control */
980 u8 dimm:1; /* disconnect immediate */
981 u8 fa:3; /* fair arbitration */
982 u8 emdp:1; /* enable modify data pointers */
983#endif
984
985 u8 reserved3;
986
987 u8 first_burst_len_msb;
988 u8 first_burst_len_lsb;
989};
990
991/*
992 * SCSI format device mode page
993 */
994struct scsi_mp_format_device_s{
995#ifdef __BIGENDIAN
996 u32 ps:1;
997 u32 reserved1:1;
998 u32 page_code:6;
999#else
1000 u32 page_code:6;
1001 u32 reserved1:1;
1002 u32 ps:1;
1003#endif
1004 u32 page_len:8;
1005 u32 tracks_per_zone:16;
1006
1007 u32 a_sec_per_zone:16;
1008 u32 a_tracks_per_zone:16;
1009
1010 u32 a_tracks_per_lun:16; /* alternate tracks/lun-MSB */
1011 u32 sec_per_track:16; /* sectors/track-MSB */
1012
1013 u32 bytes_per_sector:16;
1014 u32 interleave:16;
1015
1016 u32 tsf:16; /* track skew factor-MSB */
1017 u32 csf:16; /* cylinder skew factor-MSB */
1018
1019#ifdef __BIGENDIAN
1020 u32 ssec:1; /* soft sector formatting */
1021 u32 hsec:1; /* hard sector formatting */
1022 u32 rmb:1; /* removable media */
1023 u32 surf:1; /* surface */
1024 u32 reserved2:4;
1025#else
1026 u32 reserved2:4;
1027 u32 surf:1; /* surface */
1028 u32 rmb:1; /* removable media */
1029 u32 hsec:1; /* hard sector formatting */
1030 u32 ssec:1; /* soft sector formatting */
1031#endif
1032 u32 reserved3:24;
1033};
1034
1035/*
1036 * SCSI rigid disk device geometry page
1037 */
1038struct scsi_mp_rigid_device_geometry_s{
1039#ifdef __BIGENDIAN
1040 u32 ps:1;
1041 u32 reserved1:1;
1042 u32 page_code:6;
1043#else
1044 u32 page_code:6;
1045 u32 reserved1:1;
1046 u32 ps:1;
1047#endif
1048 u32 page_len:8;
1049 u32 num_cylinders0:8;
1050 u32 num_cylinders1:8;
1051
1052 u32 num_cylinders2:8;
1053 u32 num_heads:8;
1054 u32 scwp0:8;
1055 u32 scwp1:8;
1056
1057 u32 scwp2:8;
1058 u32 scrwc0:8;
1059 u32 scrwc1:8;
1060 u32 scrwc2:8;
1061
1062 u32 dsr:16;
1063 u32 lscyl0:8;
1064 u32 lscyl1:8;
1065
1066 u32 lscyl2:8;
1067#ifdef __BIGENDIAN
1068 u32 reserved2:6;
1069 u32 rpl:2; /* rotational position locking */
1070#else
1071 u32 rpl:2; /* rotational position locking */
1072 u32 reserved2:6;
1073#endif
1074 u32 rot_off:8;
1075 u32 reserved3:8;
1076
1077 u32 med_rot_rate:16;
1078 u32 reserved4:16;
1079};
1080
1081/*
1082 * SCSI caching mode page
1083 */
1084struct scsi_mp_caching_s{
1085#ifdef __BIGENDIAN
1086 u8 ps:1;
1087 u8 res1:1;
1088 u8 page_code:6;
1089#else
1090 u8 page_code:6;
1091 u8 res1:1;
1092 u8 ps:1;
1093#endif
1094 u8 page_len;
1095#ifdef __BIGENDIAN
1096 u8 ic:1; /* initiator control */
1097 u8 abpf:1; /* abort pre-fetch */
1098 u8 cap:1; /* caching analysis permitted */
1099 u8 disc:1; /* discontinuity */
1100 u8 size:1; /* size enable */
1101 u8 wce:1; /* write cache enable */
1102 u8 mf:1; /* multiplication factor */
1103 u8 rcd:1; /* read cache disable */
1104
1105 u8 drrp:4; /* demand read retention priority */
1106 u8 wrp:4; /* write retention priority */
1107#else
1108 u8 rcd:1; /* read cache disable */
1109 u8 mf:1; /* multiplication factor */
1110 u8 wce:1; /* write cache enable */
1111 u8 size:1; /* size enable */
1112 u8 disc:1; /* discontinuity */
1113 u8 cap:1; /* caching analysis permitted */
1114 u8 abpf:1; /* abort pre-fetch */
1115 u8 ic:1; /* initiator control */
1116
1117 u8 wrp:4; /* write retention priority */
1118 u8 drrp:4; /* demand read retention priority */
1119#endif
1120 u8 dptl[2];/* disable pre-fetch transfer length */
1121 u8 min_prefetch[2];
1122 u8 max_prefetch[2];
1123 u8 max_prefetch_limit[2];
1124#ifdef __BIGENDIAN
1125 u8 fsw:1; /* force sequential write */
1126 u8 lbcss:1;/* logical block cache segment size */
1127 u8 dra:1; /* disable read ahead */
1128 u8 vs:2; /* vendor specific */
1129 u8 res2:3;
1130#else
1131 u8 res2:3;
1132 u8 vs:2; /* vendor specific */
1133 u8 dra:1; /* disable read ahead */
1134 u8 lbcss:1;/* logical block cache segment size */
1135 u8 fsw:1; /* force sequential write */
1136#endif
1137 u8 num_cache_segs;
1138
1139 u8 cache_seg_size[2];
1140 u8 res3;
1141 u8 non_cache_seg_size[3];
1142};
1143
1144/*
1145 * SCSI control mode page
1146 */
1147struct scsi_mp_control_page_s{
1148#ifdef __BIGENDIAN
1149u8 ps:1;
1150u8 reserved1:1;
1151u8 page_code:6;
1152#else
1153u8 page_code:6;
1154u8 reserved1:1;
1155u8 ps:1;
1156#endif
1157 u8 page_len;
1158#ifdef __BIGENDIAN
1159 u8 tst:3; /* task set type */
1160 u8 reserved3:3;
1161 u8 gltsd:1; /* global logging target save disable */
1162 u8 rlec:1; /* report log exception condition */
1163
1164 u8 qalgo_mod:4; /* queue alogorithm modifier */
1165 u8 reserved4:1;
1166 u8 qerr:2; /* queue error management */
1167 u8 dque:1; /* disable queuing */
1168
1169 u8 reserved5:1;
1170 u8 rac:1; /* report a check */
1171 u8 reserved6:2;
1172 u8 swp:1; /* software write protect */
1173 u8 raerp:1; /* ready AER permission */
1174 u8 uaaerp:1; /* unit attenstion AER permission */
1175 u8 eaerp:1; /* error AER permission */
1176
1177 u8 reserved7:5;
1178 u8 autoload_mod:3;
1179#else
1180 u8 rlec:1; /* report log exception condition */
1181 u8 gltsd:1; /* global logging target save disable */
1182 u8 reserved3:3;
1183 u8 tst:3; /* task set type */
1184
1185 u8 dque:1; /* disable queuing */
1186 u8 qerr:2; /* queue error management */
1187 u8 reserved4:1;
1188 u8 qalgo_mod:4; /* queue alogorithm modifier */
1189
1190 u8 eaerp:1; /* error AER permission */
1191 u8 uaaerp:1; /* unit attenstion AER permission */
1192 u8 raerp:1; /* ready AER permission */
1193 u8 swp:1; /* software write protect */
1194 u8 reserved6:2;
1195 u8 rac:1; /* report a check */
1196 u8 reserved5:1;
1197
1198 u8 autoload_mod:3;
1199 u8 reserved7:5;
1200#endif
1201 u8 rahp_msb; /* ready AER holdoff period - MSB */
1202 u8 rahp_lsb; /* ready AER holdoff period - LSB */
1203
1204 u8 busy_timeout_period_msb;
1205 u8 busy_timeout_period_lsb;
1206
1207 u8 ext_selftest_compl_time_msb;
1208 u8 ext_selftest_compl_time_lsb;
1209};
1210
1211/*
1212 * SCSI medium types supported mode page
1213 */
1214struct scsi_mp_medium_types_sup_s{
1215#ifdef __BIGENDIAN
1216 u8 ps:1;
1217 u8 reserved1:1;
1218 u8 page_code:6;
1219#else
1220 u8 page_code:6;
1221 u8 reserved1:1;
1222 u8 ps:1;
1223#endif
1224 u8 page_len;
1225
1226 u8 reserved3[2];
1227 u8 med_type1_sup; /* medium type one supported */
1228 u8 med_type2_sup; /* medium type two supported */
1229 u8 med_type3_sup; /* medium type three supported */
1230 u8 med_type4_sup; /* medium type four supported */
1231};
1232
1233/*
1234 * SCSI informational exception control mode page
1235 */
1236struct scsi_mp_info_excpt_cntl_s{
1237#ifdef __BIGENDIAN
1238 u8 ps:1;
1239 u8 reserved1:1;
1240 u8 page_code:6;
1241#else
1242 u8 page_code:6;
1243 u8 reserved1:1;
1244 u8 ps:1;
1245#endif
1246 u8 page_len;
1247#ifdef __BIGENDIAN
1248 u8 perf:1; /* performance */
1249 u8 reserved3:1;
1250 u8 ebf:1; /* enable background fucntion */
1251 u8 ewasc:1; /* enable warning */
1252 u8 dexcpt:1; /* disable exception control */
1253 u8 test:1; /* enable test device failure
1254 * notification
1255 */
1256 u8 reserved4:1;
1257 u8 log_error:1;
1258
1259 u8 reserved5:4;
1260 u8 mrie:4; /* method of reporting info
1261 * exceptions
1262 */
1263#else
1264 u8 log_error:1;
1265 u8 reserved4:1;
1266 u8 test:1; /* enable test device failure
1267 * notification
1268 */
1269 u8 dexcpt:1; /* disable exception control */
1270 u8 ewasc:1; /* enable warning */
1271 u8 ebf:1; /* enable background fucntion */
1272 u8 reserved3:1;
1273 u8 perf:1; /* performance */
1274
1275 u8 mrie:4; /* method of reporting info
1276 * exceptions
1277 */
1278 u8 reserved5:4;
1279#endif
1280 u8 interval_timer_msb;
1281 u8 interval_timer_lsb;
1282
1283 u8 report_count_msb;
1284 u8 report_count_lsb;
1285};
1286
1287/*
1288 * Methods of reporting informational exceptions
1289 */
1290#define SCSI_MP_IEC_NO_REPORT 0x0 /* no reporting of exceptions */
1291#define SCSI_MP_IEC_AER 0x1 /* async event reporting */
1292#define SCSI_MP_IEC_UNIT_ATTN 0x2 /* generate unit attenstion */
1293#define SCSI_MO_IEC_COND_REC_ERR 0x3 /* conditionally generate recovered
1294 * error
1295 */
1296#define SCSI_MP_IEC_UNCOND_REC_ERR 0x4 /* unconditionally generate recovered
1297 * error
1298 */
1299#define SCSI_MP_IEC_NO_SENSE 0x5 /* generate no sense */
1300#define SCSI_MP_IEC_ON_REQUEST 0x6 /* only report exceptions on request */
1301
1302/*
1303 * SCSI flexible disk page
1304 */
1305struct scsi_mp_flexible_disk_s{
1306#ifdef __BIGENDIAN
1307 u8 ps:1;
1308 u8 reserved1:1;
1309 u8 page_code:6;
1310#else
1311 u8 page_code:6;
1312 u8 reserved1:1;
1313 u8 ps:1;
1314#endif
1315 u8 page_len;
1316
1317 u8 transfer_rate_msb;
1318 u8 transfer_rate_lsb;
1319
1320 u8 num_heads;
1321 u8 num_sectors;
1322
1323 u8 bytes_per_sector_msb;
1324 u8 bytes_per_sector_lsb;
1325
1326 u8 num_cylinders_msb;
1327 u8 num_cylinders_lsb;
1328
1329 u8 sc_wpc_msb; /* starting cylinder-write
1330 * precompensation msb
1331 */
1332 u8 sc_wpc_lsb; /* starting cylinder-write
1333 * precompensation lsb
1334 */
1335 u8 sc_rwc_msb; /* starting cylinder-reduced write
1336 * current msb
1337 */
1338 u8 sc_rwc_lsb; /* starting cylinder-reduced write
1339 * current lsb
1340 */
1341
1342 u8 dev_step_rate_msb;
1343 u8 dev_step_rate_lsb;
1344
1345 u8 dev_step_pulse_width;
1346
1347 u8 head_sd_msb; /* head settle delay msb */
1348 u8 head_sd_lsb; /* head settle delay lsb */
1349
1350 u8 motor_on_delay;
1351 u8 motor_off_delay;
1352#ifdef __BIGENDIAN
1353 u8 trdy:1; /* true ready bit */
1354 u8 ssn:1; /* start sector number bit */
1355 u8 mo:1; /* motor on bit */
1356 u8 reserved3:5;
1357
1358 u8 reserved4:4;
1359 u8 spc:4; /* step pulse per cylinder */
1360#else
1361 u8 reserved3:5;
1362 u8 mo:1; /* motor on bit */
1363 u8 ssn:1; /* start sector number bit */
1364 u8 trdy:1; /* true ready bit */
1365
1366 u8 spc:4; /* step pulse per cylinder */
1367 u8 reserved4:4;
1368#endif
1369 u8 write_comp;
1370 u8 head_load_delay;
1371 u8 head_unload_delay;
1372#ifdef __BIGENDIAN
1373 u8 pin34:4; /* pin34 usage */
1374 u8 pin2:4; /* pin2 usage */
1375
1376 u8 pin4:4; /* pin4 usage */
1377 u8 pin1:4; /* pin1 usage */
1378#else
1379 u8 pin2:4; /* pin2 usage */
1380 u8 pin34:4; /* pin34 usage */
1381
1382 u8 pin1:4; /* pin1 usage */
1383 u8 pin4:4; /* pin4 usage */
1384#endif
1385 u8 med_rot_rate_msb;
1386 u8 med_rot_rate_lsb;
1387
1388 u8 reserved5[2];
1389};
1390
1391struct scsi_mode_page_format_data6_s{
1392 struct scsi_mode_param_header6_s mph; /* mode page header */
1393 struct scsi_mode_param_desc_s desc; /* block descriptor */
1394 struct scsi_mp_format_device_s format; /* format device data */
1395};
1396
1397struct scsi_mode_page_format_data10_s{
1398 struct scsi_mode_param_header10_s mph; /* mode page header */
1399 struct scsi_mode_param_desc_s desc; /* block descriptor */
1400 struct scsi_mp_format_device_s format; /* format device data */
1401};
1402
1403struct scsi_mode_page_rdg_data6_s{
1404 struct scsi_mode_param_header6_s mph; /* mode page header */
1405 struct scsi_mode_param_desc_s desc; /* block descriptor */
1406 struct scsi_mp_rigid_device_geometry_s rdg;
1407 /* rigid geometry data */
1408};
1409
1410struct scsi_mode_page_rdg_data10_s{
1411 struct scsi_mode_param_header10_s mph; /* mode page header */
1412 struct scsi_mode_param_desc_s desc; /* block descriptor */
1413 struct scsi_mp_rigid_device_geometry_s rdg;
1414 /* rigid geometry data */
1415};
1416
1417struct scsi_mode_page_cache6_s{
1418 struct scsi_mode_param_header6_s mph; /* mode page header */
1419 struct scsi_mode_param_desc_s desc; /* block descriptor */
1420 struct scsi_mp_caching_s cache; /* cache page data */
1421};
1422
1423struct scsi_mode_page_cache10_s{
1424 struct scsi_mode_param_header10_s mph; /* mode page header */
1425 struct scsi_mode_param_desc_s desc; /* block descriptor */
1426 struct scsi_mp_caching_s cache; /* cache page data */
1427};
1428
1429/* --------------------------------------------------------------
1430 * Format Unit command
1431 * ------------------------------------------------------------
1432 */
1433
1434/*
1435 * Format Unit CDB
1436 */
1437struct scsi_format_unit_s{
1438 u8 opcode;
1439#ifdef __BIGENDIAN
1440 u8 res1:3;
1441 u8 fmtdata:1; /* if set, data out phase has format
1442 * data
1443 */
1444 u8 cmplst:1; /* if set, defect list is complete */
1445 u8 def_list:3; /* format of defect descriptor is
1446 * fmtdata =1
1447 */
1448#else
1449 u8 def_list:3; /* format of defect descriptor is
1450 * fmtdata = 1
1451 */
1452 u8 cmplst:1; /* if set, defect list is complete */
1453 u8 fmtdata:1; /* if set, data out phase has format
1454 * data
1455 */
1456 u8 res1:3;
1457#endif
1458 u8 interleave_msb;
1459 u8 interleave_lsb;
1460 u8 vendor_spec;
1461 u8 control;
1462};
1463
1464/*
1465 * h
1466 */
1467struct scsi_reserve6_s{
1468 u8 opcode;
1469#ifdef __BIGENDIAN
1470 u8 reserved:3;
1471 u8 obsolete:4;
1472 u8 extent:1;
1473#else
1474 u8 extent:1;
1475 u8 obsolete:4;
1476 u8 reserved:3;
1477#endif
1478 u8 reservation_id;
1479 u16 param_list_len;
1480 u8 control;
1481};
1482
1483/*
1484 * h
1485 */
1486struct scsi_release6_s{
1487 u8 opcode;
1488#ifdef __BIGENDIAN
1489 u8 reserved1:3;
1490 u8 obsolete:4;
1491 u8 extent:1;
1492#else
1493 u8 extent:1;
1494 u8 obsolete:4;
1495 u8 reserved1:3;
1496#endif
1497 u8 reservation_id;
1498 u16 reserved2;
1499 u8 control;
1500};
1501
1502/*
1503 * h
1504 */
1505struct scsi_reserve10_s{
1506 u8 opcode;
1507#ifdef __BIGENDIAN
1508 u8 reserved1:3;
1509 u8 third_party:1;
1510 u8 reserved2:2;
1511 u8 long_id:1;
1512 u8 extent:1;
1513#else
1514 u8 extent:1;
1515 u8 long_id:1;
1516 u8 reserved2:2;
1517 u8 third_party:1;
1518 u8 reserved1:3;
1519#endif
1520 u8 reservation_id;
1521 u8 third_pty_dev_id;
1522 u8 reserved3;
1523 u8 reserved4;
1524 u8 reserved5;
1525 u16 param_list_len;
1526 u8 control;
1527};
1528
1529struct scsi_release10_s{
1530 u8 opcode;
1531#ifdef __BIGENDIAN
1532 u8 reserved1:3;
1533 u8 third_party:1;
1534 u8 reserved2:2;
1535 u8 long_id:1;
1536 u8 extent:1;
1537#else
1538 u8 extent:1;
1539 u8 long_id:1;
1540 u8 reserved2:2;
1541 u8 third_party:1;
1542 u8 reserved1:3;
1543#endif
1544 u8 reservation_id;
1545 u8 third_pty_dev_id;
1546 u8 reserved3;
1547 u8 reserved4;
1548 u8 reserved5;
1549 u16 param_list_len;
1550 u8 control;
1551};
1552
1553struct scsi_verify10_s{
1554 u8 opcode;
1555#ifdef __BIGENDIAN
1556 u8 lun:3;
1557 u8 dpo:1;
1558 u8 reserved:2;
1559 u8 bytchk:1;
1560 u8 reladdr:1;
1561#else
1562 u8 reladdr:1;
1563 u8 bytchk:1;
1564 u8 reserved:2;
1565 u8 dpo:1;
1566 u8 lun:3;
1567#endif
1568 u8 lba0;
1569 u8 lba1;
1570 u8 lba2;
1571 u8 lba3;
1572 u8 reserved1;
1573 u8 verification_len0;
1574 u8 verification_len1;
1575 u8 control_byte;
1576};
1577
1578struct scsi_request_sense_s{
1579 u8 opcode;
1580#ifdef __BIGENDIAN
1581 u8 lun:3;
1582 u8 reserved:5;
1583#else
1584 u8 reserved:5;
1585 u8 lun:3;
1586#endif
1587 u8 reserved0;
1588 u8 reserved1;
1589 u8 alloc_len;
1590 u8 control_byte;
1591};
1592
1593/* ------------------------------------------------------------
1594 * SCSI status byte values
1595 * ------------------------------------------------------------
1596 */
1597#define SCSI_STATUS_GOOD 0x00
1598#define SCSI_STATUS_CHECK_CONDITION 0x02
1599#define SCSI_STATUS_CONDITION_MET 0x04
1600#define SCSI_STATUS_BUSY 0x08
1601#define SCSI_STATUS_INTERMEDIATE 0x10
1602#define SCSI_STATUS_ICM 0x14 /* intermediate condition met */
1603#define SCSI_STATUS_RESERVATION_CONFLICT 0x18
1604#define SCSI_STATUS_COMMAND_TERMINATED 0x22
1605#define SCSI_STATUS_QUEUE_FULL 0x28
1606#define SCSI_STATUS_ACA_ACTIVE 0x30
1607
1608#define SCSI_MAX_ALLOC_LEN 0xFF /* maximum allocarion length
1609 * in CDBs
1610 */
1611
1612#define SCSI_OP_WRITE_VERIFY10 0x2E
1613#define SCSI_OP_WRITE_VERIFY12 0xAE
1614#define SCSI_OP_UNDEF 0xFF
1615
1616/*
1617 * SCSI WRITE-VERIFY(10) command
1618 */
1619struct scsi_write_verify10_s{
1620 u8 opcode;
1621#ifdef __BIGENDIAN
1622 u8 reserved1:3;
1623 u8 dpo:1; /* Disable Page Out */
1624 u8 reserved2:1;
1625 u8 ebp:1; /* erse by-pass */
1626 u8 bytchk:1; /* byte check */
1627 u8 rel_adr:1; /* relative address */
1628#else
1629 u8 rel_adr:1; /* relative address */
1630 u8 bytchk:1; /* byte check */
1631 u8 ebp:1; /* erse by-pass */
1632 u8 reserved2:1;
1633 u8 dpo:1; /* Disable Page Out */
1634 u8 reserved1:3;
1635#endif
1636 u8 lba0; /* logical block address - MSB */
1637 u8 lba1;
1638 u8 lba2;
1639 u8 lba3; /* LSB */
1640 u8 reserved3;
1641 u8 xfer_length0; /* transfer length in blocks - MSB */
1642 u8 xfer_length1; /* LSB */
1643 u8 control;
1644};
1645
1646#pragma pack()
1647
1648#endif /* __SCSI_H__ */
diff --git a/drivers/scsi/bfa/include/protocol/types.h b/drivers/scsi/bfa/include/protocol/types.h
deleted file mode 100644
index 2875a6cced3b..000000000000
--- a/drivers/scsi/bfa/include/protocol/types.h
+++ /dev/null
@@ -1,42 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * types.h Protocol defined base types
20 */
21
22#ifndef __TYPES_H__
23#define __TYPES_H__
24
25#include <bfa_os_inc.h>
26
27#define wwn_t u64
28#define lun_t u64
29
30#define WWN_NULL (0)
31#define FC_SYMNAME_MAX 256 /* max name server symbolic name size */
32#define FC_ALPA_MAX 128
33
34#pragma pack(1)
35
36#define MAC_ADDRLEN (6)
37struct mac_s { u8 mac[MAC_ADDRLEN]; };
38#define mac_t struct mac_s
39
40#pragma pack()
41
42#endif
diff --git a/drivers/scsi/bfa/loop.c b/drivers/scsi/bfa/loop.c
deleted file mode 100644
index f6342efb6a90..000000000000
--- a/drivers/scsi/bfa/loop.c
+++ /dev/null
@@ -1,213 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * port_loop.c vport private loop implementation.
20 */
21#include <bfa.h>
22#include <bfa_svc.h>
23#include "fcs_lport.h"
24#include "fcs_rport.h"
25#include "fcs_trcmod.h"
26#include "lport_priv.h"
27
28BFA_TRC_FILE(FCS, LOOP);
29
30/**
31 * ALPA to LIXA bitmap mapping
32 *
33 * ALPA 0x00 (Word 0, Bit 30) is invalid for N_Ports. Also Word 0 Bit 31
34 * is for L_bit (login required) and is filled as ALPA 0x00 here.
35 */
36static const u8 port_loop_alpa_map[] = {
37 0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, /* Word 3 Bits 0..7 */
38 0xD9, 0xD6, 0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, /* Word 3 Bits 8..15 */
39 0xCD, 0xCC, 0xCB, 0xCA, 0xC9, 0xC7, 0xC6, 0xC5, /* Word 3 Bits 16..23 */
40 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5, 0xB4, 0xB3, /* Word 3 Bits 24..31 */
41
42 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, /* Word 2 Bits 0..7 */
43 0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, /* Word 2 Bits 8..15 */
44 0x98, 0x97, 0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, /* Word 2 Bits 16..23 */
45 0x80, 0x7C, 0x7A, 0x79, 0x76, 0x75, 0x74, 0x73, /* Word 2 Bits 24..31 */
46
47 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B, 0x6A, 0x69, /* Word 1 Bits 0..7 */
48 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56, /* Word 1 Bits 8..15 */
49 0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, /* Word 1 Bits 16..23 */
50 0x4B, 0x4A, 0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, /* Word 1 Bits 24..31 */
51
52 0x3A, 0x39, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, /* Word 0 Bits 0..7 */
53 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x27, 0x26, /* Word 0 Bits 8..15 */
54 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17, /* Word 0 Bits 16..23 */
55 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01, 0x00, 0x00, /* Word 0 Bits 24..31 */
56};
57
58/*
59 * Local Functions
60 */
61static bfa_status_t bfa_fcs_port_loop_send_plogi(struct bfa_fcs_port_s *port,
62 u8 alpa);
63
64static void bfa_fcs_port_loop_plogi_response(void *fcsarg,
65 struct bfa_fcxp_s *fcxp,
66 void *cbarg,
67 bfa_status_t req_status,
68 u32 rsp_len,
69 u32 resid_len,
70 struct fchs_s *rsp_fchs);
71/**
72 * Called by port to initializar in provate LOOP topology.
73 */
74void
75bfa_fcs_port_loop_init(struct bfa_fcs_port_s *port)
76{
77}
78
79/**
80 * Called by port to notify transition to online state.
81 */
82void
83bfa_fcs_port_loop_online(struct bfa_fcs_port_s *port)
84{
85
86 u8 num_alpa = port->port_topo.ploop.num_alpa;
87 u8 *alpa_pos_map = port->port_topo.ploop.alpa_pos_map;
88 struct bfa_fcs_rport_s *r_port;
89 int ii = 0;
90
91 /*
92 * If the port role is Initiator Mode, create Rports.
93 */
94 if (port->port_cfg.roles == BFA_PORT_ROLE_FCP_IM) {
95 /*
96 * Check if the ALPA positional bitmap is available.
97 * if not, we send PLOGI to all possible ALPAs.
98 */
99 if (num_alpa > 0) {
100 for (ii = 0; ii < num_alpa; ii++) {
101 /*
102 * ignore ALPA of bfa port
103 */
104 if (alpa_pos_map[ii] != port->pid) {
105 r_port = bfa_fcs_rport_create(port,
106 alpa_pos_map[ii]);
107 }
108 }
109 } else {
110 for (ii = 0; ii < MAX_ALPA_COUNT; ii++) {
111 /*
112 * ignore ALPA of bfa port
113 */
114 if ((port_loop_alpa_map[ii] > 0)
115 && (port_loop_alpa_map[ii] != port->pid))
116 bfa_fcs_port_loop_send_plogi(port,
117 port_loop_alpa_map[ii]);
118 /**TBD */
119 }
120 }
121 } else {
122 /*
123 * TBD Target Mode ??
124 */
125 }
126
127}
128
129/**
130 * Called by port to notify transition to offline state.
131 */
132void
133bfa_fcs_port_loop_offline(struct bfa_fcs_port_s *port)
134{
135
136}
137
138/**
139 * Called by port to notify a LIP on the loop.
140 */
141void
142bfa_fcs_port_loop_lip(struct bfa_fcs_port_s *port)
143{
144}
145
146/**
147 * Local Functions.
148 */
149static bfa_status_t
150bfa_fcs_port_loop_send_plogi(struct bfa_fcs_port_s *port, u8 alpa)
151{
152 struct fchs_s fchs;
153 struct bfa_fcxp_s *fcxp = NULL;
154 int len;
155
156 bfa_trc(port->fcs, alpa);
157
158 fcxp = bfa_fcxp_alloc(NULL, port->fcs->bfa, 0, 0, NULL, NULL, NULL,
159 NULL);
160 bfa_assert(fcxp);
161
162 len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), alpa,
163 bfa_fcs_port_get_fcid(port), 0,
164 port->port_cfg.pwwn, port->port_cfg.nwwn,
165 bfa_fcport_get_maxfrsize(port->fcs->bfa));
166
167 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
168 FC_CLASS_3, len, &fchs,
169 bfa_fcs_port_loop_plogi_response, (void *)port,
170 FC_MAX_PDUSZ, FC_RA_TOV);
171
172 return BFA_STATUS_OK;
173}
174
175/**
176 * Called by fcxp to notify the Plogi response
177 */
178static void
179bfa_fcs_port_loop_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
180 void *cbarg, bfa_status_t req_status,
181 u32 rsp_len, u32 resid_len,
182 struct fchs_s *rsp_fchs)
183{
184 struct bfa_fcs_port_s *port = (struct bfa_fcs_port_s *) cbarg;
185 struct fc_logi_s *plogi_resp;
186 struct fc_els_cmd_s *els_cmd;
187
188 bfa_trc(port->fcs, req_status);
189
190 /*
191 * Sanity Checks
192 */
193 if (req_status != BFA_STATUS_OK) {
194 bfa_trc(port->fcs, req_status);
195 /*
196 * @todo
197 * This could mean that the device with this APLA does not
198 * exist on the loop.
199 */
200
201 return;
202 }
203
204 els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);
205 plogi_resp = (struct fc_logi_s *) els_cmd;
206
207 if (els_cmd->els_code == FC_ELS_ACC) {
208 bfa_fcs_rport_start(port, rsp_fchs, plogi_resp);
209 } else {
210 bfa_trc(port->fcs, plogi_resp->els_cmd.els_code);
211 bfa_assert(0);
212 }
213}
diff --git a/drivers/scsi/bfa/lport_api.c b/drivers/scsi/bfa/lport_api.c
deleted file mode 100644
index 72b3f508d0e9..000000000000
--- a/drivers/scsi/bfa/lport_api.c
+++ /dev/null
@@ -1,303 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * port_api.c BFA FCS port
20 */
21
22#include <fcs/bfa_fcs.h>
23#include <fcs/bfa_fcs_lport.h>
24#include <fcs/bfa_fcs_rport.h>
25#include "fcs_rport.h"
26#include "fcs_fabric.h"
27#include "fcs_trcmod.h"
28#include "fcs_vport.h"
29
30BFA_TRC_FILE(FCS, PORT_API);
31
32
33
34/**
35 * fcs_port_api BFA FCS port API
36 */
37
/*
 * Configure the base (physical) port.  Intentionally an empty stub in
 * this version of the API.
 */
void
bfa_fcs_cfg_base_port(struct bfa_fcs_s *fcs, struct bfa_port_cfg_s *port_cfg)
{
}
42
/*
 * Return the base (physical) port of the FCS instance, i.e. the
 * bport embedded in the default fabric.
 */
struct bfa_fcs_port_s *
bfa_fcs_get_base_port(struct bfa_fcs_s *fcs)
{
	return &fcs->fabric.bport;
}
48
/*
 * Walk the port's rport queue and return an rport's pwwn, selected
 * either by wwn match (bwwn true) or by positional index (bwwn false).
 * Entries with well-known fabric addresses (PID > 0xFFF000) are skipped.
 *
 * NOTE(review): if the walk terminates without a match (index beyond
 * queue, or wwn not found), the pwwn of the *last examined* rport is
 * returned instead of 0 - confirm that callers rely on this.
 */
wwn_t
bfa_fcs_port_get_rport(struct bfa_fcs_port_s *port, wwn_t wwn, int index,
		       int nrports, bfa_boolean_t bwwn)
{
	struct list_head *qh, *qe;
	struct bfa_fcs_rport_s *rport = NULL;
	int i;
	struct bfa_fcs_s *fcs;

	if (port == NULL || nrports == 0)
		return (wwn_t) 0;

	fcs = port->fcs;
	bfa_trc(fcs, (u32) nrports);

	i = 0;
	qh = &port->rport_q;
	qe = bfa_q_first(qh);

	while ((qe != qh) && (i < nrports)) {
		rport = (struct bfa_fcs_rport_s *)qe;
		if (bfa_os_ntoh3b(rport->pid) > 0xFFF000) {
			/* well-known address - skip, but trace it */
			qe = bfa_q_next(qe);
			bfa_trc(fcs, (u32) rport->pwwn);
			bfa_trc(fcs, rport->pid);
			bfa_trc(fcs, i);
			continue;
		}

		if (bwwn) {
			if (!memcmp(&wwn, &rport->pwwn, 8))
				break;
		} else {
			if (i == index)
				break;
		}

		i++;
		qe = bfa_q_next(qe);
	}

	bfa_trc(fcs, i);
	if (rport)
		return rport->pwwn;
	else
		return (wwn_t) 0;
}
96
97void
98bfa_fcs_port_get_rports(struct bfa_fcs_port_s *port, wwn_t rport_wwns[],
99 int *nrports)
100{
101 struct list_head *qh, *qe;
102 struct bfa_fcs_rport_s *rport = NULL;
103 int i;
104 struct bfa_fcs_s *fcs;
105
106 if (port == NULL || rport_wwns == NULL || *nrports == 0)
107 return;
108
109 fcs = port->fcs;
110 bfa_trc(fcs, (u32) *nrports);
111
112 i = 0;
113 qh = &port->rport_q;
114 qe = bfa_q_first(qh);
115
116 while ((qe != qh) && (i < *nrports)) {
117 rport = (struct bfa_fcs_rport_s *)qe;
118 if (bfa_os_ntoh3b(rport->pid) > 0xFFF000) {
119 qe = bfa_q_next(qe);
120 bfa_trc(fcs, (u32) rport->pwwn);
121 bfa_trc(fcs, rport->pid);
122 bfa_trc(fcs, i);
123 continue;
124 }
125
126 rport_wwns[i] = rport->pwwn;
127
128 i++;
129 qe = bfa_q_next(qe);
130 }
131
132 bfa_trc(fcs, i);
133 *nrports = i;
134 return;
135}
136
137/*
138 * Iterate's through all the rport's in the given port to
139 * determine the maximum operating speed.
140 *
141 * To be used in TRL Functionality only
142 */
143enum bfa_pport_speed
144bfa_fcs_port_get_rport_max_speed(struct bfa_fcs_port_s *port)
145{
146 struct list_head *qh, *qe;
147 struct bfa_fcs_rport_s *rport = NULL;
148 struct bfa_fcs_s *fcs;
149 enum bfa_pport_speed max_speed = 0;
150 struct bfa_pport_attr_s pport_attr;
151 enum bfa_pport_speed pport_speed, rport_speed;
152 bfa_boolean_t trl_enabled = bfa_fcport_is_ratelim(port->fcs->bfa);
153
154 if (port == NULL)
155 return 0;
156
157 fcs = port->fcs;
158
159 /*
160 * Get Physical port's current speed
161 */
162 bfa_fcport_get_attr(port->fcs->bfa, &pport_attr);
163 pport_speed = pport_attr.speed;
164 bfa_trc(fcs, pport_speed);
165
166 qh = &port->rport_q;
167 qe = bfa_q_first(qh);
168
169 while (qe != qh) {
170 rport = (struct bfa_fcs_rport_s *) qe;
171 if ((bfa_os_ntoh3b(rport->pid) > 0xFFF000) ||
172 (bfa_fcs_rport_get_state(rport) ==
173 BFA_RPORT_OFFLINE)) {
174 qe = bfa_q_next(qe);
175 continue;
176 }
177
178 rport_speed = rport->rpf.rpsc_speed;
179 if ((trl_enabled) && (rport_speed ==
180 BFA_PPORT_SPEED_UNKNOWN)) {
181 /* Use default ratelim speed setting */
182 rport_speed =
183 bfa_fcport_get_ratelim_speed(port->fcs->bfa);
184 }
185
186 if ((rport_speed == BFA_PPORT_SPEED_8GBPS) ||
187 (rport_speed > pport_speed)) {
188 max_speed = rport_speed;
189 break;
190 } else if (rport_speed > max_speed) {
191 max_speed = rport_speed;
192 }
193
194 qe = bfa_q_next(qe);
195 }
196
197 bfa_trc(fcs, max_speed);
198 return max_speed;
199}
200
201struct bfa_fcs_port_s *
202bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t lpwwn)
203{
204 struct bfa_fcs_vport_s *vport;
205 bfa_fcs_vf_t *vf;
206
207 bfa_assert(fcs != NULL);
208
209 vf = bfa_fcs_vf_lookup(fcs, vf_id);
210 if (vf == NULL) {
211 bfa_trc(fcs, vf_id);
212 return NULL;
213 }
214
215 if (!lpwwn || (vf->bport.port_cfg.pwwn == lpwwn))
216 return &vf->bport;
217
218 vport = bfa_fcs_fabric_vport_lookup(vf, lpwwn);
219 if (vport)
220 return &vport->lport;
221
222 return NULL;
223}
224
225/*
226 * API corresponding to VmWare's NPIV_VPORT_GETINFO.
227 */
228void
229bfa_fcs_port_get_info(struct bfa_fcs_port_s *port,
230 struct bfa_port_info_s *port_info)
231{
232
233 bfa_trc(port->fcs, port->fabric->fabric_name);
234
235 if (port->vport == NULL) {
236 /*
237 * This is a Physical port
238 */
239 port_info->port_type = BFA_PORT_TYPE_PHYSICAL;
240
241 /*
242 * @todo : need to fix the state & reason
243 */
244 port_info->port_state = 0;
245 port_info->offline_reason = 0;
246
247 port_info->port_wwn = bfa_fcs_port_get_pwwn(port);
248 port_info->node_wwn = bfa_fcs_port_get_nwwn(port);
249
250 port_info->max_vports_supp =
251 bfa_lps_get_max_vport(port->fcs->bfa);
252 port_info->num_vports_inuse =
253 bfa_fcs_fabric_vport_count(port->fabric);
254 port_info->max_rports_supp = BFA_FCS_MAX_RPORTS_SUPP;
255 port_info->num_rports_inuse = port->num_rports;
256 } else {
257 /*
258 * This is a virtual port
259 */
260 port_info->port_type = BFA_PORT_TYPE_VIRTUAL;
261
262 /*
263 * @todo : need to fix the state & reason
264 */
265 port_info->port_state = 0;
266 port_info->offline_reason = 0;
267
268 port_info->port_wwn = bfa_fcs_port_get_pwwn(port);
269 port_info->node_wwn = bfa_fcs_port_get_nwwn(port);
270 }
271}
272
273void
274bfa_fcs_port_get_stats(struct bfa_fcs_port_s *fcs_port,
275 struct bfa_port_stats_s *port_stats)
276{
277 bfa_os_memcpy(port_stats, &fcs_port->stats,
278 sizeof(struct bfa_port_stats_s));
279 return;
280}
281
282void
283bfa_fcs_port_clear_stats(struct bfa_fcs_port_s *fcs_port)
284{
285 bfa_os_memset(&fcs_port->stats, 0, sizeof(struct bfa_port_stats_s));
286 return;
287}
288
/*
 * Add the FCP-IPFC role to the port configuration.
 */
void
bfa_fcs_port_enable_ipfc_roles(struct bfa_fcs_port_s *fcs_port)
{
	fcs_port->port_cfg.roles |= BFA_PORT_ROLE_FCP_IPFC;
	return;
}
295
/*
 * Remove the FCP-IPFC role from the port configuration.
 */
void
bfa_fcs_port_disable_ipfc_roles(struct bfa_fcs_port_s *fcs_port)
{
	fcs_port->port_cfg.roles &= ~BFA_PORT_ROLE_FCP_IPFC;
	return;
}
302
303
diff --git a/drivers/scsi/bfa/lport_priv.h b/drivers/scsi/bfa/lport_priv.h
deleted file mode 100644
index dbae370a599a..000000000000
--- a/drivers/scsi/bfa/lport_priv.h
+++ /dev/null
@@ -1,82 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __VP_PRIV_H__
19#define __VP_PRIV_H__
20
21#include <fcs/bfa_fcs_lport.h>
22#include <fcs/bfa_fcs_vport.h>
23
24/*
25 * Functions exported by vps
26 */
27void bfa_fcs_vport_init(struct bfa_fcs_vport_s *vport);
28
29/*
30 * Functions exported by vps
31 */
32void bfa_fcs_vps_online(struct bfa_fcs_port_s *port);
33void bfa_fcs_vps_offline(struct bfa_fcs_port_s *port);
34void bfa_fcs_vps_lip(struct bfa_fcs_port_s *port);
35
36/*
37 * Functions exported by port_fab
38 */
39void bfa_fcs_port_fab_init(struct bfa_fcs_port_s *vport);
40void bfa_fcs_port_fab_online(struct bfa_fcs_port_s *vport);
41void bfa_fcs_port_fab_offline(struct bfa_fcs_port_s *vport);
42void bfa_fcs_port_fab_rx_frame(struct bfa_fcs_port_s *port,
43 u8 *rx_frame, u32 len);
44
45/*
46 * Functions exported by VP-NS.
47 */
48void bfa_fcs_port_ns_init(struct bfa_fcs_port_s *vport);
49void bfa_fcs_port_ns_offline(struct bfa_fcs_port_s *vport);
50void bfa_fcs_port_ns_online(struct bfa_fcs_port_s *vport);
51void bfa_fcs_port_ns_query(struct bfa_fcs_port_s *port);
52
53/*
54 * Functions exported by VP-SCN
55 */
56void bfa_fcs_port_scn_init(struct bfa_fcs_port_s *vport);
57void bfa_fcs_port_scn_offline(struct bfa_fcs_port_s *vport);
58void bfa_fcs_port_scn_online(struct bfa_fcs_port_s *vport);
59void bfa_fcs_port_scn_process_rscn(struct bfa_fcs_port_s *port,
60 struct fchs_s *rx_frame, u32 len);
61
62/*
63 * Functions exported by VP-N2N
64 */
65
66void bfa_fcs_port_n2n_init(struct bfa_fcs_port_s *port);
67void bfa_fcs_port_n2n_online(struct bfa_fcs_port_s *port);
68void bfa_fcs_port_n2n_offline(struct bfa_fcs_port_s *port);
69void bfa_fcs_port_n2n_rx_frame(struct bfa_fcs_port_s *port,
70 u8 *rx_frame, u32 len);
71
72/*
73 * Functions exported by VP-LOOP
74 */
75void bfa_fcs_port_loop_init(struct bfa_fcs_port_s *port);
76void bfa_fcs_port_loop_online(struct bfa_fcs_port_s *port);
77void bfa_fcs_port_loop_offline(struct bfa_fcs_port_s *port);
78void bfa_fcs_port_loop_lip(struct bfa_fcs_port_s *port);
79void bfa_fcs_port_loop_rx_frame(struct bfa_fcs_port_s *port,
80 u8 *rx_frame, u32 len);
81
82#endif /* __VP_PRIV_H__ */
diff --git a/drivers/scsi/bfa/ms.c b/drivers/scsi/bfa/ms.c
deleted file mode 100644
index 1d579ef26122..000000000000
--- a/drivers/scsi/bfa/ms.c
+++ /dev/null
@@ -1,759 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18
19#include <bfa.h>
20#include <bfa_svc.h>
21#include "fcs_lport.h"
22#include "fcs_rport.h"
23#include "fcs_trcmod.h"
24#include "fcs_fcxp.h"
25#include "lport_priv.h"
26
27BFA_TRC_FILE(FCS, MS);
28
29#define BFA_FCS_MS_CMD_MAX_RETRIES 2
30/*
31 * forward declarations
32 */
33static void bfa_fcs_port_ms_send_plogi(void *ms_cbarg,
34 struct bfa_fcxp_s *fcxp_alloced);
35static void bfa_fcs_port_ms_timeout(void *arg);
36static void bfa_fcs_port_ms_plogi_response(void *fcsarg,
37 struct bfa_fcxp_s *fcxp,
38 void *cbarg,
39 bfa_status_t req_status,
40 u32 rsp_len,
41 u32 resid_len,
42 struct fchs_s *rsp_fchs);
43
44static void bfa_fcs_port_ms_send_gmal(void *ms_cbarg,
45 struct bfa_fcxp_s *fcxp_alloced);
46static void bfa_fcs_port_ms_gmal_response(void *fcsarg,
47 struct bfa_fcxp_s *fcxp,
48 void *cbarg,
49 bfa_status_t req_status,
50 u32 rsp_len,
51 u32 resid_len,
52 struct fchs_s *rsp_fchs);
53static void bfa_fcs_port_ms_send_gfn(void *ms_cbarg,
54 struct bfa_fcxp_s *fcxp_alloced);
55static void bfa_fcs_port_ms_gfn_response(void *fcsarg,
56 struct bfa_fcxp_s *fcxp,
57 void *cbarg,
58 bfa_status_t req_status,
59 u32 rsp_len,
60 u32 resid_len,
61 struct fchs_s *rsp_fchs);
62/**
63 * fcs_ms_sm FCS MS state machine
64 */
65
66/**
67 * MS State Machine events
68 */
enum port_ms_event {
	MSSM_EVENT_PORT_ONLINE = 1,	/* local port came online */
	MSSM_EVENT_PORT_OFFLINE = 2,	/* local port went offline */
	MSSM_EVENT_RSP_OK = 3,		/* MS response accepted */
	MSSM_EVENT_RSP_ERROR = 4,	/* MS response error/reject */
	MSSM_EVENT_TIMEOUT = 5,		/* retry delay timer expired */
	MSSM_EVENT_FCXP_SENT = 6,	/* request handed to fcxp layer */
	MSSM_EVENT_PORT_FABRIC_RSCN = 7	/* fabric RSCN received */
};
78
79static void bfa_fcs_port_ms_sm_offline(struct bfa_fcs_port_ms_s *ms,
80 enum port_ms_event event);
81static void bfa_fcs_port_ms_sm_plogi_sending(struct bfa_fcs_port_ms_s *ms,
82 enum port_ms_event event);
83static void bfa_fcs_port_ms_sm_plogi(struct bfa_fcs_port_ms_s *ms,
84 enum port_ms_event event);
85static void bfa_fcs_port_ms_sm_plogi_retry(struct bfa_fcs_port_ms_s *ms,
86 enum port_ms_event event);
87static void bfa_fcs_port_ms_sm_gmal_sending(struct bfa_fcs_port_ms_s *ms,
88 enum port_ms_event event);
89static void bfa_fcs_port_ms_sm_gmal(struct bfa_fcs_port_ms_s *ms,
90 enum port_ms_event event);
91static void bfa_fcs_port_ms_sm_gmal_retry(struct bfa_fcs_port_ms_s *ms,
92 enum port_ms_event event);
93static void bfa_fcs_port_ms_sm_gfn_sending(struct bfa_fcs_port_ms_s *ms,
94 enum port_ms_event event);
95static void bfa_fcs_port_ms_sm_gfn(struct bfa_fcs_port_ms_s *ms,
96 enum port_ms_event event);
97static void bfa_fcs_port_ms_sm_gfn_retry(struct bfa_fcs_port_ms_s *ms,
98 enum port_ms_event event);
99static void bfa_fcs_port_ms_sm_online(struct bfa_fcs_port_ms_s *ms,
100 enum port_ms_event event);
101/**
102 * Start in offline state - awaiting NS to send start.
103 */
/*
 * MS is offline: wait for a port-online event to start the PLOGI
 * exchange with the fabric management server.
 */
static void
bfa_fcs_port_ms_sm_offline(struct bfa_fcs_port_ms_s *ms,
			   enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_PORT_ONLINE:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_plogi_sending);
		bfa_fcs_port_ms_send_plogi(ms, NULL);
		break;

	case MSSM_EVENT_PORT_OFFLINE:
		/* already offline - nothing to do */
		break;

	default:
		bfa_sm_fault(ms->port->fcs, event);
	}
}
124
/*
 * PLOGI request queued to the fcxp layer: wait for the send
 * confirmation, or cancel the pending allocation on port offline.
 */
static void
bfa_fcs_port_ms_sm_plogi_sending(struct bfa_fcs_port_ms_s *ms,
				 enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_FCXP_SENT:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_plogi);
		break;

	case MSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
		bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
				       &ms->fcxp_wqe);
		break;

	default:
		bfa_sm_fault(ms->port->fcs, event);
	}
}
147
/*
 * PLOGI sent to the management server: wait for the response.  On
 * success FDMI is brought online; a base port continues with a GMAL
 * query while a vport goes straight to online.
 */
static void
bfa_fcs_port_ms_sm_plogi(struct bfa_fcs_port_ms_s *ms, enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_RSP_ERROR:
		/*
		 * Start timer for a delayed retry
		 */
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_plogi_retry);
		ms->port->stats.ms_retries++;
		bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port), &ms->timer,
				bfa_fcs_port_ms_timeout, ms,
				BFA_FCS_RETRY_TIMEOUT);
		break;

	case MSSM_EVENT_RSP_OK:
		/*
		 * since plogi is done, now invoke MS related sub-modules
		 */
		bfa_fcs_port_fdmi_online(ms);

		/**
		 * if this is a Vport, go to online state.
		 */
		if (ms->port->vport) {
			bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_online);
			break;
		}

		/*
		 * For a base port we need to get the
		 * switch's IP address.
		 */
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gmal_sending);
		bfa_fcs_port_ms_send_gmal(ms, NULL);
		break;

	case MSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
		bfa_fcxp_discard(ms->fcxp);
		break;

	default:
		bfa_sm_fault(ms->port->fcs, event);
	}
}
197
/*
 * PLOGI failed: retry delay timer is running; re-send on expiry or
 * stop the timer on port offline.
 */
static void
bfa_fcs_port_ms_sm_plogi_retry(struct bfa_fcs_port_ms_s *ms,
			       enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_TIMEOUT:
		/*
		 * Retry Timer Expired. Re-send
		 */
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_plogi_sending);
		bfa_fcs_port_ms_send_plogi(ms, NULL);
		break;

	case MSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
		bfa_timer_stop(&ms->timer);
		break;

	default:
		bfa_sm_fault(ms->port->fcs, event);
	}
}
223
/*
 * MS login complete.  A fabric RSCN triggers a fresh GFN query to
 * detect a fabric-name change.
 */
static void
bfa_fcs_port_ms_sm_online(struct bfa_fcs_port_ms_s *ms,
			  enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
		break;

	case MSSM_EVENT_PORT_FABRIC_RSCN:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gfn_sending);
		ms->retry_cnt = 0;
		bfa_fcs_port_ms_send_gfn(ms, NULL);
		break;

	default:
		bfa_sm_fault(ms->port->fcs, event);
	}
}
246
/*
 * GMAL request queued to the fcxp layer: wait for the send
 * confirmation, or cancel the pending allocation on port offline.
 */
static void
bfa_fcs_port_ms_sm_gmal_sending(struct bfa_fcs_port_ms_s *ms,
				enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_FCXP_SENT:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gmal);
		break;

	case MSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
		bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
				       &ms->fcxp_wqe);
		break;

	default:
		bfa_sm_fault(ms->port->fcs, event);
	}
}
269
/*
 * GMAL sent: wait for the response.  Errors are retried up to
 * BFA_FCS_MS_CMD_MAX_RETRIES times; after that (or on success) the
 * state machine moves on to the GFN query.
 */
static void
bfa_fcs_port_ms_sm_gmal(struct bfa_fcs_port_ms_s *ms, enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_RSP_ERROR:
		/*
		 * Start timer for a delayed retry
		 */
		if (ms->retry_cnt++ < BFA_FCS_MS_CMD_MAX_RETRIES) {
			bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gmal_retry);
			ms->port->stats.ms_retries++;
			bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
					&ms->timer, bfa_fcs_port_ms_timeout, ms,
					BFA_FCS_RETRY_TIMEOUT);
		} else {
			/* retries exhausted - give up on GMAL, try GFN */
			bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gfn_sending);
			bfa_fcs_port_ms_send_gfn(ms, NULL);
			ms->retry_cnt = 0;
		}
		break;

	case MSSM_EVENT_RSP_OK:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gfn_sending);
		bfa_fcs_port_ms_send_gfn(ms, NULL);
		break;

	case MSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
		bfa_fcxp_discard(ms->fcxp);
		break;

	default:
		bfa_sm_fault(ms->port->fcs, event);
	}
}
308
/*
 * GMAL failed: retry delay timer is running; re-send on expiry or
 * stop the timer on port offline.
 */
static void
bfa_fcs_port_ms_sm_gmal_retry(struct bfa_fcs_port_ms_s *ms,
			      enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_TIMEOUT:
		/*
		 * Retry Timer Expired. Re-send
		 */
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gmal_sending);
		bfa_fcs_port_ms_send_gmal(ms, NULL);
		break;

	case MSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
		bfa_timer_stop(&ms->timer);
		break;

	default:
		bfa_sm_fault(ms->port->fcs, event);
	}
}
334
335/**
336 * ms_pvt MS local functions
337 */
338
/*
 * Build and send a GMAL (Get Management Address List) request to the
 * management server.  If no fcxp is available, queue on the wait
 * queue and retry from the alloc callback.
 */
static void
bfa_fcs_port_ms_send_gmal(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_port_ms_s *ms = ms_cbarg;
	struct bfa_fcs_port_s *port = ms->port;
	struct fchs_s fchs;
	int len;
	struct bfa_fcxp_s *fcxp;

	bfa_trc(port->fcs, port->pid);

	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		/* no fcxp free - wait; this function is the alloc callback */
		bfa_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
				    bfa_fcs_port_ms_send_gmal, ms);
		return;
	}
	ms->fcxp = fcxp;

	len = fc_gmal_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
				bfa_fcs_port_get_fcid(port),
				bfa_lps_get_peer_nwwn(port->fabric->lps));

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
		      FC_CLASS_3, len, &fchs, bfa_fcs_port_ms_gmal_response,
		      (void *)ms, FC_MAX_PDUSZ, FC_FCCT_TOV);

	bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT);
}
368
369static void
370bfa_fcs_port_ms_gmal_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
371 void *cbarg, bfa_status_t req_status,
372 u32 rsp_len, u32 resid_len,
373 struct fchs_s *rsp_fchs)
374{
375 struct bfa_fcs_port_ms_s *ms = (struct bfa_fcs_port_ms_s *)cbarg;
376 struct bfa_fcs_port_s *port = ms->port;
377 struct ct_hdr_s *cthdr = NULL;
378 struct fcgs_gmal_resp_s *gmal_resp;
379 struct fc_gmal_entry_s *gmal_entry;
380 u32 num_entries;
381 u8 *rsp_str;
382
383 bfa_trc(port->fcs, req_status);
384 bfa_trc(port->fcs, port->port_cfg.pwwn);
385
386 /*
387 * Sanity Checks
388 */
389 if (req_status != BFA_STATUS_OK) {
390 bfa_trc(port->fcs, req_status);
391 bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
392 return;
393 }
394
395 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
396 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
397
398 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
399 gmal_resp = (struct fcgs_gmal_resp_s *)(cthdr + 1);
400 num_entries = bfa_os_ntohl(gmal_resp->ms_len);
401 if (num_entries == 0) {
402 bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
403 return;
404 }
405 /*
406 * The response could contain multiple Entries.
407 * Entries for SNMP interface, etc.
408 * We look for the entry with a telnet prefix.
409 * First "http://" entry refers to IP addr
410 */
411
412 gmal_entry = (struct fc_gmal_entry_s *)gmal_resp->ms_ma;
413 while (num_entries > 0) {
414 if (strncmp
415 (gmal_entry->prefix, CT_GMAL_RESP_PREFIX_HTTP,
416 sizeof(gmal_entry->prefix)) == 0) {
417
418 /*
419 * if the IP address is terminating with a '/',
420 * remove it. *Byte 0 consists of the length
421 * of the string.
422 */
423 rsp_str = &(gmal_entry->prefix[0]);
424 if (rsp_str[gmal_entry->len - 1] == '/')
425 rsp_str[gmal_entry->len - 1] = 0;
426 /*
427 * copy IP Address to fabric
428 */
429 strncpy(bfa_fcs_port_get_fabric_ipaddr(port),
430 gmal_entry->ip_addr,
431 BFA_FCS_FABRIC_IPADDR_SZ);
432 break;
433 } else {
434 --num_entries;
435 ++gmal_entry;
436 }
437 }
438
439 bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK);
440 return;
441 }
442
443 bfa_trc(port->fcs, cthdr->reason_code);
444 bfa_trc(port->fcs, cthdr->exp_code);
445 bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
446}
447
/*
 * GFN request queued to the fcxp layer: wait for the send
 * confirmation, or cancel the pending allocation on port offline.
 */
static void
bfa_fcs_port_ms_sm_gfn_sending(struct bfa_fcs_port_ms_s *ms,
			       enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_FCXP_SENT:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gfn);
		break;

	case MSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
		bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
				       &ms->fcxp_wqe);
		break;

	default:
		bfa_sm_fault(ms->port->fcs, event);
	}
}
470
/*
 * GFN sent: wait for the response.  Errors are retried up to
 * BFA_FCS_MS_CMD_MAX_RETRIES times; then (or on success) MS goes
 * online.
 */
static void
bfa_fcs_port_ms_sm_gfn(struct bfa_fcs_port_ms_s *ms, enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_RSP_ERROR:
		/*
		 * Start timer for a delayed retry
		 */
		if (ms->retry_cnt++ < BFA_FCS_MS_CMD_MAX_RETRIES) {
			bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gfn_retry);
			ms->port->stats.ms_retries++;
			bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
					&ms->timer, bfa_fcs_port_ms_timeout, ms,
					BFA_FCS_RETRY_TIMEOUT);
		} else {
			/* retries exhausted - go online without fabric name */
			bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_online);
			ms->retry_cnt = 0;
		}
		break;

	case MSSM_EVENT_RSP_OK:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_online);
		break;

	case MSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
		bfa_fcxp_discard(ms->fcxp);
		break;

	default:
		bfa_sm_fault(ms->port->fcs, event);
	}
}
507
/*
 * GFN failed: retry delay timer is running; re-send on expiry or
 * stop the timer on port offline.
 */
static void
bfa_fcs_port_ms_sm_gfn_retry(struct bfa_fcs_port_ms_s *ms,
			     enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_TIMEOUT:
		/*
		 * Retry Timer Expired. Re-send
		 */
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gfn_sending);
		bfa_fcs_port_ms_send_gfn(ms, NULL);
		break;

	case MSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
		bfa_timer_stop(&ms->timer);
		break;

	default:
		bfa_sm_fault(ms->port->fcs, event);
	}
}
533
534/**
535 * ms_pvt MS local functions
536 */
537
/*
 * Build and send a GFN (Get Fabric Name) request to the management
 * server.  If no fcxp is available, queue on the wait queue and retry
 * from the alloc callback.
 */
static void
bfa_fcs_port_ms_send_gfn(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_port_ms_s *ms = ms_cbarg;
	struct bfa_fcs_port_s *port = ms->port;
	struct fchs_s fchs;
	int len;
	struct bfa_fcxp_s *fcxp;

	bfa_trc(port->fcs, port->pid);

	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		/* no fcxp free - wait; this function is the alloc callback */
		bfa_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
				    bfa_fcs_port_ms_send_gfn, ms);
		return;
	}
	ms->fcxp = fcxp;

	len = fc_gfn_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
			       bfa_fcs_port_get_fcid(port),
			       bfa_lps_get_peer_nwwn(port->fabric->lps));

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
		      FC_CLASS_3, len, &fchs, bfa_fcs_port_ms_gfn_response,
		      (void *)ms, FC_MAX_PDUSZ, FC_FCCT_TOV);

	bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT);
}
567
/*
 * GFN response handler.  On a CT accept, update the fabric name if it
 * differs from the cached one; then feed RSP_OK/ERROR into the MS
 * state machine.
 */
static void
bfa_fcs_port_ms_gfn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
			     bfa_status_t req_status, u32 rsp_len,
			     u32 resid_len, struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_port_ms_s *ms = (struct bfa_fcs_port_ms_s *)cbarg;
	struct bfa_fcs_port_s *port = ms->port;
	struct ct_hdr_s *cthdr = NULL;
	wwn_t *gfn_resp;

	bfa_trc(port->fcs, req_status);
	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		bfa_trc(port->fcs, req_status);
		bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
		return;
	}

	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);

	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
		gfn_resp = (wwn_t *) (cthdr + 1);
		/*
		 * check if it has actually changed
		 */
		if ((memcmp
		     ((void *)&bfa_fcs_port_get_fabric_name(port), gfn_resp,
		      sizeof(wwn_t)) != 0))
			bfa_fcs_fabric_set_fabric_name(port->fabric, *gfn_resp);
		bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK);
		return;
	}

	bfa_trc(port->fcs, cthdr->reason_code);
	bfa_trc(port->fcs, cthdr->exp_code);
	bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
}
610
611/**
612 * ms_pvt MS local functions
613 */
614
/*
 * Build and send a PLOGI to the well-known management server address
 * (FC_MGMT_SERVER).  If no fcxp is available, queue on the wait queue
 * and retry from the alloc callback.
 */
static void
bfa_fcs_port_ms_send_plogi(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_port_ms_s *ms = ms_cbarg;
	struct bfa_fcs_port_s *port = ms->port;
	struct fchs_s fchs;
	int len;
	struct bfa_fcxp_s *fcxp;

	bfa_trc(port->fcs, port->pid);

	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		/* no fcxp free - count it and wait for one */
		port->stats.ms_plogi_alloc_wait++;
		bfa_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
				    bfa_fcs_port_ms_send_plogi, ms);
		return;
	}
	ms->fcxp = fcxp;

	len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
			     bfa_os_hton3b(FC_MGMT_SERVER),
			     bfa_fcs_port_get_fcid(port), 0,
			     port->port_cfg.pwwn, port->port_cfg.nwwn,
			     bfa_fcport_get_maxfrsize(port->fcs->bfa));

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
		      FC_CLASS_3, len, &fchs, bfa_fcs_port_ms_plogi_response,
		      (void *)ms, FC_MAX_PDUSZ, FC_ELS_TOV);

	port->stats.ms_plogi_sent++;
	bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT);
}
648
/*
 * Management-server PLOGI response handler.  Validates the ELS reply,
 * updates per-port statistics, and feeds RSP_OK/RSP_ERROR into the MS
 * state machine.
 */
static void
bfa_fcs_port_ms_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
			       void *cbarg, bfa_status_t req_status,
			       u32 rsp_len, u32 resid_len,
			       struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_port_ms_s *ms = (struct bfa_fcs_port_ms_s *)cbarg;

	struct bfa_fcs_port_s *port = ms->port;
	struct fc_els_cmd_s *els_cmd;
	struct fc_ls_rjt_s *ls_rjt;

	bfa_trc(port->fcs, req_status);
	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		port->stats.ms_plogi_rsp_err++;
		bfa_trc(port->fcs, req_status);
		bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
		return;
	}

	els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);

	switch (els_cmd->els_code) {

	case FC_ELS_ACC:
		/* an accept shorter than a full PLOGI payload is invalid */
		if (rsp_len < sizeof(struct fc_logi_s)) {
			bfa_trc(port->fcs, rsp_len);
			port->stats.ms_plogi_acc_err++;
			bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
			break;
		}
		port->stats.ms_plogi_accepts++;
		bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK);
		break;

	case FC_ELS_LS_RJT:
		ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);

		bfa_trc(port->fcs, ls_rjt->reason_code);
		bfa_trc(port->fcs, ls_rjt->reason_code_expl);

		port->stats.ms_rejects++;
		bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
		break;

	default:
		port->stats.ms_plogi_unknown_rsp++;
		bfa_trc(port->fcs, els_cmd->els_code);
		bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
	}
}
705
/*
 * Retry timer callback: count the timeout and kick the MS state
 * machine to re-send the pending request.
 */
static void
bfa_fcs_port_ms_timeout(void *arg)
{
	struct bfa_fcs_port_ms_s *ms = (struct bfa_fcs_port_ms_s *)arg;

	ms->port->stats.ms_timeouts++;
	bfa_sm_send_event(ms, MSSM_EVENT_TIMEOUT);
}
714
715
/*
 * Initialize the port's MS module: bind it to the port, start in the
 * offline state, and initialize the FDMI sub-module.
 */
void
bfa_fcs_port_ms_init(struct bfa_fcs_port_s *port)
{
	struct bfa_fcs_port_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port);

	ms->port = port;
	bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);

	/*
	 * Invoke init routines of sub modules.
	 */
	bfa_fcs_port_fdmi_init(ms);
}
729
/*
 * Port went offline: notify the MS state machine and take the FDMI
 * sub-module offline as well.
 */
void
bfa_fcs_port_ms_offline(struct bfa_fcs_port_s *port)
{
	struct bfa_fcs_port_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port);

	ms->port = port;
	bfa_sm_send_event(ms, MSSM_EVENT_PORT_OFFLINE);
	bfa_fcs_port_fdmi_offline(ms);
}
739
/*
 * Port came online: kick the MS state machine to start the PLOGI
 * exchange with the management server.
 */
void
bfa_fcs_port_ms_online(struct bfa_fcs_port_s *port)
{
	struct bfa_fcs_port_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port);

	ms->port = port;
	bfa_sm_send_event(ms, MSSM_EVENT_PORT_ONLINE);
}
748
/*
 * A fabric RSCN was received: if MS is fully online, trigger a GFN
 * query to detect a possible fabric-name change.  Ignored in every
 * other state.
 */
void
bfa_fcs_port_ms_fabric_rscn(struct bfa_fcs_port_s *port)
{
	struct bfa_fcs_port_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port);

	/*
	 * @todo. Handle this only when in Online state
	 */
	if (bfa_sm_cmp_state(ms, bfa_fcs_port_ms_sm_online))
		bfa_sm_send_event(ms, MSSM_EVENT_PORT_FABRIC_RSCN);
}
diff --git a/drivers/scsi/bfa/n2n.c b/drivers/scsi/bfa/n2n.c
deleted file mode 100644
index 735456824346..000000000000
--- a/drivers/scsi/bfa/n2n.c
+++ /dev/null
@@ -1,105 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * n2n.c n2n implementation.
20 */
21#include <bfa.h>
22#include <bfa_svc.h>
23#include "fcs_lport.h"
24#include "fcs_rport.h"
25#include "fcs_trcmod.h"
26#include "lport_priv.h"
27
28BFA_TRC_FILE(FCS, N2N);
29
30/**
31 * Called by fcs/port to initialize N2N topology.
32 */
void
bfa_fcs_port_n2n_init(struct bfa_fcs_port_s *port)
{
	/* Intentionally empty: N2N topology needs no per-port setup. */
}
37
38/**
39 * Called by fcs/port to notify transition to online state.
40 */
void
bfa_fcs_port_n2n_online(struct bfa_fcs_port_s *port)
{
	struct bfa_fcs_port_n2n_s *n2n_port = &port->port_topo.pn2n;
	struct bfa_port_cfg_s *pcfg = &port->port_cfg;
	struct bfa_fcs_rport_s *rport;

	bfa_trc(port->fcs, pcfg->pwwn);

	/*
	 * If our PWWN is > than that of the r-port, we have to initiate PLOGI
	 * and assign an Address. If not, we need to wait for its PLOGI.
	 *
	 * If our PWWN is < than that of the remote port, it will send a PLOGI
	 * with the PIDs assigned. The rport state machine takes care of this
	 * incoming PLOGI.
	 */
	if (memcmp
	    ((void *)&pcfg->pwwn, (void *)&n2n_port->rem_port_wwn,
	     sizeof(wwn_t)) > 0) {
		/* We win the WWN comparison: assign both PIDs locally. */
		port->pid = N2N_LOCAL_PID;
		/**
		 * First, check if we know the device by pwwn.
		 */
		rport = bfa_fcs_port_get_rport_by_pwwn(port,
						       n2n_port->rem_port_wwn);
		if (rport) {
			/* Known peer: just re-assign its PID and bring it up. */
			bfa_trc(port->fcs, rport->pid);
			bfa_trc(port->fcs, rport->pwwn);
			rport->pid = N2N_REMOTE_PID;
			bfa_fcs_rport_online(rport);
			return;
		}

		/*
		 * In n2n there can be only one rport. Delete the old one whose
		 * pid should be zero, because it is offline.
		 */
		if (port->num_rports > 0) {
			rport = bfa_fcs_port_get_rport_by_pid(port, 0);
			bfa_assert(rport != NULL);
			if (rport) {
				bfa_trc(port->fcs, rport->pwwn);
				bfa_fcs_rport_delete(rport);
			}
		}
		/* Create the (single) remote port; it will initiate PLOGI. */
		bfa_fcs_rport_create(port, N2N_REMOTE_PID);
	}
}
90
91/**
92 * Called by fcs/port to notify transition to offline state.
93 */
94void
95bfa_fcs_port_n2n_offline(struct bfa_fcs_port_s *port)
96{
97 struct bfa_fcs_port_n2n_s *n2n_port = &port->port_topo.pn2n;
98
99 bfa_trc(port->fcs, port->pid);
100 port->pid = 0;
101 n2n_port->rem_port_wwn = 0;
102 n2n_port->reply_oxid = 0;
103}
104
105
diff --git a/drivers/scsi/bfa/ns.c b/drivers/scsi/bfa/ns.c
deleted file mode 100644
index ae0edcc86ed5..000000000000
--- a/drivers/scsi/bfa/ns.c
+++ /dev/null
@@ -1,1242 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * @page ns_sm_info VPORT NS State Machine
20 *
21 * @section ns_sm_interactions VPORT NS State Machine Interactions
22 *
23 * @section ns_sm VPORT NS State Machine
24 * img ns_sm.jpg
25 */
26#include <bfa.h>
27#include <bfa_svc.h>
28#include <bfa_iocfc.h>
29#include "fcs_lport.h"
30#include "fcs_rport.h"
31#include "fcs_trcmod.h"
32#include "fcs_fcxp.h"
33#include "fcs.h"
34#include "lport_priv.h"
35
36BFA_TRC_FILE(FCS, NS);
37
38/*
39 * forward declarations
40 */
41static void bfa_fcs_port_ns_send_plogi(void *ns_cbarg,
42 struct bfa_fcxp_s *fcxp_alloced);
43static void bfa_fcs_port_ns_send_rspn_id(void *ns_cbarg,
44 struct bfa_fcxp_s *fcxp_alloced);
45static void bfa_fcs_port_ns_send_rft_id(void *ns_cbarg,
46 struct bfa_fcxp_s *fcxp_alloced);
47static void bfa_fcs_port_ns_send_rff_id(void *ns_cbarg,
48 struct bfa_fcxp_s *fcxp_alloced);
49static void bfa_fcs_port_ns_send_gid_ft(void *ns_cbarg,
50 struct bfa_fcxp_s *fcxp_alloced);
51static void bfa_fcs_port_ns_timeout(void *arg);
52static void bfa_fcs_port_ns_plogi_response(void *fcsarg,
53 struct bfa_fcxp_s *fcxp,
54 void *cbarg,
55 bfa_status_t req_status,
56 u32 rsp_len,
57 u32 resid_len,
58 struct fchs_s *rsp_fchs);
59static void bfa_fcs_port_ns_rspn_id_response(void *fcsarg,
60 struct bfa_fcxp_s *fcxp,
61 void *cbarg,
62 bfa_status_t req_status,
63 u32 rsp_len,
64 u32 resid_len,
65 struct fchs_s *rsp_fchs);
66static void bfa_fcs_port_ns_rft_id_response(void *fcsarg,
67 struct bfa_fcxp_s *fcxp,
68 void *cbarg,
69 bfa_status_t req_status,
70 u32 rsp_len,
71 u32 resid_len,
72 struct fchs_s *rsp_fchs);
73static void bfa_fcs_port_ns_rff_id_response(void *fcsarg,
74 struct bfa_fcxp_s *fcxp,
75 void *cbarg,
76 bfa_status_t req_status,
77 u32 rsp_len,
78 u32 resid_len,
79 struct fchs_s *rsp_fchs);
80static void bfa_fcs_port_ns_gid_ft_response(void *fcsarg,
81 struct bfa_fcxp_s *fcxp,
82 void *cbarg,
83 bfa_status_t req_status,
84 u32 rsp_len,
85 u32 resid_len,
86 struct fchs_s *rsp_fchs);
87static void bfa_fcs_port_ns_process_gidft_pids(struct bfa_fcs_port_s *port,
88 u32 *pid_buf,
89 u32 n_pids);
90
91static void bfa_fcs_port_ns_boot_target_disc(struct bfa_fcs_port_s *port);
92/**
93 * fcs_ns_sm FCS nameserver interface state machine
94 */
95
96/**
97 * VPort NS State Machine events
98 */
enum vport_ns_event {
	NSSM_EVENT_PORT_ONLINE = 1,	/* local port came online */
	NSSM_EVENT_PORT_OFFLINE = 2,	/* local port went offline */
	NSSM_EVENT_PLOGI_SENT = 3,	/* PLOGI handed to FCXP layer */
	NSSM_EVENT_RSP_OK = 4,		/* good response from the name server */
	NSSM_EVENT_RSP_ERROR = 5,	/* error or reject response */
	NSSM_EVENT_TIMEOUT = 6,		/* retry timer expired */
	NSSM_EVENT_NS_QUERY = 7,	/* request to re-run the GID_FT query */
	NSSM_EVENT_RSPNID_SENT = 8,	/* RSPN_ID handed to FCXP layer */
	NSSM_EVENT_RFTID_SENT = 9,	/* RFT_ID handed to FCXP layer */
	NSSM_EVENT_RFFID_SENT = 10,	/* RFF_ID handed to FCXP layer */
	NSSM_EVENT_GIDFT_SENT = 11,	/* GID_FT handed to FCXP layer */
};
112
113static void bfa_fcs_port_ns_sm_offline(struct bfa_fcs_port_ns_s *ns,
114 enum vport_ns_event event);
115static void bfa_fcs_port_ns_sm_plogi_sending(struct bfa_fcs_port_ns_s *ns,
116 enum vport_ns_event event);
117static void bfa_fcs_port_ns_sm_plogi(struct bfa_fcs_port_ns_s *ns,
118 enum vport_ns_event event);
119static void bfa_fcs_port_ns_sm_plogi_retry(struct bfa_fcs_port_ns_s *ns,
120 enum vport_ns_event event);
121static void bfa_fcs_port_ns_sm_sending_rspn_id(struct bfa_fcs_port_ns_s *ns,
122 enum vport_ns_event event);
123static void bfa_fcs_port_ns_sm_rspn_id(struct bfa_fcs_port_ns_s *ns,
124 enum vport_ns_event event);
125static void bfa_fcs_port_ns_sm_rspn_id_retry(struct bfa_fcs_port_ns_s *ns,
126 enum vport_ns_event event);
127static void bfa_fcs_port_ns_sm_sending_rft_id(struct bfa_fcs_port_ns_s *ns,
128 enum vport_ns_event event);
129static void bfa_fcs_port_ns_sm_rft_id_retry(struct bfa_fcs_port_ns_s *ns,
130 enum vport_ns_event event);
131static void bfa_fcs_port_ns_sm_rft_id(struct bfa_fcs_port_ns_s *ns,
132 enum vport_ns_event event);
133static void bfa_fcs_port_ns_sm_sending_rff_id(struct bfa_fcs_port_ns_s *ns,
134 enum vport_ns_event event);
135static void bfa_fcs_port_ns_sm_rff_id_retry(struct bfa_fcs_port_ns_s *ns,
136 enum vport_ns_event event);
137static void bfa_fcs_port_ns_sm_rff_id(struct bfa_fcs_port_ns_s *ns,
138 enum vport_ns_event event);
139static void bfa_fcs_port_ns_sm_sending_gid_ft(struct bfa_fcs_port_ns_s *ns,
140 enum vport_ns_event event);
141static void bfa_fcs_port_ns_sm_gid_ft(struct bfa_fcs_port_ns_s *ns,
142 enum vport_ns_event event);
143static void bfa_fcs_port_ns_sm_gid_ft_retry(struct bfa_fcs_port_ns_s *ns,
144 enum vport_ns_event event);
145static void bfa_fcs_port_ns_sm_online(struct bfa_fcs_port_ns_s *ns,
146 enum vport_ns_event event);
147/**
148 * Start in offline state - awaiting linkup
149 */
150static void
151bfa_fcs_port_ns_sm_offline(struct bfa_fcs_port_ns_s *ns,
152 enum vport_ns_event event)
153{
154 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
155 bfa_trc(ns->port->fcs, event);
156
157 switch (event) {
158 case NSSM_EVENT_PORT_ONLINE:
159 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_plogi_sending);
160 bfa_fcs_port_ns_send_plogi(ns, NULL);
161 break;
162
163 case NSSM_EVENT_PORT_OFFLINE:
164 break;
165
166 default:
167 bfa_sm_fault(ns->port->fcs, event);
168 }
169}
170
171static void
172bfa_fcs_port_ns_sm_plogi_sending(struct bfa_fcs_port_ns_s *ns,
173 enum vport_ns_event event)
174{
175 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
176 bfa_trc(ns->port->fcs, event);
177
178 switch (event) {
179 case NSSM_EVENT_PLOGI_SENT:
180 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_plogi);
181 break;
182
183 case NSSM_EVENT_PORT_OFFLINE:
184 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
185 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
186 &ns->fcxp_wqe);
187 break;
188
189 default:
190 bfa_sm_fault(ns->port->fcs, event);
191 }
192}
193
/**
 * Awaiting the PLOGI response from the name server. Success advances to
 * RSPN_ID registration; an error arms a delayed retry.
 */
static void
bfa_fcs_port_ns_sm_plogi(struct bfa_fcs_port_ns_s *ns,
			 enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_RSP_ERROR:
		/*
		 * Start timer for a delayed retry
		 */
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_plogi_retry);
		ns->port->stats.ns_retries++;
		bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->timer,
				bfa_fcs_port_ns_timeout, ns,
				BFA_FCS_RETRY_TIMEOUT);
		break;

	case NSSM_EVENT_RSP_OK:
		/* Logged in: register the symbolic port name next. */
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_rspn_id);
		bfa_fcs_port_ns_send_rspn_id(ns, NULL);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		/* Abort the outstanding exchange on the way down. */
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
		bfa_fcxp_discard(ns->fcxp);
		break;

	default:
		bfa_sm_fault(ns->port->fcs, event);
	}
}
227
228static void
229bfa_fcs_port_ns_sm_plogi_retry(struct bfa_fcs_port_ns_s *ns,
230 enum vport_ns_event event)
231{
232 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
233 bfa_trc(ns->port->fcs, event);
234
235 switch (event) {
236 case NSSM_EVENT_TIMEOUT:
237 /*
238 * Retry Timer Expired. Re-send
239 */
240 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_plogi_sending);
241 bfa_fcs_port_ns_send_plogi(ns, NULL);
242 break;
243
244 case NSSM_EVENT_PORT_OFFLINE:
245 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
246 bfa_timer_stop(&ns->timer);
247 break;
248
249 default:
250 bfa_sm_fault(ns->port->fcs, event);
251 }
252}
253
254static void
255bfa_fcs_port_ns_sm_sending_rspn_id(struct bfa_fcs_port_ns_s *ns,
256 enum vport_ns_event event)
257{
258 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
259 bfa_trc(ns->port->fcs, event);
260
261 switch (event) {
262 case NSSM_EVENT_RSPNID_SENT:
263 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_rspn_id);
264 break;
265
266 case NSSM_EVENT_PORT_OFFLINE:
267 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
268 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
269 &ns->fcxp_wqe);
270 break;
271
272 default:
273 bfa_sm_fault(ns->port->fcs, event);
274 }
275}
276
/**
 * Awaiting the RSPN_ID (symbolic name registration) response. Success
 * advances to RFT_ID registration; an error arms a delayed retry.
 */
static void
bfa_fcs_port_ns_sm_rspn_id(struct bfa_fcs_port_ns_s *ns,
			   enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_RSP_ERROR:
		/*
		 * Start timer for a delayed retry
		 */
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_rspn_id_retry);
		ns->port->stats.ns_retries++;
		bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->timer,
				bfa_fcs_port_ns_timeout, ns,
				BFA_FCS_RETRY_TIMEOUT);
		break;

	case NSSM_EVENT_RSP_OK:
		/* Symbolic name registered: register FC4 types next. */
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_rft_id);
		bfa_fcs_port_ns_send_rft_id(ns, NULL);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		/* Abort the outstanding exchange on the way down. */
		bfa_fcxp_discard(ns->fcxp);
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
		break;

	default:
		bfa_sm_fault(ns->port->fcs, event);
	}
}
310
311static void
312bfa_fcs_port_ns_sm_rspn_id_retry(struct bfa_fcs_port_ns_s *ns,
313 enum vport_ns_event event)
314{
315 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
316 bfa_trc(ns->port->fcs, event);
317
318 switch (event) {
319 case NSSM_EVENT_TIMEOUT:
320 /*
321 * Retry Timer Expired. Re-send
322 */
323 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_rspn_id);
324 bfa_fcs_port_ns_send_rspn_id(ns, NULL);
325 break;
326
327 case NSSM_EVENT_PORT_OFFLINE:
328 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
329 bfa_timer_stop(&ns->timer);
330 break;
331
332 default:
333 bfa_sm_fault(ns->port->fcs, event);
334 }
335}
336
337static void
338bfa_fcs_port_ns_sm_sending_rft_id(struct bfa_fcs_port_ns_s *ns,
339 enum vport_ns_event event)
340{
341 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
342 bfa_trc(ns->port->fcs, event);
343
344 switch (event) {
345 case NSSM_EVENT_RFTID_SENT:
346 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_rft_id);
347 break;
348
349 case NSSM_EVENT_PORT_OFFLINE:
350 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
351 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
352 &ns->fcxp_wqe);
353 break;
354
355 default:
356 bfa_sm_fault(ns->port->fcs, event);
357 }
358}
359
/**
 * Awaiting the RFT_ID (FC4-type registration) response. Success advances
 * to RFF_ID (FC4-feature) registration; an error arms a delayed retry.
 */
static void
bfa_fcs_port_ns_sm_rft_id(struct bfa_fcs_port_ns_s *ns,
			  enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_RSP_OK:
		/*
		 * Now move to register FC4 Features
		 */
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_rff_id);
		bfa_fcs_port_ns_send_rff_id(ns, NULL);
		break;

	case NSSM_EVENT_RSP_ERROR:
		/*
		 * Start timer for a delayed retry
		 */
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_rft_id_retry);
		ns->port->stats.ns_retries++;
		bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->timer,
				bfa_fcs_port_ns_timeout, ns,
				BFA_FCS_RETRY_TIMEOUT);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		/* Abort the outstanding exchange on the way down. */
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
		bfa_fcxp_discard(ns->fcxp);
		break;

	default:
		bfa_sm_fault(ns->port->fcs, event);
	}
}
396
397static void
398bfa_fcs_port_ns_sm_rft_id_retry(struct bfa_fcs_port_ns_s *ns,
399 enum vport_ns_event event)
400{
401 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
402 bfa_trc(ns->port->fcs, event);
403
404 switch (event) {
405 case NSSM_EVENT_TIMEOUT:
406 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_rft_id);
407 bfa_fcs_port_ns_send_rft_id(ns, NULL);
408 break;
409
410 case NSSM_EVENT_PORT_OFFLINE:
411 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
412 bfa_timer_stop(&ns->timer);
413 break;
414
415 default:
416 bfa_sm_fault(ns->port->fcs, event);
417 }
418}
419
420static void
421bfa_fcs_port_ns_sm_sending_rff_id(struct bfa_fcs_port_ns_s *ns,
422 enum vport_ns_event event)
423{
424 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
425 bfa_trc(ns->port->fcs, event);
426
427 switch (event) {
428 case NSSM_EVENT_RFFID_SENT:
429 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_rff_id);
430 break;
431
432 case NSSM_EVENT_PORT_OFFLINE:
433 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
434 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
435 &ns->fcxp_wqe);
436 break;
437
438 default:
439 bfa_sm_fault(ns->port->fcs, event);
440 }
441}
442
/**
 * Awaiting the RFF_ID (FC4-feature registration) response. On success,
 * either pull boot targets from HAL/FW (min-cfg mode), or start the
 * GID_FT device query (initiator mode) / go straight online (target mode).
 */
static void
bfa_fcs_port_ns_sm_rff_id(struct bfa_fcs_port_ns_s *ns,
			  enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_RSP_OK:

		/*
		 * If min cfg mode is enabled, we do not initiate rport
		 * discovery with the fabric. Instead, we will retrieve the
		 * boot targets from HAL/FW.
		 */
		if (__fcs_min_cfg(ns->port->fcs)) {
			bfa_fcs_port_ns_boot_target_disc(ns->port);
			bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_online);
			return;
		}

		/*
		 * If the port role is Initiator Mode issue NS query.
		 * If it is Target Mode, skip this and go to online.
		 */
		if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) {
			bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_gid_ft);
			bfa_fcs_port_ns_send_gid_ft(ns, NULL);
		} else if (BFA_FCS_VPORT_IS_TARGET_MODE(ns->port)) {
			bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_online);
		}
		/*
		 * kick off mgmt srvr state machine
		 */
		bfa_fcs_port_ms_online(ns->port);
		break;

	case NSSM_EVENT_RSP_ERROR:
		/*
		 * Start timer for a delayed retry
		 */
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_rff_id_retry);
		ns->port->stats.ns_retries++;
		bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->timer,
				bfa_fcs_port_ns_timeout, ns,
				BFA_FCS_RETRY_TIMEOUT);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		/* Abort the outstanding exchange on the way down. */
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
		bfa_fcxp_discard(ns->fcxp);
		break;

	default:
		bfa_sm_fault(ns->port->fcs, event);
	}
}
500
501static void
502bfa_fcs_port_ns_sm_rff_id_retry(struct bfa_fcs_port_ns_s *ns,
503 enum vport_ns_event event)
504{
505 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
506 bfa_trc(ns->port->fcs, event);
507
508 switch (event) {
509 case NSSM_EVENT_TIMEOUT:
510 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_rff_id);
511 bfa_fcs_port_ns_send_rff_id(ns, NULL);
512 break;
513
514 case NSSM_EVENT_PORT_OFFLINE:
515 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
516 bfa_timer_stop(&ns->timer);
517 break;
518
519 default:
520 bfa_sm_fault(ns->port->fcs, event);
521 }
522}
523static void
524bfa_fcs_port_ns_sm_sending_gid_ft(struct bfa_fcs_port_ns_s *ns,
525 enum vport_ns_event event)
526{
527 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
528 bfa_trc(ns->port->fcs, event);
529
530 switch (event) {
531 case NSSM_EVENT_GIDFT_SENT:
532 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_gid_ft);
533 break;
534
535 case NSSM_EVENT_PORT_OFFLINE:
536 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
537 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
538 &ns->fcxp_wqe);
539 break;
540
541 default:
542 bfa_sm_fault(ns->port->fcs, event);
543 }
544}
545
/**
 * Awaiting the GID_FT (device discovery) response. Success completes the
 * NS sequence and goes online; an error arms a delayed retry.
 */
static void
bfa_fcs_port_ns_sm_gid_ft(struct bfa_fcs_port_ns_s *ns,
			  enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_RSP_OK:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_online);
		break;

	case NSSM_EVENT_RSP_ERROR:
		/*
		 * TBD: for certain reject codes, we don't need to retry
		 */
		/*
		 * Start timer for a delayed retry
		 */
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_gid_ft_retry);
		ns->port->stats.ns_retries++;
		bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->timer,
				bfa_fcs_port_ns_timeout, ns,
				BFA_FCS_RETRY_TIMEOUT);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		/* Abort the outstanding exchange on the way down. */
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
		bfa_fcxp_discard(ns->fcxp);
		break;

	default:
		bfa_sm_fault(ns->port->fcs, event);
	}
}
581
582static void
583bfa_fcs_port_ns_sm_gid_ft_retry(struct bfa_fcs_port_ns_s *ns,
584 enum vport_ns_event event)
585{
586 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
587 bfa_trc(ns->port->fcs, event);
588
589 switch (event) {
590 case NSSM_EVENT_TIMEOUT:
591 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_gid_ft);
592 bfa_fcs_port_ns_send_gid_ft(ns, NULL);
593 break;
594
595 case NSSM_EVENT_PORT_OFFLINE:
596 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
597 bfa_timer_stop(&ns->timer);
598 break;
599
600 default:
601 bfa_sm_fault(ns->port->fcs, event);
602 }
603}
604
605static void
606bfa_fcs_port_ns_sm_online(struct bfa_fcs_port_ns_s *ns,
607 enum vport_ns_event event)
608{
609 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
610 bfa_trc(ns->port->fcs, event);
611
612 switch (event) {
613 case NSSM_EVENT_PORT_OFFLINE:
614 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
615 break;
616
617 case NSSM_EVENT_NS_QUERY:
618 /*
619 * If the port role is Initiator Mode issue NS query.
620 * If it is Target Mode, skip this and go to online.
621 */
622 if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) {
623 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_gid_ft);
624 bfa_fcs_port_ns_send_gid_ft(ns, NULL);
625 };
626 break;
627
628 default:
629 bfa_sm_fault(ns->port->fcs, event);
630 }
631}
632
633
634
635/**
636 * ns_pvt Nameserver local functions
637 */
638
/**
 * Build and send a PLOGI to the well-known name-server address.
 * If no FCXP is available, park on the wait queue; this function is
 * re-entered from the alloc callback with fcxp_alloced non-NULL.
 */
static void
bfa_fcs_port_ns_send_plogi(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_port_ns_s *ns = ns_cbarg;
	struct bfa_fcs_port_s *port = ns->port;
	struct fchs_s fchs;
	int len;
	struct bfa_fcxp_s *fcxp;

	bfa_trc(port->fcs, port->pid);

	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		/* No exchange available: queue and retry when one frees up. */
		port->stats.ns_plogi_alloc_wait++;
		bfa_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
				    bfa_fcs_port_ns_send_plogi, ns);
		return;
	}
	ns->fcxp = fcxp;

	len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
			     bfa_os_hton3b(FC_NAME_SERVER),
			     bfa_fcs_port_get_fcid(port), 0,
			     port->port_cfg.pwwn, port->port_cfg.nwwn,
			     bfa_fcport_get_maxfrsize(port->fcs->bfa));

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
		      FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_plogi_response,
		      (void *)ns, FC_MAX_PDUSZ, FC_ELS_TOV);
	port->stats.ns_plogi_sent++;

	bfa_sm_send_event(ns, NSSM_EVENT_PLOGI_SENT);
}
672
/**
 * Completion callback for the name-server PLOGI. Translates the ELS
 * response (ACC / LS_RJT / other) into an RSP_OK or RSP_ERROR event for
 * the NS state machine.
 */
static void
bfa_fcs_port_ns_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
			       void *cbarg, bfa_status_t req_status,
			       u32 rsp_len, u32 resid_len,
			       struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_port_ns_s *ns = (struct bfa_fcs_port_ns_s *)cbarg;
	struct bfa_fcs_port_s *port = ns->port;
	/* struct fc_logi_s *plogi_resp; */
	struct fc_els_cmd_s *els_cmd;
	struct fc_ls_rjt_s *ls_rjt;

	bfa_trc(port->fcs, req_status);
	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		/* Transport-level failure: treat as an error response. */
		bfa_trc(port->fcs, req_status);
		port->stats.ns_plogi_rsp_err++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
		return;
	}

	els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);

	switch (els_cmd->els_code) {

	case FC_ELS_ACC:
		/* An ACC shorter than a full PLOGI payload is malformed. */
		if (rsp_len < sizeof(struct fc_logi_s)) {
			bfa_trc(port->fcs, rsp_len);
			port->stats.ns_plogi_acc_err++;
			bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
			break;
		}
		port->stats.ns_plogi_accepts++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
		break;

	case FC_ELS_LS_RJT:
		ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);

		bfa_trc(port->fcs, ls_rjt->reason_code);
		bfa_trc(port->fcs, ls_rjt->reason_code_expl);

		port->stats.ns_rejects++;

		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
		break;

	default:
		/* Unexpected ELS code: count it and retry via the SM. */
		port->stats.ns_plogi_unknown_rsp++;
		bfa_trc(port->fcs, els_cmd->els_code);
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
	}
}
730
731/**
732 * Register the symbolic port name.
733 */
734static void
735bfa_fcs_port_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
736{
737 struct bfa_fcs_port_ns_s *ns = ns_cbarg;
738 struct bfa_fcs_port_s *port = ns->port;
739 struct fchs_s fchs;
740 int len;
741 struct bfa_fcxp_s *fcxp;
742 u8 symbl[256];
743 u8 *psymbl = &symbl[0];
744
745 bfa_os_memset(symbl, 0, sizeof(symbl));
746
747 bfa_trc(port->fcs, port->port_cfg.pwwn);
748
749 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
750 if (!fcxp) {
751 port->stats.ns_rspnid_alloc_wait++;
752 bfa_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
753 bfa_fcs_port_ns_send_rspn_id, ns);
754 return;
755 }
756 ns->fcxp = fcxp;
757
758 /*
759 * for V-Port, form a Port Symbolic Name
760 */
761 if (port->vport) {
762 /**For Vports,
763 * we append the vport's port symbolic name to that of the base port.
764 */
765
766 strncpy((char *)psymbl,
767 (char *)
768 &(bfa_fcs_port_get_psym_name
769 (bfa_fcs_get_base_port(port->fcs))),
770 strlen((char *)
771 &bfa_fcs_port_get_psym_name(bfa_fcs_get_base_port
772 (port->fcs))));
773
774 /*
775 * Ensure we have a null terminating string.
776 */
777 ((char *)
778 psymbl)[strlen((char *)
779 &bfa_fcs_port_get_psym_name
780 (bfa_fcs_get_base_port(port->fcs)))] = 0;
781
782 strncat((char *)psymbl,
783 (char *)&(bfa_fcs_port_get_psym_name(port)),
784 strlen((char *)&bfa_fcs_port_get_psym_name(port)));
785 } else {
786 psymbl = (u8 *) &(bfa_fcs_port_get_psym_name(port));
787 }
788
789 len = fc_rspnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
790 bfa_fcs_port_get_fcid(port), 0, psymbl);
791
792 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
793 FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_rspn_id_response,
794 (void *)ns, FC_MAX_PDUSZ, FC_FCCT_TOV);
795
796 port->stats.ns_rspnid_sent++;
797
798 bfa_sm_send_event(ns, NSSM_EVENT_RSPNID_SENT);
799}
800
801static void
802bfa_fcs_port_ns_rspn_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
803 void *cbarg, bfa_status_t req_status,
804 u32 rsp_len, u32 resid_len,
805 struct fchs_s *rsp_fchs)
806{
807 struct bfa_fcs_port_ns_s *ns = (struct bfa_fcs_port_ns_s *)cbarg;
808 struct bfa_fcs_port_s *port = ns->port;
809 struct ct_hdr_s *cthdr = NULL;
810
811 bfa_trc(port->fcs, port->port_cfg.pwwn);
812
813 /*
814 * Sanity Checks
815 */
816 if (req_status != BFA_STATUS_OK) {
817 bfa_trc(port->fcs, req_status);
818 port->stats.ns_rspnid_rsp_err++;
819 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
820 return;
821 }
822
823 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
824 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
825
826 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
827 port->stats.ns_rspnid_accepts++;
828 bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
829 return;
830 }
831
832 port->stats.ns_rspnid_rejects++;
833 bfa_trc(port->fcs, cthdr->reason_code);
834 bfa_trc(port->fcs, cthdr->exp_code);
835 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
836}
837
838/**
839 * Register FC4-Types
840 * TBD, Need to retrieve this from the OS driver, in case IPFC is enabled ?
841 */
/**
 * Register the port's FC4 types (RFT_ID) with the name server, based on
 * the configured port roles. Parks on the FCXP wait queue when no
 * exchange is available.
 */
static void
bfa_fcs_port_ns_send_rft_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_port_ns_s *ns = ns_cbarg;
	struct bfa_fcs_port_s *port = ns->port;
	struct fchs_s fchs;
	int len;
	struct bfa_fcxp_s *fcxp;

	bfa_trc(port->fcs, port->port_cfg.pwwn);

	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		/* No exchange available: queue and retry when one frees up. */
		port->stats.ns_rftid_alloc_wait++;
		bfa_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
				    bfa_fcs_port_ns_send_rft_id, ns);
		return;
	}
	ns->fcxp = fcxp;

	len = fc_rftid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
			     bfa_fcs_port_get_fcid(port), 0,
			     port->port_cfg.roles);

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
		      FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_rft_id_response,
		      (void *)ns, FC_MAX_PDUSZ, FC_FCCT_TOV);

	port->stats.ns_rftid_sent++;
	bfa_sm_send_event(ns, NSSM_EVENT_RFTID_SENT);
}
873
874static void
875bfa_fcs_port_ns_rft_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
876 void *cbarg, bfa_status_t req_status,
877 u32 rsp_len, u32 resid_len,
878 struct fchs_s *rsp_fchs)
879{
880 struct bfa_fcs_port_ns_s *ns = (struct bfa_fcs_port_ns_s *)cbarg;
881 struct bfa_fcs_port_s *port = ns->port;
882 struct ct_hdr_s *cthdr = NULL;
883
884 bfa_trc(port->fcs, port->port_cfg.pwwn);
885
886 /*
887 * Sanity Checks
888 */
889 if (req_status != BFA_STATUS_OK) {
890 bfa_trc(port->fcs, req_status);
891 port->stats.ns_rftid_rsp_err++;
892 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
893 return;
894 }
895
896 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
897 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
898
899 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
900 port->stats.ns_rftid_accepts++;
901 bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
902 return;
903 }
904
905 port->stats.ns_rftid_rejects++;
906 bfa_trc(port->fcs, cthdr->reason_code);
907 bfa_trc(port->fcs, cthdr->exp_code);
908 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
909}
910
911/**
912* Register FC4-Features : Should be done after RFT_ID
913 */
/**
 * Register FC4 features (RFF_ID) with the name server — must follow
 * RFT_ID. The FCP feature bits are derived from the port's
 * initiator/target role.
 */
static void
bfa_fcs_port_ns_send_rff_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_port_ns_s *ns = ns_cbarg;
	struct bfa_fcs_port_s *port = ns->port;
	struct fchs_s fchs;
	int len;
	struct bfa_fcxp_s *fcxp;
	u8 fc4_ftrs = 0;

	bfa_trc(port->fcs, port->port_cfg.pwwn);

	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		/* No exchange available: queue and retry when one frees up. */
		port->stats.ns_rffid_alloc_wait++;
		bfa_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
				    bfa_fcs_port_ns_send_rff_id, ns);
		return;
	}
	ns->fcxp = fcxp;

	/* Map the port role to the FC-GS FCP feature bits. */
	if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port))
		fc4_ftrs = FC_GS_FCP_FC4_FEATURE_INITIATOR;
	else if (BFA_FCS_VPORT_IS_TARGET_MODE(ns->port))
		fc4_ftrs = FC_GS_FCP_FC4_FEATURE_TARGET;

	len = fc_rffid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
			     bfa_fcs_port_get_fcid(port), 0, FC_TYPE_FCP,
			     fc4_ftrs);

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
		      FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_rff_id_response,
		      (void *)ns, FC_MAX_PDUSZ, FC_FCCT_TOV);

	port->stats.ns_rffid_sent++;
	bfa_sm_send_event(ns, NSSM_EVENT_RFFID_SENT);
}
951
/**
 * Completion callback for RFF_ID. A CT accept — or a "command not
 * supported" reject (older switches) — is treated as success; any other
 * outcome produces RSP_ERROR and a retry.
 */
static void
bfa_fcs_port_ns_rff_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
				void *cbarg, bfa_status_t req_status,
				u32 rsp_len, u32 resid_len,
				struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_port_ns_s *ns = (struct bfa_fcs_port_ns_s *)cbarg;
	struct bfa_fcs_port_s *port = ns->port;
	struct ct_hdr_s *cthdr = NULL;

	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		/* Transport-level failure: treat as an error response. */
		bfa_trc(port->fcs, req_status);
		port->stats.ns_rffid_rsp_err++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
		return;
	}

	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);

	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
		port->stats.ns_rffid_accepts++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
		return;
	}

	port->stats.ns_rffid_rejects++;
	bfa_trc(port->fcs, cthdr->reason_code);
	bfa_trc(port->fcs, cthdr->exp_code);

	if (cthdr->reason_code == CT_RSN_NOT_SUPP) {
		/*
		 * if this command is not supported, we don't retry
		 */
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
	} else {
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
	}
}
996
997/**
998 * Query Fabric for FC4-Types Devices.
999 *
1000* TBD : Need to use a local (FCS private) response buffer, since the response
1001 * can be larger than 2K.
1002 */
/**
 * Query the fabric for FCP devices (GID_FT). Only initiated for FCP
 * initiator mode; uses the maximum FCXP response size since the PID list
 * can be large.
 *
 * TBD : Need to use a local (FCS private) response buffer, since the
 * response can be larger than 2K.
 */
static void
bfa_fcs_port_ns_send_gid_ft(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_port_ns_s *ns = ns_cbarg;
	struct bfa_fcs_port_s *port = ns->port;
	struct fchs_s fchs;
	int len;
	struct bfa_fcxp_s *fcxp;

	bfa_trc(port->fcs, port->pid);

	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		/* No exchange available: queue and retry when one frees up. */
		port->stats.ns_gidft_alloc_wait++;
		bfa_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
				    bfa_fcs_port_ns_send_gid_ft, ns);
		return;
	}
	ns->fcxp = fcxp;

	/*
	 * This query is only initiated for FCP initiator mode.
	 */
	len = fc_gid_ft_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), ns->port->pid,
			      FC_TYPE_FCP);

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
		      FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_gid_ft_response,
		      (void *)ns, bfa_fcxp_get_maxrsp(port->fcs->bfa),
		      FC_FCCT_TOV);

	port->stats.ns_gidft_sent++;

	bfa_sm_send_event(ns, NSSM_EVENT_GIDFT_SENT);
}
1038
/*
 * Handle the GID_FT response from the name server: on accept, walk the
 * returned PID list; on reject, distinguish "no FC4 devices registered"
 * (treated as success) from errors that should be retried.
 */
static void
bfa_fcs_port_ns_gid_ft_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
				void *cbarg, bfa_status_t req_status,
				u32 rsp_len, u32 resid_len,
				struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_port_ns_s *ns = (struct bfa_fcs_port_ns_s *)cbarg;
	struct bfa_fcs_port_s *port = ns->port;
	struct ct_hdr_s *cthdr = NULL;
	u32 n_pids;

	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		bfa_trc(port->fcs, req_status);
		port->stats.ns_gidft_rsp_err++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
		return;
	}

	if (resid_len != 0) {
		/*
		 * TBD : we will need to allocate a larger buffer & retry the
		 * command
		 * NOTE(review): this path returns without posting any event
		 * to the NS state machine - confirm the SM cannot get stuck
		 * here.
		 */
		bfa_trc(port->fcs, rsp_len);
		bfa_trc(port->fcs, resid_len);
		return;
	}

	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);

	switch (cthdr->cmd_rsp_code) {

	case CT_RSP_ACCEPT:

		port->stats.ns_gidft_accepts++;
		/* PID entries follow immediately after the CT header. */
		n_pids = (fc_get_ctresp_pyld_len(rsp_len) / sizeof(u32));
		bfa_trc(port->fcs, n_pids);
		bfa_fcs_port_ns_process_gidft_pids(port,
						   (u32 *) (cthdr + 1),
						   n_pids);
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
		break;

	case CT_RSP_REJECT:

		/*
		 * Check the reason code & explanation.
		 * There may not have been any FC4 devices in the fabric
		 */
		port->stats.ns_gidft_rejects++;
		bfa_trc(port->fcs, cthdr->reason_code);
		bfa_trc(port->fcs, cthdr->exp_code);

		if ((cthdr->reason_code == CT_RSN_UNABLE_TO_PERF)
		    && (cthdr->exp_code == CT_NS_EXP_FT_NOT_REG)) {
			/* No FC4-type devices registered: not an error. */
			bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
		} else {
			/*
			 * for all other errors, retry
			 */
			bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
		}
		break;

	default:
		port->stats.ns_gidft_unknown_rsp++;
		bfa_trc(port->fcs, cthdr->cmd_rsp_code);
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
	}
}
1116
1117/**
1118 * This routine will be called by bfa_timer on timer timeouts.
1119 *
1120 * param[in] port - pointer to bfa_fcs_port_t.
1121 *
1122 * return
1123 * void
1124 *
1125* Special Considerations:
1126 *
1127 * note
1128 */
1129static void
1130bfa_fcs_port_ns_timeout(void *arg)
1131{
1132 struct bfa_fcs_port_ns_s *ns = (struct bfa_fcs_port_ns_s *)arg;
1133
1134 ns->port->stats.ns_timeouts++;
1135 bfa_sm_send_event(ns, NSSM_EVENT_TIMEOUT);
1136}
1137
1138/*
1139 * Process the PID list in GID_FT response
1140 */
1141static void
1142bfa_fcs_port_ns_process_gidft_pids(struct bfa_fcs_port_s *port,
1143 u32 *pid_buf, u32 n_pids)
1144{
1145 struct fcgs_gidft_resp_s *gidft_entry;
1146 struct bfa_fcs_rport_s *rport;
1147 u32 ii;
1148
1149 for (ii = 0; ii < n_pids; ii++) {
1150 gidft_entry = (struct fcgs_gidft_resp_s *) &pid_buf[ii];
1151
1152 if (gidft_entry->pid == port->pid)
1153 continue;
1154
1155 /*
1156 * Check if this rport already exists
1157 */
1158 rport = bfa_fcs_port_get_rport_by_pid(port, gidft_entry->pid);
1159 if (rport == NULL) {
1160 /*
1161 * this is a new device. create rport
1162 */
1163 rport = bfa_fcs_rport_create(port, gidft_entry->pid);
1164 } else {
1165 /*
1166 * this rport already exists
1167 */
1168 bfa_fcs_rport_scn(rport);
1169 }
1170
1171 bfa_trc(port->fcs, gidft_entry->pid);
1172
1173 /*
1174 * if the last entry bit is set, bail out.
1175 */
1176 if (gidft_entry->last)
1177 return;
1178 }
1179}
1180
1181/**
1182 * fcs_ns_public FCS nameserver public interfaces
1183 */
1184
1185/*
1186 * Functions called by port/fab.
1187 * These will send relevant Events to the ns state machine.
1188 */
1189void
1190bfa_fcs_port_ns_init(struct bfa_fcs_port_s *port)
1191{
1192 struct bfa_fcs_port_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
1193
1194 ns->port = port;
1195 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
1196}
1197
1198void
1199bfa_fcs_port_ns_offline(struct bfa_fcs_port_s *port)
1200{
1201 struct bfa_fcs_port_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
1202
1203 ns->port = port;
1204 bfa_sm_send_event(ns, NSSM_EVENT_PORT_OFFLINE);
1205}
1206
1207void
1208bfa_fcs_port_ns_online(struct bfa_fcs_port_s *port)
1209{
1210 struct bfa_fcs_port_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
1211
1212 ns->port = port;
1213 bfa_sm_send_event(ns, NSSM_EVENT_PORT_ONLINE);
1214}
1215
1216void
1217bfa_fcs_port_ns_query(struct bfa_fcs_port_s *port)
1218{
1219 struct bfa_fcs_port_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
1220
1221 bfa_trc(port->fcs, port->pid);
1222 bfa_sm_send_event(ns, NSSM_EVENT_NS_QUERY);
1223}
1224
/*
 * Pre-boot target discovery: create an rport for every WWN listed in
 * the pre-boot boot-LUN configuration reported by the IOC.
 */
static void
bfa_fcs_port_ns_boot_target_disc(struct bfa_fcs_port_s *port)
{

	struct bfa_fcs_rport_s *rport;
	u8 nwwns;
	wwn_t wwns[BFA_PREBOOT_BOOTLUN_MAX];
	int ii;

	/* Fetch the boot WWN list from the IOC configuration. */
	bfa_iocfc_get_bootwwns(port->fcs->bfa, &nwwns, wwns);

	for (ii = 0; ii < nwwns; ++ii) {
		rport = bfa_fcs_rport_create_by_wwn(port, wwns[ii]);
		bfa_assert(rport);
	}
}
1241
1242
diff --git a/drivers/scsi/bfa/plog.c b/drivers/scsi/bfa/plog.c
deleted file mode 100644
index fcb8864d3276..000000000000
--- a/drivers/scsi/bfa/plog.c
+++ /dev/null
@@ -1,184 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa_os_inc.h>
19#include <cs/bfa_plog.h>
20#include <cs/bfa_debug.h>
21
22static int
23plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
24{
25 if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT)
26 && (pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
27 return 1;
28
29 if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT)
30 && (pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
31 return 1;
32
33 return 0;
34}
35
/*
 * Append one record to the portlog ring buffer.  When the ring is full
 * the oldest entry is overwritten (head is advanced when it collides
 * with tail).
 */
static void
bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
{
	u16 tail;
	struct bfa_plog_rec_s *pl_recp;

	if (plog->plog_enabled == 0)
		return;

	/* Malformed records are asserted on and dropped. */
	if (plkd_validate_logrec(pl_rec)) {
		bfa_assert(0);
		return;
	}

	tail = plog->tail;

	pl_recp = &(plog->plog_recs[tail]);

	bfa_os_memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));

	/* Timestamp the stored copy, then advance the tail. */
	pl_recp->tv = BFA_TRC_TS(plog);
	BFA_PL_LOG_REC_INCR(plog->tail);

	/* Ring full: drop the oldest record by advancing head. */
	if (plog->head == plog->tail)
		BFA_PL_LOG_REC_INCR(plog->head);
}
62
63void
64bfa_plog_init(struct bfa_plog_s *plog)
65{
66 bfa_os_memset((char *)plog, 0, sizeof(struct bfa_plog_s));
67
68 bfa_os_memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
69 plog->head = plog->tail = 0;
70 plog->plog_enabled = 1;
71}
72
73void
74bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
75 enum bfa_plog_eid event,
76 u16 misc, char *log_str)
77{
78 struct bfa_plog_rec_s lp;
79
80 if (plog->plog_enabled) {
81 bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
82 lp.mid = mid;
83 lp.eid = event;
84 lp.log_type = BFA_PL_LOG_TYPE_STRING;
85 lp.misc = misc;
86 strncpy(lp.log_entry.string_log, log_str,
87 BFA_PL_STRING_LOG_SZ - 1);
88 lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
89 bfa_plog_add(plog, &lp);
90 }
91}
92
93void
94bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
95 enum bfa_plog_eid event,
96 u16 misc, u32 *intarr, u32 num_ints)
97{
98 struct bfa_plog_rec_s lp;
99 u32 i;
100
101 if (num_ints > BFA_PL_INT_LOG_SZ)
102 num_ints = BFA_PL_INT_LOG_SZ;
103
104 if (plog->plog_enabled) {
105 bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
106 lp.mid = mid;
107 lp.eid = event;
108 lp.log_type = BFA_PL_LOG_TYPE_INT;
109 lp.misc = misc;
110
111 for (i = 0; i < num_ints; i++)
112 bfa_os_assign(lp.log_entry.int_log[i],
113 intarr[i]);
114
115 lp.log_num_ints = (u8) num_ints;
116
117 bfa_plog_add(plog, &lp);
118 }
119}
120
121void
122bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
123 enum bfa_plog_eid event,
124 u16 misc, struct fchs_s *fchdr)
125{
126 struct bfa_plog_rec_s lp;
127 u32 *tmp_int = (u32 *) fchdr;
128 u32 ints[BFA_PL_INT_LOG_SZ];
129
130 if (plog->plog_enabled) {
131 bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
132
133 ints[0] = tmp_int[0];
134 ints[1] = tmp_int[1];
135 ints[2] = tmp_int[4];
136
137 bfa_plog_intarr(plog, mid, event, misc, ints, 3);
138 }
139}
140
141void
142bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
143 enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
144 u32 pld_w0)
145{
146 struct bfa_plog_rec_s lp;
147 u32 *tmp_int = (u32 *) fchdr;
148 u32 ints[BFA_PL_INT_LOG_SZ];
149
150 if (plog->plog_enabled) {
151 bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
152
153 ints[0] = tmp_int[0];
154 ints[1] = tmp_int[1];
155 ints[2] = tmp_int[4];
156 ints[3] = pld_w0;
157
158 bfa_plog_intarr(plog, mid, event, misc, ints, 4);
159 }
160}
161
162void
163bfa_plog_clear(struct bfa_plog_s *plog)
164{
165 plog->head = plog->tail = 0;
166}
167
/* Turn portlog recording on; subsequent records are accepted. */
void
bfa_plog_enable(struct bfa_plog_s *plog)
{
	plog->plog_enabled = 1;
}
173
/* Turn portlog recording off; subsequent records are dropped. */
void
bfa_plog_disable(struct bfa_plog_s *plog)
{
	plog->plog_enabled = 0;
}
179
/* Report whether portlog recording is currently enabled. */
bfa_boolean_t
bfa_plog_get_setting(struct bfa_plog_s *plog)
{
	return (bfa_boolean_t)plog->plog_enabled;
}
diff --git a/drivers/scsi/bfa/rport_api.c b/drivers/scsi/bfa/rport_api.c
deleted file mode 100644
index 15e0c470afd9..000000000000
--- a/drivers/scsi/bfa/rport_api.c
+++ /dev/null
@@ -1,185 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#include <bfa.h>
18#include <bfa_svc.h>
19#include "fcs_vport.h"
20#include "fcs_lport.h"
21#include "fcs_rport.h"
22#include "fcs_trcmod.h"
23
24BFA_TRC_FILE(FCS, RPORT_API);
25
26/**
27 * rport_api.c Remote port implementation.
28 */
29
30/**
31 * fcs_rport_api FCS rport API.
32 */
33
/**
 * Direct API to add a target by port wwn. This interface is used, for
 * example, by bios when target pwwn is known from boot lun configuration.
 *
 * NOTE(review): currently a stub - it only traces the wwn and reports
 * success; no rport is actually created here.
 */
bfa_status_t
bfa_fcs_rport_add(struct bfa_fcs_port_s *port, wwn_t *pwwn,
		  struct bfa_fcs_rport_s *rport,
		  struct bfad_rport_s *rport_drv)
{
	bfa_trc(port->fcs, *pwwn);

	return BFA_STATUS_OK;
}
47
48/**
49 * Direct API to remove a target and its associated resources. This
50 * interface is used, for example, by vmware driver to remove target
51 * ports from the target list for a VM.
52 */
53bfa_status_t
54bfa_fcs_rport_remove(struct bfa_fcs_rport_s *rport_in)
55{
56
57 struct bfa_fcs_rport_s *rport;
58
59 bfa_trc(rport_in->fcs, rport_in->pwwn);
60
61 rport = bfa_fcs_port_get_rport_by_pwwn(rport_in->port, rport_in->pwwn);
62 if (rport == NULL) {
63 /*
64 * TBD Error handling
65 */
66 bfa_trc(rport_in->fcs, rport_in->pid);
67 return BFA_STATUS_UNKNOWN_RWWN;
68 }
69
70 /*
71 * TBD if this remote port is online, send a logo
72 */
73 return BFA_STATUS_OK;
74
75}
76
/**
 * Remote device status for display/debug.
 *
 * Fills rport_attr with a snapshot of the rport's identity, negotiated
 * parameters, QoS attributes and rate-limit enforcement status.
 */
void
bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
		       struct bfa_rport_attr_s *rport_attr)
{
	struct bfa_rport_qos_attr_s qos_attr;
	struct bfa_fcs_port_s *port = rport->port;
	enum bfa_pport_speed rport_speed = rport->rpf.rpsc_speed;

	bfa_os_memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s));

	/* Identity and login parameters. */
	rport_attr->pid = rport->pid;
	rport_attr->pwwn = rport->pwwn;
	rport_attr->nwwn = rport->nwwn;
	rport_attr->cos_supported = rport->fc_cos;
	rport_attr->df_sz = rport->maxfrsize;
	rport_attr->state = bfa_fcs_rport_get_state(rport);
	rport_attr->fc_cos = rport->fc_cos;
	rport_attr->cisc = rport->cisc;
	rport_attr->scsi_function = rport->scsi_function;
	rport_attr->curr_speed = rport->rpf.rpsc_speed;
	rport_attr->assigned_speed = rport->rpf.assigned_speed;

	bfa_rport_get_qos_attr(rport->bfa_rport, &qos_attr);
	rport_attr->qos_attr = qos_attr;

	rport_attr->trl_enforced = BFA_FALSE;

	/*
	 * Rate limiting applies when the (known or default) rport speed
	 * is below the port's fastest rport speed.
	 */
	if (bfa_fcport_is_ratelim(port->fcs->bfa)) {
		if (rport_speed == BFA_PPORT_SPEED_UNKNOWN) {
			/* Use default ratelim speed setting */
			rport_speed =
				bfa_fcport_get_ratelim_speed(rport->fcs->bfa);
		}
		if (rport_speed < bfa_fcs_port_get_rport_max_speed(port))
			rport_attr->trl_enforced = BFA_TRUE;
	}

	/*
	 * TODO
	 * rport->symname
	 */
}
122
/**
 * Per remote device statistics - copies a snapshot of the rport's
 * statistics block into the caller-supplied structure.
 */
void
bfa_fcs_rport_get_stats(struct bfa_fcs_rport_s *rport,
			struct bfa_rport_stats_s *stats)
{
	*stats = rport->stats;
}
132
/* Zero the rport's entire statistics block. */
void
bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport)
{
	bfa_os_memset((char *)&rport->stats, 0,
		      sizeof(struct bfa_rport_stats_s));
}
139
140struct bfa_fcs_rport_s *
141bfa_fcs_rport_lookup(struct bfa_fcs_port_s *port, wwn_t rpwwn)
142{
143 struct bfa_fcs_rport_s *rport;
144
145 rport = bfa_fcs_port_get_rport_by_pwwn(port, rpwwn);
146 if (rport == NULL) {
147 /*
148 * TBD Error handling
149 */
150 }
151
152 return rport;
153}
154
155struct bfa_fcs_rport_s *
156bfa_fcs_rport_lookup_by_nwwn(struct bfa_fcs_port_s *port, wwn_t rnwwn)
157{
158 struct bfa_fcs_rport_s *rport;
159
160 rport = bfa_fcs_port_get_rport_by_nwwn(port, rnwwn);
161 if (rport == NULL) {
162 /*
163 * TBD Error handling
164 */
165 }
166
167 return rport;
168}
169
/*
 * This API is to set the Rport's speed. Should be used when RPSC is not
 * supported by the rport.
 */
void
bfa_fcs_rport_set_speed(struct bfa_fcs_rport_s *rport,
			enum bfa_pport_speed speed)
{
	/* Remember the administratively assigned speed. */
	rport->rpf.assigned_speed = speed;

	/* Set this speed in f/w only if the RPSC speed is not available */
	if (rport->rpf.rpsc_speed == BFA_PPORT_SPEED_UNKNOWN)
		bfa_rport_speed(rport->bfa_rport, speed);
}
184
185
diff --git a/drivers/scsi/bfa/rport_ftrs.c b/drivers/scsi/bfa/rport_ftrs.c
deleted file mode 100644
index f2a9361ce9a4..000000000000
--- a/drivers/scsi/bfa/rport_ftrs.c
+++ /dev/null
@@ -1,379 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * rport_ftrs.c Remote port features (RPF) implementation.
20 */
21
22#include <bfa.h>
23#include <bfa_svc.h>
24#include "fcbuild.h"
25#include "fcs_rport.h"
26#include "fcs_lport.h"
27#include "fcs_trcmod.h"
28#include "fcs_fcxp.h"
29#include "fcs.h"
30
31BFA_TRC_FILE(FCS, RPORT_FTRS);
32
33#define BFA_FCS_RPF_RETRIES (3)
34#define BFA_FCS_RPF_RETRY_TIMEOUT (1000) /* 1 sec (In millisecs) */
35
36static void bfa_fcs_rpf_send_rpsc2(void *rport_cbarg,
37 struct bfa_fcxp_s *fcxp_alloced);
38static void bfa_fcs_rpf_rpsc2_response(void *fcsarg,
39 struct bfa_fcxp_s *fcxp, void *cbarg,
40 bfa_status_t req_status, u32 rsp_len,
41 u32 resid_len,
42 struct fchs_s *rsp_fchs);
43static void bfa_fcs_rpf_timeout(void *arg);
44
45/**
46 * fcs_rport_ftrs_sm FCS rport state machine events
47 */
48
49enum rpf_event {
50 RPFSM_EVENT_RPORT_OFFLINE = 1, /* Rport offline */
51 RPFSM_EVENT_RPORT_ONLINE = 2, /* Rport online */
52 RPFSM_EVENT_FCXP_SENT = 3, /* Frame from has been sent */
53 RPFSM_EVENT_TIMEOUT = 4, /* Rport SM timeout event */
54 RPFSM_EVENT_RPSC_COMP = 5,
55 RPFSM_EVENT_RPSC_FAIL = 6,
56 RPFSM_EVENT_RPSC_ERROR = 7,
57};
58
59static void bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf,
60 enum rpf_event event);
61static void bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf,
62 enum rpf_event event);
63static void bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf,
64 enum rpf_event event);
65static void bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf,
66 enum rpf_event event);
67static void bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf,
68 enum rpf_event event);
69static void bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf,
70 enum rpf_event event);
71
/*
 * RPF initial state: waiting for the rport to come online.  RPSC2 is
 * sent only toward Brocade fabrics and never to well-known addresses.
 */
static void
bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
{
	struct bfa_fcs_rport_s *rport = rpf->rport;
	struct bfa_fcs_fabric_s *fabric = &rport->fcs->fabric;

	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPFSM_EVENT_RPORT_ONLINE:
		/* Send RPSC2 to a Brocade fabric only. */
		if ((!BFA_FCS_PID_IS_WKA(rport->pid)) &&
		    ((bfa_lps_is_brcd_fabric(rport->port->fabric->lps)) ||
		     (bfa_fcs_fabric_get_switch_oui(fabric) ==
		      BFA_FCS_BRCD_SWITCH_OUI))) {
			bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
			rpf->rpsc_retries = 0;
			bfa_fcs_rpf_send_rpsc2(rpf, NULL);
		}
		break;

	case RPFSM_EVENT_RPORT_OFFLINE:
		/* Offline before anything was sent: nothing to undo. */
		break;

	default:
		bfa_sm_fault(rport->fcs, event);
	}
}
102
/*
 * RPF state: an RPSC2 request is being queued for transmission (we may
 * still be waiting on an fcxp allocation).
 */
static void
bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
{
	struct bfa_fcs_rport_s *rport = rpf->rport;

	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPFSM_EVENT_FCXP_SENT:
		/* Request is on the wire: wait for the response. */
		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc);
		break;

	case RPFSM_EVENT_RPORT_OFFLINE:
		/* Cancel any pending fcxp-allocation wait. */
		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rpf->fcxp_wqe);
		rpf->rpsc_retries = 0;
		break;

	default:
		bfa_sm_fault(rport->fcs, event);
	}
}
125
/*
 * RPF state: RPSC2 sent, awaiting the response.  Success feeds the
 * discovered speed to firmware; errors are retried a bounded number of
 * times with a delay.
 */
static void
bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
{
	struct bfa_fcs_rport_s *rport = rpf->rport;

	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPFSM_EVENT_RPSC_COMP:
		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online);
		/* Update speed info in f/w via BFA */
		if (rpf->rpsc_speed != BFA_PPORT_SPEED_UNKNOWN)
			bfa_rport_speed(rport->bfa_rport, rpf->rpsc_speed);
		else if (rpf->assigned_speed != BFA_PPORT_SPEED_UNKNOWN)
			bfa_rport_speed(rport->bfa_rport, rpf->assigned_speed);
		break;

	case RPFSM_EVENT_RPSC_FAIL:
		/* RPSC not supported by rport */
		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online);
		break;

	case RPFSM_EVENT_RPSC_ERROR:
		/* need to retry...delayed a bit. */
		if (rpf->rpsc_retries++ < BFA_FCS_RPF_RETRIES) {
			bfa_timer_start(rport->fcs->bfa, &rpf->timer,
					bfa_fcs_rpf_timeout, rpf,
					BFA_FCS_RPF_RETRY_TIMEOUT);
			bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_retry);
		} else {
			/* Retries exhausted: give up and go online. */
			bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online);
		}
		break;

	case RPFSM_EVENT_RPORT_OFFLINE:
		/* Drop the outstanding exchange on the way down. */
		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
		bfa_fcxp_discard(rpf->fcxp);
		rpf->rpsc_retries = 0;
		break;

	default:
		bfa_sm_fault(rport->fcs, event);
	}
}
171
/*
 * RPF state: waiting out the retry delay before re-sending RPSC2.
 */
static void
bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
{
	struct bfa_fcs_rport_s *rport = rpf->rport;

	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPFSM_EVENT_TIMEOUT:
		/* re-send the RPSC */
		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
		bfa_fcs_rpf_send_rpsc2(rpf, NULL);
		break;

	case RPFSM_EVENT_RPORT_OFFLINE:
		/* Stop the pending retry timer before going offline. */
		bfa_timer_stop(&rpf->timer);
		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
		rpf->rpsc_retries = 0;
		break;

	default:
		bfa_sm_fault(rport->fcs, event);
	}
}
197
/*
 * RPF state: RPSC processing finished (successfully or not); only an
 * rport-offline event is meaningful here.
 */
static void
bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
{
	struct bfa_fcs_rport_s *rport = rpf->rport;

	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPFSM_EVENT_RPORT_OFFLINE:
		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
		rpf->rpsc_retries = 0;
		break;

	default:
		bfa_sm_fault(rport->fcs, event);
	}
}
217
/*
 * RPF state: rport is offline; an online event restarts the RPSC2
 * exchange.
 */
static void
bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
{
	struct bfa_fcs_rport_s *rport = rpf->rport;

	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPFSM_EVENT_RPORT_ONLINE:
		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
		bfa_fcs_rpf_send_rpsc2(rpf, NULL);
		break;

	case RPFSM_EVENT_RPORT_OFFLINE:
		/* Already offline: ignore. */
		break;

	default:
		bfa_sm_fault(rport->fcs, event);
	}
}
240/**
241 * Called when Rport is created.
242 */
243void bfa_fcs_rpf_init(struct bfa_fcs_rport_s *rport)
244{
245 struct bfa_fcs_rpf_s *rpf = &rport->rpf;
246
247 bfa_trc(rport->fcs, rport->pid);
248 rpf->rport = rport;
249
250 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_uninit);
251}
252
253/**
254 * Called when Rport becomes online
255 */
256void bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport)
257{
258 bfa_trc(rport->fcs, rport->pid);
259
260 if (__fcs_min_cfg(rport->port->fcs))
261 return;
262
263 if (bfa_fcs_fabric_is_switched(rport->port->fabric))
264 bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_ONLINE);
265}
266
267/**
268 * Called when Rport becomes offline
269 */
270void bfa_fcs_rpf_rport_offline(struct bfa_fcs_rport_s *rport)
271{
272 bfa_trc(rport->fcs, rport->pid);
273
274 if (__fcs_min_cfg(rport->port->fcs))
275 return;
276
277 rport->rpf.rpsc_speed = 0;
278 bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_OFFLINE);
279}
280
281static void
282bfa_fcs_rpf_timeout(void *arg)
283{
284 struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *) arg;
285 struct bfa_fcs_rport_s *rport = rpf->rport;
286
287 bfa_trc(rport->fcs, rport->pid);
288 bfa_sm_send_event(rpf, RPFSM_EVENT_TIMEOUT);
289}
290
/*
 * Build and send an RPSC2 (Report Port Speed Capabilities v2) ELS for
 * this rport.  If no fcxp is available, queue for one; this routine is
 * re-invoked with fcxp_alloced set once an fcxp frees up.
 */
static void
bfa_fcs_rpf_send_rpsc2(void *rpf_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *)rpf_cbarg;
	struct bfa_fcs_rport_s *rport = rpf->rport;
	struct bfa_fcs_port_s *port = rport->port;
	struct fchs_s fchs;
	int len;
	struct bfa_fcxp_s *fcxp;

	bfa_trc(rport->fcs, rport->pwwn);

	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		bfa_fcxp_alloc_wait(port->fcs->bfa, &rpf->fcxp_wqe,
				    bfa_fcs_rpf_send_rpsc2, rpf);
		return;
	}
	rpf->fcxp = fcxp;

	/* Query a single PID: the rport's own. */
	len = fc_rpsc2_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
			     bfa_fcs_port_get_fcid(port), &rport->pid, 1);

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
		      FC_CLASS_3, len, &fchs, bfa_fcs_rpf_rpsc2_response,
		      rpf, FC_MAX_PDUSZ, FC_ELS_TOV);
	rport->stats.rpsc_sent++;
	bfa_sm_send_event(rpf, RPFSM_EVENT_FCXP_SENT);

}
321
/*
 * Handle the RPSC2 response: on ELS accept, extract the operating speed
 * of the first reported port entry; on reject, decide between "not
 * supported" and retry.
 */
static void
bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
			   bfa_status_t req_status, u32 rsp_len,
			   u32 resid_len, struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *) cbarg;
	struct bfa_fcs_rport_s *rport = rpf->rport;
	struct fc_ls_rjt_s *ls_rjt;
	struct fc_rpsc2_acc_s *rpsc2_acc;
	u16 num_ents;

	bfa_trc(rport->fcs, req_status);

	if (req_status != BFA_STATUS_OK) {
		bfa_trc(rport->fcs, req_status);
		/* A timeout is counted as an RPSC failure. */
		if (req_status == BFA_STATUS_ETIMER)
			rport->stats.rpsc_failed++;
		bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR);
		return;
	}

	rpsc2_acc = (struct fc_rpsc2_acc_s *) BFA_FCXP_RSP_PLD(fcxp);
	if (rpsc2_acc->els_cmd == FC_ELS_ACC) {
		rport->stats.rpsc_accs++;
		num_ents = bfa_os_ntohs(rpsc2_acc->num_pids);
		bfa_trc(rport->fcs, num_ents);
		/*
		 * NOTE(review): when num_ents == 0 no event is posted to
		 * the RPF state machine - confirm the SM cannot get stuck
		 * in the rpsc state here.
		 */
		if (num_ents > 0) {
			bfa_assert(rpsc2_acc->port_info[0].pid != rport->pid);
			bfa_trc(rport->fcs,
				bfa_os_ntohs(rpsc2_acc->port_info[0].pid));
			bfa_trc(rport->fcs,
				bfa_os_ntohs(rpsc2_acc->port_info[0].speed));
			bfa_trc(rport->fcs,
				bfa_os_ntohs(rpsc2_acc->port_info[0].index));
			bfa_trc(rport->fcs,
				rpsc2_acc->port_info[0].type);

			/* Speed 0 means no usable speed: treat as error. */
			if (rpsc2_acc->port_info[0].speed == 0) {
				bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR);
				return;
			}

			rpf->rpsc_speed = fc_rpsc_operspeed_to_bfa_speed(
				bfa_os_ntohs(rpsc2_acc->port_info[0].speed));

			bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_COMP);
		}
	} else {
		ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);
		bfa_trc(rport->fcs, ls_rjt->reason_code);
		bfa_trc(rport->fcs, ls_rjt->reason_code_expl);
		rport->stats.rpsc_rejects++;
		/* "Command not supported" is final; anything else retries. */
		if (ls_rjt->reason_code == FC_LS_RJT_RSN_CMD_NOT_SUPP)
			bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_FAIL);
		else
			bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR);
	}
}
diff --git a/drivers/scsi/bfa/scn.c b/drivers/scsi/bfa/scn.c
deleted file mode 100644
index 8a60129e6307..000000000000
--- a/drivers/scsi/bfa/scn.c
+++ /dev/null
@@ -1,482 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <bfa_svc.h>
20#include "fcs_lport.h"
21#include "fcs_rport.h"
22#include "fcs_ms.h"
23#include "fcs_trcmod.h"
24#include "fcs_fcxp.h"
25#include "fcs.h"
26#include "lport_priv.h"
27
28BFA_TRC_FILE(FCS, SCN);
29
30#define FC_QOS_RSCN_EVENT 0x0c
31#define FC_FABRIC_NAME_RSCN_EVENT 0x0d
32
33/*
34 * forward declarations
35 */
36static void bfa_fcs_port_scn_send_scr(void *scn_cbarg,
37 struct bfa_fcxp_s *fcxp_alloced);
38static void bfa_fcs_port_scn_scr_response(void *fcsarg,
39 struct bfa_fcxp_s *fcxp,
40 void *cbarg,
41 bfa_status_t req_status,
42 u32 rsp_len,
43 u32 resid_len,
44 struct fchs_s *rsp_fchs);
45static void bfa_fcs_port_scn_send_ls_acc(struct bfa_fcs_port_s *port,
46 struct fchs_s *rx_fchs);
47static void bfa_fcs_port_scn_timeout(void *arg);
48
49/**
50 * fcs_scm_sm FCS SCN state machine
51 */
52
53/**
54 * VPort SCN State Machine events
55 */
56enum port_scn_event {
57 SCNSM_EVENT_PORT_ONLINE = 1,
58 SCNSM_EVENT_PORT_OFFLINE = 2,
59 SCNSM_EVENT_RSP_OK = 3,
60 SCNSM_EVENT_RSP_ERROR = 4,
61 SCNSM_EVENT_TIMEOUT = 5,
62 SCNSM_EVENT_SCR_SENT = 6,
63};
64
65static void bfa_fcs_port_scn_sm_offline(struct bfa_fcs_port_scn_s *scn,
66 enum port_scn_event event);
67static void bfa_fcs_port_scn_sm_sending_scr(struct bfa_fcs_port_scn_s *scn,
68 enum port_scn_event event);
69static void bfa_fcs_port_scn_sm_scr(struct bfa_fcs_port_scn_s *scn,
70 enum port_scn_event event);
71static void bfa_fcs_port_scn_sm_scr_retry(struct bfa_fcs_port_scn_s *scn,
72 enum port_scn_event event);
73static void bfa_fcs_port_scn_sm_online(struct bfa_fcs_port_scn_s *scn,
74 enum port_scn_event event);
75
/**
 * Starting state - awaiting link up.  When the port comes online, an
 * SCR (State Change Registration) is sent to register for RSCNs.
 */
static void
bfa_fcs_port_scn_sm_offline(struct bfa_fcs_port_scn_s *scn,
			    enum port_scn_event event)
{
	switch (event) {
	case SCNSM_EVENT_PORT_ONLINE:
		bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_sending_scr);
		bfa_fcs_port_scn_send_scr(scn, NULL);
		break;

	case SCNSM_EVENT_PORT_OFFLINE:
		/* Already offline: ignore. */
		break;

	default:
		bfa_sm_fault(scn->port->fcs, event);
	}
}
96
/*
 * SCN state: SCR request queued (possibly still waiting on an fcxp
 * allocation).
 */
static void
bfa_fcs_port_scn_sm_sending_scr(struct bfa_fcs_port_scn_s *scn,
				enum port_scn_event event)
{
	switch (event) {
	case SCNSM_EVENT_SCR_SENT:
		/* Request is on the wire: wait for the response. */
		bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_scr);
		break;

	case SCNSM_EVENT_PORT_OFFLINE:
		/* Cancel any pending fcxp-allocation wait. */
		bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_offline);
		bfa_fcxp_walloc_cancel(scn->port->fcs->bfa, &scn->fcxp_wqe);
		break;

	default:
		bfa_sm_fault(scn->port->fcs, event);
	}
}
115
/*
 * SCN state: SCR sent, awaiting the response.  An error schedules a
 * delayed retry; offline discards the outstanding exchange.
 */
static void
bfa_fcs_port_scn_sm_scr(struct bfa_fcs_port_scn_s *scn,
			enum port_scn_event event)
{
	struct bfa_fcs_port_s *port = scn->port;

	switch (event) {
	case SCNSM_EVENT_RSP_OK:
		bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_online);
		break;

	case SCNSM_EVENT_RSP_ERROR:
		/* Retry after a delay. */
		bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_scr_retry);
		bfa_timer_start(port->fcs->bfa, &scn->timer,
				bfa_fcs_port_scn_timeout, scn,
				BFA_FCS_RETRY_TIMEOUT);
		break;

	case SCNSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_offline);
		bfa_fcxp_discard(scn->fcxp);
		break;

	default:
		bfa_sm_fault(scn->port->fcs, event);
	}
}
143
/*
 * SCN state: waiting out the retry delay before re-sending the SCR.
 */
static void
bfa_fcs_port_scn_sm_scr_retry(struct bfa_fcs_port_scn_s *scn,
			      enum port_scn_event event)
{
	switch (event) {
	case SCNSM_EVENT_TIMEOUT:
		bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_sending_scr);
		bfa_fcs_port_scn_send_scr(scn, NULL);
		break;

	case SCNSM_EVENT_PORT_OFFLINE:
		/* Stop the pending retry timer before going offline. */
		bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_offline);
		bfa_timer_stop(&scn->timer);
		break;

	default:
		bfa_sm_fault(scn->port->fcs, event);
	}
}
163
/*
 * SCN state: SCR registration accepted; only a port-offline event is
 * meaningful here.
 */
static void
bfa_fcs_port_scn_sm_online(struct bfa_fcs_port_scn_s *scn,
			   enum port_scn_event event)
{
	switch (event) {
	case SCNSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_offline);
		break;

	default:
		bfa_sm_fault(scn->port->fcs, event);
	}
}
177
178
179
180/**
181 * fcs_scn_private FCS SCN private functions
182 */
183
/**
 * This routine will be called to send a SCR command.
 */
static void
bfa_fcs_port_scn_send_scr(void *scn_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_port_scn_s *scn = scn_cbarg;
	struct bfa_fcs_port_s *port = scn->port;
	struct fchs_s fchs;
	int len;
	struct bfa_fcxp_s *fcxp;

	bfa_trc(port->fcs, port->pid);
	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Use the fcxp handed back by the allocation-wait path if present;
	 * otherwise allocate one, queueing for a callback if none is free.
	 */
	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		bfa_fcxp_alloc_wait(port->fcs->bfa, &scn->fcxp_wqe,
				    bfa_fcs_port_scn_send_scr, scn);
		return;
	}
	scn->fcxp = fcxp;

	/*
	 * Handle VU registrations for Base port only
	 */
	if ((!port->vport) && bfa_ioc_get_fcmode(&port->fcs->bfa->ioc)) {
		len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
				   bfa_lps_is_brcd_fabric(port->fabric->lps),
				   port->pid, 0);
	} else {
		/* Vports and non-FC mode: no vendor-unique registration. */
		len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), BFA_FALSE,
				   port->pid, 0);
	}

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
		      FC_CLASS_3, len, &fchs, bfa_fcs_port_scn_scr_response,
		      (void *)scn, FC_MAX_PDUSZ, FC_ELS_TOV);

	bfa_sm_send_event(scn, SCNSM_EVENT_SCR_SENT);
}
225
226static void
227bfa_fcs_port_scn_scr_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
228 void *cbarg, bfa_status_t req_status,
229 u32 rsp_len, u32 resid_len,
230 struct fchs_s *rsp_fchs)
231{
232 struct bfa_fcs_port_scn_s *scn = (struct bfa_fcs_port_scn_s *)cbarg;
233 struct bfa_fcs_port_s *port = scn->port;
234 struct fc_els_cmd_s *els_cmd;
235 struct fc_ls_rjt_s *ls_rjt;
236
237 bfa_trc(port->fcs, port->port_cfg.pwwn);
238
239 /*
240 * Sanity Checks
241 */
242 if (req_status != BFA_STATUS_OK) {
243 bfa_trc(port->fcs, req_status);
244 bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR);
245 return;
246 }
247
248 els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);
249
250 switch (els_cmd->els_code) {
251
252 case FC_ELS_ACC:
253 bfa_sm_send_event(scn, SCNSM_EVENT_RSP_OK);
254 break;
255
256 case FC_ELS_LS_RJT:
257
258 ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);
259
260 bfa_trc(port->fcs, ls_rjt->reason_code);
261 bfa_trc(port->fcs, ls_rjt->reason_code_expl);
262
263 bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR);
264 break;
265
266 default:
267 bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR);
268 }
269}
270
271/*
272 * Send a LS Accept
273 */
274static void
275bfa_fcs_port_scn_send_ls_acc(struct bfa_fcs_port_s *port,
276 struct fchs_s *rx_fchs)
277{
278 struct fchs_s fchs;
279 struct bfa_fcxp_s *fcxp;
280 struct bfa_rport_s *bfa_rport = NULL;
281 int len;
282
283 bfa_trc(port->fcs, rx_fchs->s_id);
284
285 fcxp = bfa_fcs_fcxp_alloc(port->fcs);
286 if (!fcxp)
287 return;
288
289 len = fc_ls_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id,
290 bfa_fcs_port_get_fcid(port), rx_fchs->ox_id);
291
292 bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
293 BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
294 FC_MAX_PDUSZ, 0);
295}
296
/**
 * This routine will be called by bfa_timer on SCR retry timer expiry.
 *
 * param[in]	arg - pointer to struct bfa_fcs_port_scn_s
 *
 * return
 *	void
 *
 * Special Considerations:
 *
 * note
 */
310static void
311bfa_fcs_port_scn_timeout(void *arg)
312{
313 struct bfa_fcs_port_scn_s *scn = (struct bfa_fcs_port_scn_s *)arg;
314
315 bfa_sm_send_event(scn, SCNSM_EVENT_TIMEOUT);
316}
317
318
319
320/**
321 * fcs_scn_public FCS state change notification public interfaces
322 */
323
/*
 * Functions called by port/fab
 */

/* Bind the SCN module to its port and start in the offline state. */
void
bfa_fcs_port_scn_init(struct bfa_fcs_port_s *port)
{
	struct bfa_fcs_port_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port);

	scn->port = port;
	bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_offline);
}

/* Port went offline: drive the SCN state machine back to offline. */
void
bfa_fcs_port_scn_offline(struct bfa_fcs_port_s *port)
{
	struct bfa_fcs_port_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port);

	scn->port = port;
	bfa_sm_send_event(scn, SCNSM_EVENT_PORT_OFFLINE);
}

/* Port came online: let the state machine start SCR registration. */
void
bfa_fcs_port_scn_online(struct bfa_fcs_port_s *port)
{
	struct bfa_fcs_port_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port);

	scn->port = port;
	bfa_sm_send_event(scn, SCNSM_EVENT_PORT_ONLINE);
}
353
354static void
355bfa_fcs_port_scn_portid_rscn(struct bfa_fcs_port_s *port, u32 rpid)
356{
357 struct bfa_fcs_rport_s *rport;
358
359 bfa_trc(port->fcs, rpid);
360
361 /**
362 * If this is an unknown device, then it just came online.
363 * Otherwise let rport handle the RSCN event.
364 */
365 rport = bfa_fcs_port_get_rport_by_pid(port, rpid);
366 if (rport == NULL) {
367 /*
368 * If min cfg mode is enabled, we donot need to
369 * discover any new rports.
370 */
371 if (!__fcs_min_cfg(port->fcs))
372 rport = bfa_fcs_rport_create(port, rpid);
373 } else {
374 bfa_fcs_rport_scn(rport);
375 }
376}
377
/**
 * rscn format based PID comparison
 *
 * True when PIDs @__c0 and @__c1 (viewed as byte arrays: domain, area,
 * port) fall in the same scope for RSCN format @__fmt: any PID for a
 * fabric RSCN, matching domain byte for a domain RSCN, matching domain
 * and area bytes for an area RSCN.
 */
#define __fc_pid_match(__c0, __c1, __fmt)		\
	(((__fmt) == FC_RSCN_FORMAT_FABRIC) ||		\
	 (((__fmt) == FC_RSCN_FORMAT_DOMAIN) &&		\
	  ((__c0)[0] == (__c1)[0])) ||			\
	 (((__fmt) == FC_RSCN_FORMAT_AREA) &&		\
	  ((__c0)[0] == (__c1)[0]) &&			\
	  ((__c0)[1] == (__c1)[1])))
388
389static void
390bfa_fcs_port_scn_multiport_rscn(struct bfa_fcs_port_s *port,
391 enum fc_rscn_format format, u32 rscn_pid)
392{
393 struct bfa_fcs_rport_s *rport;
394 struct list_head *qe, *qe_next;
395 u8 *c0, *c1;
396
397 bfa_trc(port->fcs, format);
398 bfa_trc(port->fcs, rscn_pid);
399
400 c0 = (u8 *) &rscn_pid;
401
402 list_for_each_safe(qe, qe_next, &port->rport_q) {
403 rport = (struct bfa_fcs_rport_s *)qe;
404 c1 = (u8 *) &rport->pid;
405 if (__fc_pid_match(c0, c1, format))
406 bfa_fcs_rport_scn(rport);
407 }
408}
409
/**
 * Handle an incoming RSCN ELS: acknowledge it with an LS_ACC, then
 * notify the affected rports (or trigger a fresh NS discovery) per
 * RSCN entry.
 */
void
bfa_fcs_port_scn_process_rscn(struct bfa_fcs_port_s *port, struct fchs_s *fchs,
			u32 len)
{
	/* The RSCN payload immediately follows the FC header. */
	struct fc_rscn_pl_s *rscn = (struct fc_rscn_pl_s *) (fchs + 1);
	int num_entries;
	u32 rscn_pid;
	bfa_boolean_t nsquery = BFA_FALSE;
	int i = 0;

	/*
	 * Payload length covers a 4-byte header word plus an array of
	 * affected-port entries.
	 */
	num_entries =
		(bfa_os_ntohs(rscn->payldlen) -
		 sizeof(u32)) / sizeof(rscn->event[0]);

	bfa_trc(port->fcs, num_entries);

	port->stats.num_rscn++;

	/* Acknowledge the RSCN before processing its entries. */
	bfa_fcs_port_scn_send_ls_acc(port, fchs);

	for (i = 0; i < num_entries; i++) {
		rscn_pid = rscn->event[i].portid;

		bfa_trc(port->fcs, rscn->event[i].format);
		bfa_trc(port->fcs, rscn_pid);

		switch (rscn->event[i].format) {
		case FC_RSCN_FORMAT_PORTID:
			if (rscn->event[i].qualifier == FC_QOS_RSCN_EVENT) {
				/*
				 * Ignore this event. f/w would have processed
				 * it
				 */
				bfa_trc(port->fcs, rscn_pid);
			} else {
				port->stats.num_portid_rscn++;
				bfa_fcs_port_scn_portid_rscn(port, rscn_pid);
			}
			break;

		case FC_RSCN_FORMAT_FABRIC:
			if (rscn->event[i].qualifier ==
			    FC_FABRIC_NAME_RSCN_EVENT) {
				/* Fabric-name change: MS module handles it. */
				bfa_fcs_port_ms_fabric_rscn(port);
				break;
			}
			/*
			 * !!!!!!!!! Fall Through !!!!!!!!!!!!!
			 */

		case FC_RSCN_FORMAT_AREA:
		case FC_RSCN_FORMAT_DOMAIN:
			nsquery = BFA_TRUE;
			bfa_fcs_port_scn_multiport_rscn(port,
							rscn->event[i].format,
							rscn_pid);
			break;

		default:
			bfa_assert(0);
			nsquery = BFA_TRUE;
		}
	}

	/**
	 * If any of area, domain or fabric RSCN is received, do a fresh
	 * discovery to find new devices.
	 */
	if (nsquery)
		bfa_fcs_port_ns_query(port);
}
481
482
diff --git a/drivers/scsi/bfa/vfapi.c b/drivers/scsi/bfa/vfapi.c
deleted file mode 100644
index 391a4790bebd..000000000000
--- a/drivers/scsi/bfa/vfapi.c
+++ /dev/null
@@ -1,292 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * vfapi.c Fabric module implementation.
20 */
21
22#include "fcs_fabric.h"
23#include "fcs_trcmod.h"
24
25BFA_TRC_FILE(FCS, VFAPI);
26
27/**
28 * fcs_vf_api virtual fabrics API
29 */
30
/**
 * Enable VF mode.
 *
 * @param[in]		fcs		fcs module instance
 * @param[in]		vf_id		default vf_id of port, FC_VF_ID_NULL
 *					to use standard default vf_id of 1.
 *
 * @retval	BFA_STATUS_OK		vf mode is enabled
 * @retval	BFA_STATUS_BUSY		Port is active. Port must be disabled
 *					before VF mode can be enabled.
 *
 * NOTE(review): stub - no VF state is changed; always reports success.
 */
bfa_status_t
bfa_fcs_vf_mode_enable(struct bfa_fcs_s *fcs, u16 vf_id)
{
	return BFA_STATUS_OK;
}

/**
 * Disable VF mode.
 *
 * @param[in]		fcs		fcs module instance
 *
 * @retval	BFA_STATUS_OK		vf mode is disabled
 * @retval	BFA_STATUS_BUSY		VFs are present and being used. All
 *					VFs must be deleted before disabling
 *					VF mode.
 *
 * NOTE(review): stub - no VF state is changed; always reports success.
 */
bfa_status_t
bfa_fcs_vf_mode_disable(struct bfa_fcs_s *fcs)
{
	return BFA_STATUS_OK;
}

/**
 * Create a new VF instance.
 *
 * A new VF is created using the given VF configuration. A VF is identified
 * by VF id. No duplicate VF creation is allowed with the same VF id. Once
 * a VF is created, VF is automatically started after link initialization
 * and EVFP exchange is completed.
 *
 * param[in] vf		- FCS vf data structure. Memory is
 *			  allocated by caller (driver)
 * param[in] fcs	- FCS module
 * param[in] vf_cfg	- VF configuration
 * param[in] vf_drv	- Opaque handle back to the driver's
 *			  virtual vf structure
 *
 * retval BFA_STATUS_OK VF creation is successful
 * retval BFA_STATUS_FAILED VF creation failed
 * retval BFA_STATUS_EEXIST A VF exists with the given vf_id
 *
 * NOTE(review): stub - only traces vf_id; no VF is actually created.
 */
bfa_status_t
bfa_fcs_vf_create(bfa_fcs_vf_t *vf, struct bfa_fcs_s *fcs, u16 vf_id,
		  struct bfa_port_cfg_s *port_cfg, struct bfad_vf_s *vf_drv)
{
	bfa_trc(fcs, vf_id);
	return BFA_STATUS_OK;
}
90
/**
 * Use this function to delete a BFA VF object. VF object should
 * be stopped before this function call.
 *
 * param[in] vf - pointer to bfa_vf_t.
 *
 * retval BFA_STATUS_OK	On vf deletion success
 * retval BFA_STATUS_BUSY VF is not in a stopped state
 * retval BFA_STATUS_INPROGRESS VF deletion is in progress
 *
 * NOTE(review): stub - only traces; nothing is deleted.
 */
bfa_status_t
bfa_fcs_vf_delete(bfa_fcs_vf_t *vf)
{
	bfa_trc(vf->fcs, vf->vf_id);
	return BFA_STATUS_OK;
}

/**
 * Start participation in VF. This triggers login to the virtual fabric.
 *
 * param[in] vf - pointer to bfa_vf_t.
 *
 * return None
 *
 * NOTE(review): stub - only traces; no login is triggered.
 */
void
bfa_fcs_vf_start(bfa_fcs_vf_t *vf)
{
	bfa_trc(vf->fcs, vf->vf_id);
}
120
/**
 * Logout with the virtual fabric.
 *
 * param[in] vf - pointer to bfa_vf_t.
 *
 * retval BFA_STATUS_OK On success.
 * retval BFA_STATUS_INPROGRESS VF is being stopped.
 *
 * NOTE(review): stub - only traces; no logout is performed.
 */
bfa_status_t
bfa_fcs_vf_stop(bfa_fcs_vf_t *vf)
{
	bfa_trc(vf->fcs, vf->vf_id);
	return BFA_STATUS_OK;
}

/**
 * Returns attributes of the given VF.
 *
 * param[in]	vf	pointer to bfa_vf_t.
 * param[out]	vf_attr	vf attributes returned
 *
 * return None
 *
 * NOTE(review): stub - vf_attr is never filled in.
 */
void
bfa_fcs_vf_get_attr(bfa_fcs_vf_t *vf, struct bfa_vf_attr_s *vf_attr)
{
	bfa_trc(vf->fcs, vf->vf_id);
}
149
150/**
151 * Return statistics associated with the given vf.
152 *
153 * param[in] vf pointer to bfa_vf_t.
154 * param[out] vf_stats vf statistics returned
155 *
156 * @return None
157 */
158void
159bfa_fcs_vf_get_stats(bfa_fcs_vf_t *vf, struct bfa_vf_stats_s *vf_stats)
160{
161 bfa_os_memcpy(vf_stats, &vf->stats, sizeof(struct bfa_vf_stats_s));
162 return;
163}
164
165void
166/**
167 * clear statistics associated with the given vf.
168 *
169 * param[in] vf pointer to bfa_vf_t.
170 *
171 * @return None
172 */
173bfa_fcs_vf_clear_stats(bfa_fcs_vf_t *vf)
174{
175 bfa_os_memset(&vf->stats, 0, sizeof(struct bfa_vf_stats_s));
176 return;
177}
178
/**
 * Returns FCS vf structure for a given vf_id.
 *
 * param[in]	vf_id - VF_ID
 *
 * return
 *	If lookup succeeds, returns fcs vf object, otherwise returns NULL
 */
bfa_fcs_vf_t *
bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id)
{
	bfa_trc(fcs, vf_id);
	/* Only the base fabric is supported; FC_VF_ID_NULL selects it. */
	if (vf_id == FC_VF_ID_NULL)
		return &fcs->fabric;

	/**
	 * @todo vf support
	 */

	return NULL;
}
200
/**
 * Returns driver VF structure for a given FCS vf.
 *
 * param[in]	vf - pointer to bfa_vf_t
 *
 * return Driver VF structure
 */
struct bfad_vf_s *
bfa_fcs_vf_get_drv_vf(bfa_fcs_vf_t *vf)
{
	/* vf must be non-NULL; the driver handle was stored at create time. */
	bfa_assert(vf);
	bfa_trc(vf->fcs, vf->vf_id);
	return vf->vf_drv;
}
215
/**
 * Return the list of VFs configured.
 *
 * param[in]	fcs	fcs module instance
 * param[out]	vf_ids	returned list of vf_ids
 * param[in,out] nvfs	in:size of vf_ids array,
 *			out:total elements present,
 *			actual elements returned is limited by the size
 *
 * return None
 *
 * NOTE(review): stub - vf_ids is never filled and *nvfs is not updated.
 */
void
bfa_fcs_vf_list(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs)
{
	bfa_trc(fcs, *nvfs);
}

/**
 * Return the list of all VFs visible from fabric.
 *
 * param[in]	fcs	fcs module instance
 * param[out]	vf_ids	returned list of vf_ids
 * param[in,out] nvfs	in:size of vf_ids array,
 *			out:total elements present,
 *			actual elements returned is limited by the size
 *
 * return None
 *
 * NOTE(review): stub - vf_ids is never filled and *nvfs is not updated.
 */
void
bfa_fcs_vf_list_all(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs)
{
	bfa_trc(fcs, *nvfs);
}
249
250/**
251 * Return the list of local logical ports present in the given VF.
252 *
253 * param[in] vf vf for which logical ports are returned
254 * param[out] lpwwn returned logical port wwn list
255 * param[in,out] nlports in:size of lpwwn list;
256 * out:total elements present,
257 * actual elements returned is limited by the size
258 *
259 */
260void
261bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t lpwwn[], int *nlports)
262{
263 struct list_head *qe;
264 struct bfa_fcs_vport_s *vport;
265 int i;
266 struct bfa_fcs_s *fcs;
267
268 if (vf == NULL || lpwwn == NULL || *nlports == 0)
269 return;
270
271 fcs = vf->fcs;
272
273 bfa_trc(fcs, vf->vf_id);
274 bfa_trc(fcs, (u32) *nlports);
275
276 i = 0;
277 lpwwn[i++] = vf->bport.port_cfg.pwwn;
278
279 list_for_each(qe, &vf->vport_q) {
280 if (i >= *nlports)
281 break;
282
283 vport = (struct bfa_fcs_vport_s *) qe;
284 lpwwn[i++] = vport->lport.port_cfg.pwwn;
285 }
286
287 bfa_trc(fcs, i);
288 *nlports = i;
289 return;
290}
291
292
diff --git a/drivers/scsi/bfa/vport.c b/drivers/scsi/bfa/vport.c
deleted file mode 100644
index b378ec79d386..000000000000
--- a/drivers/scsi/bfa/vport.c
+++ /dev/null
@@ -1,903 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcs_vport.c FCS virtual port state machine
20 */
21
22#include <bfa.h>
23#include <bfa_svc.h>
24#include <fcbuild.h>
25#include "fcs_fabric.h"
26#include "fcs_lport.h"
27#include "fcs_vport.h"
28#include "fcs_trcmod.h"
29#include "fcs.h"
30#include <aen/bfa_aen_lport.h>
31
32BFA_TRC_FILE(FCS, VPORT);
33
/*
 * Convenience accessors for fields reached through the vport's
 * embedded logical port (lport).
 */
#define __vport_fcs(__vp)       ((__vp)->lport.fcs)
#define __vport_pwwn(__vp)      ((__vp)->lport.port_cfg.pwwn)
#define __vport_nwwn(__vp)      ((__vp)->lport.port_cfg.nwwn)
#define __vport_bfa(__vp)       ((__vp)->lport.fcs->bfa)
#define __vport_fcid(__vp)      ((__vp)->lport.pid)
#define __vport_fabric(__vp)    ((__vp)->lport.fabric)
#define __vport_vfid(__vp)      ((__vp)->lport.fabric->vf_id)

/* Maximum FDISC attempts before declaring the vport login failed. */
#define BFA_FCS_VPORT_MAX_RETRIES  5
43/*
44 * Forward declarations
45 */
46static void bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport);
47static void bfa_fcs_vport_timeout(void *vport_arg);
48static void bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport);
49static void bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport);
50
51/**
52 * fcs_vport_sm FCS virtual port state machine
53 */
54
/**
 * VPort State Machine events
 */
enum bfa_fcs_vport_event {
	BFA_FCS_VPORT_SM_CREATE = 1,	/*  vport create event */
	BFA_FCS_VPORT_SM_DELETE = 2,	/*  vport delete event */
	BFA_FCS_VPORT_SM_START = 3,	/*  vport start request */
	BFA_FCS_VPORT_SM_STOP = 4,	/*  stop: unsupported */
	BFA_FCS_VPORT_SM_ONLINE = 5,	/*  fabric online */
	BFA_FCS_VPORT_SM_OFFLINE = 6,	/*  fabric offline event */
	BFA_FCS_VPORT_SM_FRMSENT = 7,	/*  fdisc/logo sent events */
	BFA_FCS_VPORT_SM_RSP_OK = 8,	/*  good response */
	BFA_FCS_VPORT_SM_RSP_ERROR = 9,	/*  error/bad response */
	BFA_FCS_VPORT_SM_TIMEOUT = 10,	/*  delay timer event */
	BFA_FCS_VPORT_SM_DELCOMP = 11,	/*  lport delete completion */
	BFA_FCS_VPORT_SM_RSP_DUP_WWN = 12,	/*  duplicate WWN error */
	BFA_FCS_VPORT_SM_RSP_FAILED = 13,	/*  non-retryable failure */
};
73
74static void bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport,
75 enum bfa_fcs_vport_event event);
76static void bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
77 enum bfa_fcs_vport_event event);
78static void bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport,
79 enum bfa_fcs_vport_event event);
80static void bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
81 enum bfa_fcs_vport_event event);
82static void bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
83 enum bfa_fcs_vport_event event);
84static void bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
85 enum bfa_fcs_vport_event event);
86static void bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport,
87 enum bfa_fcs_vport_event event);
88static void bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport,
89 enum bfa_fcs_vport_event event);
90static void bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
91 enum bfa_fcs_vport_event event);
92static void bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport,
93 enum bfa_fcs_vport_event event);
94
/* Maps each state-machine handler to its externally visible state code. */
static struct bfa_sm_table_s vport_sm_table[] = {
	{BFA_SM(bfa_fcs_vport_sm_uninit), BFA_FCS_VPORT_UNINIT},
	{BFA_SM(bfa_fcs_vport_sm_created), BFA_FCS_VPORT_CREATED},
	{BFA_SM(bfa_fcs_vport_sm_offline), BFA_FCS_VPORT_OFFLINE},
	{BFA_SM(bfa_fcs_vport_sm_fdisc), BFA_FCS_VPORT_FDISC},
	{BFA_SM(bfa_fcs_vport_sm_fdisc_retry), BFA_FCS_VPORT_FDISC_RETRY},
	{BFA_SM(bfa_fcs_vport_sm_online), BFA_FCS_VPORT_ONLINE},
	{BFA_SM(bfa_fcs_vport_sm_deleting), BFA_FCS_VPORT_DELETING},
	{BFA_SM(bfa_fcs_vport_sm_cleanup), BFA_FCS_VPORT_CLEANUP},
	{BFA_SM(bfa_fcs_vport_sm_logo), BFA_FCS_VPORT_LOGO},
	{BFA_SM(bfa_fcs_vport_sm_error), BFA_FCS_VPORT_ERROR}
};
107
108/**
109 * Beginning state.
110 */
111static void
112bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport,
113 enum bfa_fcs_vport_event event)
114{
115 bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
116 bfa_trc(__vport_fcs(vport), event);
117
118 switch (event) {
119 case BFA_FCS_VPORT_SM_CREATE:
120 bfa_sm_set_state(vport, bfa_fcs_vport_sm_created);
121 bfa_fcs_fabric_addvport(__vport_fabric(vport), vport);
122 break;
123
124 default:
125 bfa_sm_fault(__vport_fcs(vport), event);
126 }
127}
128
129/**
130 * Created state - a start event is required to start up the state machine.
131 */
static void
bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
			enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_START:
		/* FDISC only makes sense on an online, NPIV-capable fabric. */
		if (bfa_fcs_fabric_is_online(__vport_fabric(vport))
		    && bfa_fcs_fabric_npiv_capable(__vport_fabric(vport))) {
			bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
			bfa_fcs_vport_do_fdisc(vport);
		} else {
			/**
			 * Fabric is offline or not NPIV capable, stay in
			 * offline state.
			 */
			vport->vport_stats.fab_no_npiv++;
			bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
		}
		break;

	case BFA_FCS_VPORT_SM_DELETE:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
		bfa_fcs_port_delete(&vport->lport);
		break;

	case BFA_FCS_VPORT_SM_ONLINE:
	case BFA_FCS_VPORT_SM_OFFLINE:
		/**
		 * Ignore ONLINE/OFFLINE events from fabric till vport is started.
		 */
		break;

	default:
		bfa_sm_fault(__vport_fcs(vport), event);
	}
}
171
172/**
173 * Offline state - awaiting ONLINE event from fabric SM.
174 */
175static void
176bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport,
177 enum bfa_fcs_vport_event event)
178{
179 bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
180 bfa_trc(__vport_fcs(vport), event);
181
182 switch (event) {
183 case BFA_FCS_VPORT_SM_DELETE:
184 bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
185 bfa_fcs_port_delete(&vport->lport);
186 break;
187
188 case BFA_FCS_VPORT_SM_ONLINE:
189 bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
190 vport->fdisc_retries = 0;
191 bfa_fcs_vport_do_fdisc(vport);
192 break;
193
194 case BFA_FCS_VPORT_SM_OFFLINE:
195 /*
196 * This can happen if the vport couldn't be initialzied due
197 * the fact that the npiv was not enabled on the switch. In
198 * that case we will put the vport in offline state. However,
199 * the link can go down and cause the this event to be sent when
200 * we are already offline. Ignore it.
201 */
202 break;
203
204 default:
205 bfa_sm_fault(__vport_fcs(vport), event);
206 }
207}
208
209/**
210 * FDISC is sent and awaiting reply from fabric.
211 */
static void
bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
		       enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_DELETE:
		/* Abandon the pending FDISC and tear the lport down. */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
		bfa_lps_discard(vport->lps);
		bfa_fcs_port_delete(&vport->lport);
		break;

	case BFA_FCS_VPORT_SM_OFFLINE:
		/* Fabric dropped while FDISC pending - discard the login. */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
		bfa_lps_discard(vport->lps);
		break;

	case BFA_FCS_VPORT_SM_RSP_OK:
		/* FDISC accepted - the vport is now logged in. */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_online);
		bfa_fcs_port_online(&vport->lport);
		break;

	case BFA_FCS_VPORT_SM_RSP_ERROR:
		/* Retryable failure - back off before the next attempt. */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc_retry);
		bfa_timer_start(__vport_bfa(vport), &vport->timer,
				bfa_fcs_vport_timeout, vport,
				BFA_FCS_RETRY_TIMEOUT);
		break;

	case BFA_FCS_VPORT_SM_RSP_FAILED:
		/* Non-retryable failure - park offline. */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
		break;

	case BFA_FCS_VPORT_SM_RSP_DUP_WWN:
		/* Duplicate WWN - only a delete can leave the error state. */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_error);
		break;

	default:
		bfa_sm_fault(__vport_fcs(vport), event);
	}
}
255
256/**
257 * FDISC attempt failed - a timer is active to retry FDISC.
258 */
259static void
260bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
261 enum bfa_fcs_vport_event event)
262{
263 bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
264 bfa_trc(__vport_fcs(vport), event);
265
266 switch (event) {
267 case BFA_FCS_VPORT_SM_DELETE:
268 bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
269 bfa_timer_stop(&vport->timer);
270 bfa_fcs_port_delete(&vport->lport);
271 break;
272
273 case BFA_FCS_VPORT_SM_OFFLINE:
274 bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
275 bfa_timer_stop(&vport->timer);
276 break;
277
278 case BFA_FCS_VPORT_SM_TIMEOUT:
279 bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
280 vport->vport_stats.fdisc_retries++;
281 vport->fdisc_retries++;
282 bfa_fcs_vport_do_fdisc(vport);
283 break;
284
285 default:
286 bfa_sm_fault(__vport_fcs(vport), event);
287 }
288}
289
290/**
291 * Vport is online (FDISC is complete).
292 */
293static void
294bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
295 enum bfa_fcs_vport_event event)
296{
297 bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
298 bfa_trc(__vport_fcs(vport), event);
299
300 switch (event) {
301 case BFA_FCS_VPORT_SM_DELETE:
302 bfa_sm_set_state(vport, bfa_fcs_vport_sm_deleting);
303 bfa_fcs_port_delete(&vport->lport);
304 break;
305
306 case BFA_FCS_VPORT_SM_OFFLINE:
307 bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
308 bfa_lps_discard(vport->lps);
309 bfa_fcs_port_offline(&vport->lport);
310 break;
311
312 default:
313 bfa_sm_fault(__vport_fcs(vport), event);
314 }
315}
316
317/**
318 * Vport is being deleted - awaiting lport delete completion to send
319 * LOGO to fabric.
320 */
321static void
322bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport,
323 enum bfa_fcs_vport_event event)
324{
325 bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
326 bfa_trc(__vport_fcs(vport), event);
327
328 switch (event) {
329 case BFA_FCS_VPORT_SM_DELETE:
330 break;
331
332 case BFA_FCS_VPORT_SM_DELCOMP:
333 bfa_sm_set_state(vport, bfa_fcs_vport_sm_logo);
334 bfa_fcs_vport_do_logo(vport);
335 break;
336
337 case BFA_FCS_VPORT_SM_OFFLINE:
338 bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
339 break;
340
341 default:
342 bfa_sm_fault(__vport_fcs(vport), event);
343 }
344}
345
346/**
347 * Error State.
348 * This state will be set when the Vport Creation fails due to errors like
349 * Dup WWN. In this state only operation allowed is a Vport Delete.
350 */
static void
bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport,
		       enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_DELETE:
		/* Delete is the only way out of the error state. */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
		bfa_fcs_port_delete(&vport->lport);

		break;

	default:
		/*
		 * Unlike other states, unexpected events here are only
		 * traced, not faulted - the SM is already in error.
		 */
		bfa_trc(__vport_fcs(vport), event);
	}
}
369
370/**
371 * Lport cleanup is in progress since vport is being deleted. Fabric is
372 * offline, so no LOGO is needed to complete vport deletion.
373 */
374static void
375bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport,
376 enum bfa_fcs_vport_event event)
377{
378 bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
379 bfa_trc(__vport_fcs(vport), event);
380
381 switch (event) {
382 case BFA_FCS_VPORT_SM_DELCOMP:
383 bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);
384 bfa_fcs_vport_free(vport);
385 break;
386
387 case BFA_FCS_VPORT_SM_DELETE:
388 break;
389
390 default:
391 bfa_sm_fault(__vport_fcs(vport), event);
392 }
393}
394
395/**
396 * LOGO is sent to fabric. Vport delete is in progress. Lport delete cleanup
397 * is done.
398 */
static void
bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
		      enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_OFFLINE:
		bfa_lps_discard(vport->lps);
		/*
		 * !!! fall through !!!
		 * An offline while LOGO is pending completes the delete
		 * the same way a LOGO response would.
		 */

	case BFA_FCS_VPORT_SM_RSP_OK:
	case BFA_FCS_VPORT_SM_RSP_ERROR:
		/* LOGO done (or moot) - free the vport either way. */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);
		bfa_fcs_vport_free(vport);
		break;

	case BFA_FCS_VPORT_SM_DELETE:
		/* Delete already in progress - ignore. */
		break;

	default:
		bfa_sm_fault(__vport_fcs(vport), event);
	}
}
426
427
428
429/**
430 * fcs_vport_private FCS virtual port private functions
431 */
432
433/**
434 * Send AEN notification
435 */
static void
bfa_fcs_vport_aen_post(bfa_fcs_lport_t *port, enum bfa_lport_aen_event event)
{
	union bfa_aen_data_u aen_data;
	struct bfa_log_mod_s *logmod = port->fcs->logm;
	enum bfa_port_role role = port->port_cfg.roles;
	wwn_t lpwwn = bfa_fcs_port_get_pwwn(port);
	char lpwwn_ptr[BFA_STRING_32];
	/* Indexed by role/2 below - assumes role takes one of the FCP
	 * role flag values; TODO confirm against enum bfa_port_role. */
	char *role_str[BFA_PORT_ROLE_FCP_MAX / 2 + 1] =
		{ "Initiator", "Target", "IPFC" };

	wwn2str(lpwwn_ptr, lpwwn);

	bfa_assert(role <= BFA_PORT_ROLE_FCP_MAX);

	bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, event), lpwwn_ptr,
		role_str[role/2]);

	/*
	 * NOTE(review): aen_data is populated but no posting call is made
	 * in this function - confirm the AEN delivery path.
	 */
	aen_data.lport.vf_id = port->fabric->vf_id;
	aen_data.lport.roles = role;
	aen_data.lport.ppwwn =
		bfa_fcs_port_get_pwwn(bfa_fcs_get_base_port(port->fcs));
	aen_data.lport.lpwwn = lpwwn;
}
460
461/**
462 * This routine will be called to send a FDISC command.
463 */
static void
bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport)
{
	/* Issue FDISC through the lps with this vport's WWNs and the
	 * current max frame size; completion comes back via the lps. */
	bfa_lps_fdisc(vport->lps, vport,
		      bfa_fcport_get_maxfrsize(__vport_bfa(vport)),
		      __vport_pwwn(vport), __vport_nwwn(vport));
	vport->vport_stats.fdisc_sent++;
}
472
/**
 * Map an FDISC LS_RJT into the right state-machine event, posting an
 * AEN when the failure is permanent (or on the first unknown reject).
 */
static void
bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
{
	u8 lsrjt_rsn = bfa_lps_get_lsrjt_rsn(vport->lps);
	u8 lsrjt_expl = bfa_lps_get_lsrjt_expl(vport->lps);

	bfa_trc(__vport_fcs(vport), lsrjt_rsn);
	bfa_trc(__vport_fcs(vport), lsrjt_expl);

	/*
	 * For certain reason codes, we don't want to retry.
	 */
	switch (bfa_lps_get_lsrjt_expl(vport->lps)) {
	case FC_LS_RJT_EXP_INV_PORT_NAME: /* by brocade */
	case FC_LS_RJT_EXP_INVALID_NPORT_ID: /* by Cisco */
		/* Treated as a duplicate WWN after the retries run out. */
		if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
			bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
		else {
			bfa_fcs_vport_aen_post(&vport->lport,
					       BFA_LPORT_AEN_NPIV_DUP_WWN);
			bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_DUP_WWN);
		}
		break;

	case FC_LS_RJT_EXP_INSUFF_RES:
		/*
		 * This means max logins per port/switch setting on the
		 * switch was exceeded.
		 */
		if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
			bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
		else {
			bfa_fcs_vport_aen_post(&vport->lport,
					       BFA_LPORT_AEN_NPIV_FABRIC_MAX);
			bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED);
		}
		break;

	default:
		/* Unknown reject reason: log once, then keep retrying. */
		if (vport->fdisc_retries == 0)	/* Print only once */
			bfa_fcs_vport_aen_post(&vport->lport,
					       BFA_LPORT_AEN_NPIV_UNKNOWN);
		bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
	}
}
518
519/**
520 * Called to send a logout to the fabric. Used when a V-Port is
521 * deleted/stopped.
522 */
static void
bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));

	/* Count the attempt, then send the FDISC LOGO via the lps. */
	vport->vport_stats.logo_sent++;
	bfa_lps_fdisclogo(vport->lps);
}
531
/**
 * This routine will be called by bfa_timer when the FDISC retry timer
 * expires.
 *
 * param[in]	vport_arg - pointer to struct bfa_fcs_vport_s
 *
 * return
 *	void
 *
 * Special Considerations:
 *
 * note
 */
545static void
546bfa_fcs_vport_timeout(void *vport_arg)
547{
548 struct bfa_fcs_vport_s *vport = (struct bfa_fcs_vport_s *)vport_arg;
549
550 vport->vport_stats.fdisc_timeouts++;
551 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_TIMEOUT);
552}
553
static void
bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport)
{
	/* Unlink from the fabric, notify the driver, release the lps. */
	bfa_fcs_fabric_delvport(__vport_fabric(vport), vport);
	bfa_fcb_vport_delete(vport->vport_drv);
	bfa_lps_delete(vport->lps);
}
561
562
563
564/**
565 * fcs_vport_public FCS virtual port public interfaces
566 */
567
/**
 * Online notification from fabric SM.
 */
void
bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport)
{
	vport->vport_stats.fab_online++;
	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE);
}

/**
 * Offline notification from fabric SM.
 */
void
bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport)
{
	vport->vport_stats.fab_offline++;
	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE);
}

/**
 * Cleanup notification from fabric SM on link timer expiry.
 *
 * NOTE(review): only the counter is bumped; no SM event is sent -
 * confirm that is intentional.
 */
void
bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport)
{
	vport->vport_stats.fab_cleanup++;
}

/**
 * Delete notification from fabric SM. To be invoked from within FCS.
 */
void
bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport)
{
	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE);
}

/**
 * Delete completion callback from associated lport.
 */
void
bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport)
{
	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELCOMP);
}
614
615/**
616 * fcs_vport_api Virtual port API
617 */
618
/**
 * Use this function to instantiate a new FCS vport object. This
 * function will not trigger any HW initialization process (which will be
 * done in vport_start() call)
 *
 * param[in] vport - pointer to bfa_fcs_vport_t. This space
 *		needs to be allocated by the driver.
 * param[in] fcs - FCS instance
 * param[in] vf_id - VF_ID if vport is created within a VF.
 *		FC_VF_ID_NULL to specify base fabric.
 * param[in] vport_cfg - vport configuration
 * param[in] vport_drv - Opaque handle back to the driver's vport
 *		structure
 *
 * retval BFA_STATUS_OK - on success.
 * retval BFA_STATUS_INVALID_WWN - port WWN is zero.
 * retval BFA_STATUS_VPORT_WWN_BP - WWN clashes with the base port.
 * retval BFA_STATUS_VPORT_EXISTS - a vport with this WWN already exists.
 * retval BFA_STATUS_VPORT_MAX - vport limit reached or no LPS available.
 */
bfa_status_t
bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
		u16 vf_id, struct bfa_port_cfg_s *vport_cfg,
		struct bfad_vport_s *vport_drv)
{
	/* A vport needs a valid (non-zero) port WWN of its own. */
	if (vport_cfg->pwwn == 0)
		return BFA_STATUS_INVALID_WWN;

	/* The requested WWN must not collide with the base port's WWN. */
	if (bfa_fcs_port_get_pwwn(&fcs->fabric.bport) == vport_cfg->pwwn)
		return BFA_STATUS_VPORT_EXISTS;

	/* ...nor with any already-created vport in this VF. */
	if (bfa_fcs_vport_lookup(fcs, vf_id, vport_cfg->pwwn) != NULL)
		return BFA_STATUS_VPORT_EXISTS;

	/* Enforce the adapter's vport limit. */
	if (bfa_fcs_fabric_vport_count(&fcs->fabric) ==
		bfa_lps_get_max_vport(fcs->bfa))
		return BFA_STATUS_VPORT_MAX;

	/*
	 * Reserve an LPS object for this vport; exhaustion is reported
	 * with the same status as hitting the vport limit.
	 */
	vport->lps = bfa_lps_alloc(fcs->bfa);
	if (!vport->lps)
		return BFA_STATUS_VPORT_MAX;

	vport->vport_drv = vport_drv;
	vport_cfg->preboot_vp = BFA_FALSE;	/* PBC variant overrides this */
	bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);

	/* Attach and initialize the underlying logical port. */
	bfa_fcs_lport_attach(&vport->lport, fcs, vf_id, vport);
	bfa_fcs_lport_init(&vport->lport, vport_cfg);

	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_CREATE);

	return BFA_STATUS_OK;
}
669
670/**
671 * Use this function to instantiate a new FCS PBC vport object. This
672 * function will not trigger any HW initialization process (which will be
673 * done in vport_start() call)
674 *
675 * param[in] vport - pointer to bfa_fcs_vport_t. This space
676 * needs to be allocated by the driver.
677 * param[in] fcs - FCS instance
678 * param[in] vport_cfg - vport configuration
679 * param[in] vf_id - VF_ID if vport is created within a VF.
680 * FC_VF_ID_NULL to specify base fabric.
681 * param[in] vport_drv - Opaque handle back to the driver's vport
682 * structure
683 *
684 * retval BFA_STATUS_OK - on success.
685 * retval BFA_STATUS_FAILED - on failure.
686 */
687bfa_status_t
688bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
689 uint16_t vf_id, struct bfa_port_cfg_s *vport_cfg,
690 struct bfad_vport_s *vport_drv)
691{
692 bfa_status_t rc;
693
694 rc = bfa_fcs_vport_create(vport, fcs, vf_id, vport_cfg, vport_drv);
695 vport->lport.port_cfg.preboot_vp = BFA_TRUE;
696
697 return rc;
698}
699
/**
 * Use this function to start the vport (kicks off the FDISC login
 * sequence via the vport state machine).
 *
 * @param[in] vport - pointer to bfa_fcs_vport_t.
 *
 * @return BFA_STATUS_OK - the start event is always accepted here;
 *	any failure is reported asynchronously through the state machine.
 */
bfa_status_t
bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport)
{
	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_START);

	return BFA_STATUS_OK;
}
714
/**
 * Use this function to quiesce the vport object. This function
 * returns immediately; when the vport is actually stopped, the
 * bfa_drv_vport_stop_cb() will be called.
 *
 * param[in] vport - pointer to bfa_fcs_vport_t.
 *
 * return BFA_STATUS_OK - the stop event is always accepted here.
 */
bfa_status_t
bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport)
{
	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_STOP);

	return BFA_STATUS_OK;
}
731
/**
 * Use this function to delete a vport object. The fabric object should
 * be stopped before this function call.
 *
 * Do not invoke this from within FCS; internal callers use
 * bfa_fcs_vport_fcs_delete() instead.
 *
 * param[in] vport - pointer to bfa_fcs_vport_t.
 *
 * return BFA_STATUS_PBC - pre-boot vports cannot be deleted.
 * return BFA_STATUS_OK - delete event queued to the state machine.
 */
bfa_status_t
bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport)
{
	if (vport->lport.port_cfg.preboot_vp)
		return BFA_STATUS_PBC;

	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE);

	return BFA_STATUS_OK;
}
752
753/**
754 * Use this function to get vport's current status info.
755 *
756 * param[in] vport pointer to bfa_fcs_vport_t.
757 * param[out] attr pointer to return vport attributes
758 *
759 * return None
760 */
761void
762bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport,
763 struct bfa_vport_attr_s *attr)
764{
765 if (vport == NULL || attr == NULL)
766 return;
767
768 bfa_os_memset(attr, 0, sizeof(struct bfa_vport_attr_s));
769
770 bfa_fcs_port_get_attr(&vport->lport, &attr->port_attr);
771 attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm);
772}
773
774/**
775 * Use this function to get vport's statistics.
776 *
777 * param[in] vport pointer to bfa_fcs_vport_t.
778 * param[out] stats pointer to return vport statistics in
779 *
780 * return None
781 */
782void
783bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport,
784 struct bfa_vport_stats_s *stats)
785{
786 *stats = vport->vport_stats;
787}
788
789/**
790 * Use this function to clear vport's statistics.
791 *
792 * param[in] vport pointer to bfa_fcs_vport_t.
793 *
794 * return None
795 */
796void
797bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport)
798{
799 bfa_os_memset(&vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
800}
801
802/**
803 * Lookup a virtual port. Excludes base port from lookup.
804 */
805struct bfa_fcs_vport_s *
806bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t vpwwn)
807{
808 struct bfa_fcs_vport_s *vport;
809 struct bfa_fcs_fabric_s *fabric;
810
811 bfa_trc(fcs, vf_id);
812 bfa_trc(fcs, vpwwn);
813
814 fabric = bfa_fcs_vf_lookup(fcs, vf_id);
815 if (!fabric) {
816 bfa_trc(fcs, vf_id);
817 return NULL;
818 }
819
820 vport = bfa_fcs_fabric_vport_lookup(fabric, vpwwn);
821 return vport;
822}
823
/**
 * FDISC response callback from the LPS layer.
 *
 * Translates the FDISC completion status into statistics updates and
 * vport state-machine events.
 *
 * @param[in] bfad   - driver handle (unused here)
 * @param[in] uarg   - opaque arg, actually the bfa_fcs_vport_s pointer
 * @param[in] status - FDISC completion status from the firmware/LPS
 */
void
bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status)
{
	struct bfa_fcs_vport_s *vport = uarg;

	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), status);

	switch (status) {
	case BFA_STATUS_OK:
		/*
		 * Initialize the V-Port fields: latch the FC address
		 * assigned by the fabric before driving the SM online.
		 */
		__vport_fcid(vport) = bfa_lps_get_pid(vport->lps);
		vport->vport_stats.fdisc_accepts++;
		bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK);
		break;

	case BFA_STATUS_INVALID_MAC:
		/*
		 * Only for CNA (FCoE): bad MAC in the accept is treated
		 * as a malformed accept.
		 */
		vport->vport_stats.fdisc_acc_bad++;
		bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);

		break;

	case BFA_STATUS_EPROTOCOL:
		/* Classify the protocol error for statistics only. */
		switch (bfa_lps_get_extstatus(vport->lps)) {
		case BFA_EPROTO_BAD_ACCEPT:
			vport->vport_stats.fdisc_acc_bad++;
			break;

		case BFA_EPROTO_UNKNOWN_RSP:
			vport->vport_stats.fdisc_unknown_rsp++;
			break;

		default:
			break;
		}

		bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
		break;

	case BFA_STATUS_FABRIC_RJT:
		/* Fabric rejected the FDISC; decode the reject reason. */
		vport->vport_stats.fdisc_rejects++;
		bfa_fcs_vport_fdisc_rejected(vport);
		break;

	default:
		/* Any other failure is a generic response error. */
		vport->vport_stats.fdisc_rsp_err++;
		bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
	}
}
881
/**
 * LOGO response callback from the LPS layer.
 *
 * @param[in] bfad - driver handle (unused here)
 * @param[in] uarg - opaque arg, actually the bfa_fcs_vport_s pointer
 *
 * Forwards the LOGO completion to the vport SM as RSP_OK.
 */
void
bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg)
{
	struct bfa_fcs_vport_s *vport = uarg;
	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK);
}
891
/**
 * Received clear virtual link (FIP CVL) notification.
 *
 * @param[in] bfad - driver handle (unused here)
 * @param[in] uarg - opaque arg, actually the bfa_fcs_vport_s pointer
 */
void
bfa_cb_lps_cvl_event(void *bfad, void *uarg)
{
	struct bfa_fcs_vport_s *vport = uarg;

	/*
	 * Send an Offline followed by an ONLINE: bounce the vport so it
	 * re-logs in after the link was cleared. The event order is
	 * significant -- do not swap.
	 */
	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE);
	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE);
}
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 00c033511cbf..b6345d91bb66 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -753,7 +753,7 @@ extern int bnx2i_send_iscsi_tmf(struct bnx2i_conn *conn,
753extern int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *conn, 753extern int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *conn,
754 struct bnx2i_cmd *cmnd); 754 struct bnx2i_cmd *cmnd);
755extern int bnx2i_send_iscsi_nopout(struct bnx2i_conn *conn, 755extern int bnx2i_send_iscsi_nopout(struct bnx2i_conn *conn,
756 struct iscsi_task *mtask, u32 ttt, 756 struct iscsi_task *mtask,
757 char *datap, int data_len, int unsol); 757 char *datap, int data_len, int unsol);
758extern int bnx2i_send_iscsi_logout(struct bnx2i_conn *conn, 758extern int bnx2i_send_iscsi_logout(struct bnx2i_conn *conn,
759 struct iscsi_task *mtask); 759 struct iscsi_task *mtask);
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index d23fc256d585..90cef716b796 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -385,7 +385,6 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
385 struct bnx2i_cmd *bnx2i_cmd; 385 struct bnx2i_cmd *bnx2i_cmd;
386 struct bnx2i_tmf_request *tmfabort_wqe; 386 struct bnx2i_tmf_request *tmfabort_wqe;
387 u32 dword; 387 u32 dword;
388 u32 scsi_lun[2];
389 388
390 bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data; 389 bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
391 tmfabort_hdr = (struct iscsi_tm *)mtask->hdr; 390 tmfabort_hdr = (struct iscsi_tm *)mtask->hdr;
@@ -393,38 +392,41 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
393 bnx2i_conn->ep->qp.sq_prod_qe; 392 bnx2i_conn->ep->qp.sq_prod_qe;
394 393
395 tmfabort_wqe->op_code = tmfabort_hdr->opcode; 394 tmfabort_wqe->op_code = tmfabort_hdr->opcode;
396 tmfabort_wqe->op_attr = 0; 395 tmfabort_wqe->op_attr = tmfabort_hdr->flags;
397 tmfabort_wqe->op_attr =
398 ISCSI_TMF_REQUEST_ALWAYS_ONE | ISCSI_TM_FUNC_ABORT_TASK;
399 396
400 tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14)); 397 tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14));
401 tmfabort_wqe->reserved2 = 0; 398 tmfabort_wqe->reserved2 = 0;
402 tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn); 399 tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn);
403 400
404 ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt); 401 switch (tmfabort_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) {
405 if (!ctask || !ctask->sc) 402 case ISCSI_TM_FUNC_ABORT_TASK:
406 /* 403 case ISCSI_TM_FUNC_TASK_REASSIGN:
407 * the iscsi layer must have completed the cmd while this 404 ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt);
408 * was starting up. 405 if (!ctask || !ctask->sc)
409 * 406 /*
410 * Note: In the case of a SCSI cmd timeout, the task's sc 407 * the iscsi layer must have completed the cmd while
411 * is still active; hence ctask->sc != 0 408 * was starting up.
412 * In this case, the task must be aborted 409 *
413 */ 410 * Note: In the case of a SCSI cmd timeout, the task's
414 return 0; 411 * sc is still active; hence ctask->sc != 0
415 412 * In this case, the task must be aborted
416 ref_sc = ctask->sc; 413 */
417 414 return 0;
418 /* Retrieve LUN directly from the ref_sc */ 415
419 int_to_scsilun(ref_sc->device->lun, (struct scsi_lun *) scsi_lun); 416 ref_sc = ctask->sc;
420 tmfabort_wqe->lun[0] = be32_to_cpu(scsi_lun[0]); 417 if (ref_sc->sc_data_direction == DMA_TO_DEVICE)
421 tmfabort_wqe->lun[1] = be32_to_cpu(scsi_lun[1]); 418 dword = (ISCSI_TASK_TYPE_WRITE <<
422 419 ISCSI_CMD_REQUEST_TYPE_SHIFT);
423 if (ref_sc->sc_data_direction == DMA_TO_DEVICE) 420 else
424 dword = (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT); 421 dword = (ISCSI_TASK_TYPE_READ <<
425 else 422 ISCSI_CMD_REQUEST_TYPE_SHIFT);
426 dword = (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT); 423 tmfabort_wqe->ref_itt = (dword |
427 tmfabort_wqe->ref_itt = (dword | (tmfabort_hdr->rtt & ISCSI_ITT_MASK)); 424 (tmfabort_hdr->rtt & ISCSI_ITT_MASK));
425 break;
426 default:
427 tmfabort_wqe->ref_itt = RESERVED_ITT;
428 }
429 memcpy(tmfabort_wqe->lun, tmfabort_hdr->lun, sizeof(struct scsi_lun));
428 tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn); 430 tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn);
429 431
430 tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma; 432 tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
@@ -464,7 +466,6 @@ int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *bnx2i_conn,
464 * @conn: iscsi connection 466 * @conn: iscsi connection
465 * @cmd: driver command structure which is requesting 467 * @cmd: driver command structure which is requesting
466 * a WQE to sent to chip for further processing 468 * a WQE to sent to chip for further processing
467 * @ttt: TTT to be used when building pdu header
468 * @datap: payload buffer pointer 469 * @datap: payload buffer pointer
469 * @data_len: payload data length 470 * @data_len: payload data length
470 * @unsol: indicated whether nopout pdu is unsolicited pdu or 471 * @unsol: indicated whether nopout pdu is unsolicited pdu or
@@ -473,7 +474,7 @@ int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *bnx2i_conn,
473 * prepare and post a nopout request WQE to CNIC firmware 474 * prepare and post a nopout request WQE to CNIC firmware
474 */ 475 */
475int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn, 476int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
476 struct iscsi_task *task, u32 ttt, 477 struct iscsi_task *task,
477 char *datap, int data_len, int unsol) 478 char *datap, int data_len, int unsol)
478{ 479{
479 struct bnx2i_endpoint *ep = bnx2i_conn->ep; 480 struct bnx2i_endpoint *ep = bnx2i_conn->ep;
@@ -498,7 +499,7 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
498 nopout_wqe->itt = ((u16)task->itt | 499 nopout_wqe->itt = ((u16)task->itt |
499 (ISCSI_TASK_TYPE_MPATH << 500 (ISCSI_TASK_TYPE_MPATH <<
500 ISCSI_TMF_REQUEST_TYPE_SHIFT)); 501 ISCSI_TMF_REQUEST_TYPE_SHIFT));
501 nopout_wqe->ttt = ttt; 502 nopout_wqe->ttt = nopout_hdr->ttt;
502 nopout_wqe->flags = 0; 503 nopout_wqe->flags = 0;
503 if (!unsol) 504 if (!unsol)
504 nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION; 505 nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index a796f565f383..50c2aa3b8eb1 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -17,15 +17,17 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
17static u32 adapter_count; 17static u32 adapter_count;
18 18
19#define DRV_MODULE_NAME "bnx2i" 19#define DRV_MODULE_NAME "bnx2i"
20#define DRV_MODULE_VERSION "2.1.2" 20#define DRV_MODULE_VERSION "2.1.3"
21#define DRV_MODULE_RELDATE "Jun 28, 2010" 21#define DRV_MODULE_RELDATE "Aug 10, 2010"
22 22
23static char version[] __devinitdata = 23static char version[] __devinitdata =
24 "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \ 24 "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
25 " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 25 " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
26 26
27 27
28MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com>"); 28MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com> and "
29 "Eddie Wai <eddie.wai@broadcom.com>");
30
29MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/57710/57711" 31MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/57710/57711"
30 " iSCSI Driver"); 32 " iSCSI Driver");
31MODULE_LICENSE("GPL"); 33MODULE_LICENSE("GPL");
@@ -167,6 +169,38 @@ void bnx2i_start(void *handle)
167 169
168 170
169/** 171/**
172 * bnx2i_chip_cleanup - local routine to handle chip cleanup
173 * @hba: Adapter instance to register
174 *
175 * Driver checks if adapter still has any active connections before
176 * executing the cleanup process
177 */
178static void bnx2i_chip_cleanup(struct bnx2i_hba *hba)
179{
180 struct bnx2i_endpoint *bnx2i_ep;
181 struct list_head *pos, *tmp;
182
183 if (hba->ofld_conns_active) {
184 /* Stage to force the disconnection
185 * This is the case where the daemon is either slow or
186 * not present
187 */
188 printk(KERN_ALERT "bnx2i: (%s) chip cleanup for %d active "
189 "connections\n", hba->netdev->name,
190 hba->ofld_conns_active);
191 mutex_lock(&hba->net_dev_lock);
192 list_for_each_safe(pos, tmp, &hba->ep_active_list) {
193 bnx2i_ep = list_entry(pos, struct bnx2i_endpoint, link);
194 /* Clean up the chip only */
195 bnx2i_hw_ep_disconnect(bnx2i_ep);
196 bnx2i_ep->cm_sk = NULL;
197 }
198 mutex_unlock(&hba->net_dev_lock);
199 }
200}
201
202
203/**
170 * bnx2i_stop - cnic callback to shutdown adapter instance 204 * bnx2i_stop - cnic callback to shutdown adapter instance
171 * @handle: transparent handle pointing to adapter structure 205 * @handle: transparent handle pointing to adapter structure
172 * 206 *
@@ -176,8 +210,6 @@ void bnx2i_start(void *handle)
176void bnx2i_stop(void *handle) 210void bnx2i_stop(void *handle)
177{ 211{
178 struct bnx2i_hba *hba = handle; 212 struct bnx2i_hba *hba = handle;
179 struct list_head *pos, *tmp;
180 struct bnx2i_endpoint *bnx2i_ep;
181 int conns_active; 213 int conns_active;
182 214
183 /* check if cleanup happened in GOING_DOWN context */ 215 /* check if cleanup happened in GOING_DOWN context */
@@ -198,24 +230,7 @@ void bnx2i_stop(void *handle)
198 if (hba->ofld_conns_active == conns_active) 230 if (hba->ofld_conns_active == conns_active)
199 break; 231 break;
200 } 232 }
201 if (hba->ofld_conns_active) { 233 bnx2i_chip_cleanup(hba);
202 /* Stage to force the disconnection
203 * This is the case where the daemon is either slow or
204 * not present
205 */
206 printk(KERN_ALERT "bnx2i: Wait timeout, force all eps "
207 "to disconnect (%d)\n", hba->ofld_conns_active);
208 mutex_lock(&hba->net_dev_lock);
209 list_for_each_safe(pos, tmp, &hba->ep_active_list) {
210 bnx2i_ep = list_entry(pos, struct bnx2i_endpoint, link);
211 /* Clean up the chip only */
212 bnx2i_hw_ep_disconnect(bnx2i_ep);
213 }
214 mutex_unlock(&hba->net_dev_lock);
215 if (hba->ofld_conns_active)
216 printk(KERN_ERR "bnx2i: EP disconnect timeout (%d)!\n",
217 hba->ofld_conns_active);
218 }
219 234
220 /* This flag should be cleared last so that ep_disconnect() gracefully 235 /* This flag should be cleared last so that ep_disconnect() gracefully
221 * cleans up connection context 236 * cleans up connection context
@@ -457,6 +472,7 @@ static void __exit bnx2i_mod_exit(void)
457 adapter_count--; 472 adapter_count--;
458 473
459 if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { 474 if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
475 bnx2i_chip_cleanup(hba);
460 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); 476 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
461 clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); 477 clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
462 } 478 }
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index a46ccc380ab1..fb50efbce087 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1078,11 +1078,9 @@ static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task)
1078 buf = bnx2i_conn->gen_pdu.req_buf; 1078 buf = bnx2i_conn->gen_pdu.req_buf;
1079 if (data_len) 1079 if (data_len)
1080 rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task, 1080 rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
1081 RESERVED_ITT,
1082 buf, data_len, 1); 1081 buf, data_len, 1);
1083 else 1082 else
1084 rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task, 1083 rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
1085 RESERVED_ITT,
1086 NULL, 0, 1); 1084 NULL, 0, 1);
1087 break; 1085 break;
1088 case ISCSI_OP_LOGOUT: 1086 case ISCSI_OP_LOGOUT:
@@ -1955,6 +1953,9 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)
1955 if (!cnic) 1953 if (!cnic)
1956 return 0; 1954 return 0;
1957 1955
1956 if (bnx2i_ep->state == EP_STATE_IDLE)
1957 return 0;
1958
1958 if (!bnx2i_ep_tcp_conn_active(bnx2i_ep)) 1959 if (!bnx2i_ep_tcp_conn_active(bnx2i_ep))
1959 goto destroy_conn; 1960 goto destroy_conn;
1960 1961
@@ -1998,11 +1999,13 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)
1998 else 1999 else
1999 close_ret = cnic->cm_abort(bnx2i_ep->cm_sk); 2000 close_ret = cnic->cm_abort(bnx2i_ep->cm_sk);
2000 2001
2002 /* No longer allow CFC delete if cm_close/abort fails the request */
2001 if (close_ret) 2003 if (close_ret)
2002 bnx2i_ep->state = EP_STATE_DISCONN_COMPL; 2004 printk(KERN_ALERT "bnx2i: %s close/abort(%d) returned %d\n",
2003 2005 bnx2i_ep->hba->netdev->name, close, close_ret);
2004 /* wait for option-2 conn teardown */ 2006 else
2005 wait_event_interruptible(bnx2i_ep->ofld_wait, 2007 /* wait for option-2 conn teardown */
2008 wait_event_interruptible(bnx2i_ep->ofld_wait,
2006 bnx2i_ep->state != EP_STATE_DISCONN_START); 2009 bnx2i_ep->state != EP_STATE_DISCONN_START);
2007 2010
2008 if (signal_pending(current)) 2011 if (signal_pending(current))
diff --git a/drivers/scsi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgb3i/cxgb3i.h
deleted file mode 100644
index e3133b58e594..000000000000
--- a/drivers/scsi/cxgb3i/cxgb3i.h
+++ /dev/null
@@ -1,161 +0,0 @@
1/*
2 * cxgb3i.h: Chelsio S3xx iSCSI driver.
3 *
4 * Copyright (c) 2008 Chelsio Communications, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Written by: Karen Xie (kxie@chelsio.com)
11 */
12
13#ifndef __CXGB3I_H__
14#define __CXGB3I_H__
15
16#include <linux/module.h>
17#include <linux/moduleparam.h>
18#include <linux/errno.h>
19#include <linux/types.h>
20#include <linux/list.h>
21#include <linux/netdevice.h>
22#include <linux/scatterlist.h>
23#include <linux/skbuff.h>
24#include <scsi/libiscsi_tcp.h>
25
26/* from cxgb3 LLD */
27#include "common.h"
28#include "t3_cpl.h"
29#include "t3cdev.h"
30#include "cxgb3_ctl_defs.h"
31#include "cxgb3_offload.h"
32#include "firmware_exports.h"
33
34#include "cxgb3i_offload.h"
35#include "cxgb3i_ddp.h"
36
37#define CXGB3I_SCSI_HOST_QDEPTH 1024
38#define CXGB3I_MAX_TARGET CXGB3I_MAX_CONN
39#define CXGB3I_MAX_LUN 512
40#define ISCSI_PDU_NONPAYLOAD_MAX \
41 (sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE + 2*ISCSI_DIGEST_SIZE)
42
43struct cxgb3i_adapter;
44struct cxgb3i_hba;
45struct cxgb3i_endpoint;
46
47/**
48 * struct cxgb3i_hba - cxgb3i iscsi structure (per port)
49 *
50 * @snic: cxgb3i adapter containing this port
51 * @ndev: pointer to netdev structure
52 * @shost: pointer to scsi host structure
53 */
54struct cxgb3i_hba {
55 struct cxgb3i_adapter *snic;
56 struct net_device *ndev;
57 struct Scsi_Host *shost;
58};
59
60/**
61 * struct cxgb3i_adapter - cxgb3i adapter structure (per pci)
62 *
63 * @listhead: list head to link elements
64 * @lock: lock for this structure
65 * @tdev: pointer to t3cdev used by cxgb3 driver
66 * @pdev: pointer to pci dev
67 * @hba_cnt: # of hbas (the same as # of ports)
68 * @hba: all the hbas on this adapter
69 * @flags: bit flag for adapter event/status
70 * @tx_max_size: max. tx packet size supported
71 * @rx_max_size: max. rx packet size supported
72 * @tag_format: ddp tag format settings
73 */
74#define CXGB3I_ADAPTER_FLAG_RESET 0x1
75struct cxgb3i_adapter {
76 struct list_head list_head;
77 spinlock_t lock;
78 struct t3cdev *tdev;
79 struct pci_dev *pdev;
80 unsigned char hba_cnt;
81 struct cxgb3i_hba *hba[MAX_NPORTS];
82
83 unsigned int flags;
84 unsigned int tx_max_size;
85 unsigned int rx_max_size;
86
87 struct cxgb3i_tag_format tag_format;
88};
89
90/**
91 * struct cxgb3i_conn - cxgb3i iscsi connection
92 *
93 * @listhead: list head to link elements
94 * @cep: pointer to iscsi_endpoint structure
95 * @conn: pointer to iscsi_conn structure
96 * @hba: pointer to the hba this conn. is going through
97 * @task_idx_bits: # of bits needed for session->cmds_max
98 */
99struct cxgb3i_conn {
100 struct list_head list_head;
101 struct cxgb3i_endpoint *cep;
102 struct iscsi_conn *conn;
103 struct cxgb3i_hba *hba;
104 unsigned int task_idx_bits;
105};
106
107/**
108 * struct cxgb3i_endpoint - iscsi tcp endpoint
109 *
110 * @c3cn: the h/w tcp connection representation
111 * @hba: pointer to the hba this conn. is going through
112 * @cconn: pointer to the associated cxgb3i iscsi connection
113 */
114struct cxgb3i_endpoint {
115 struct s3_conn *c3cn;
116 struct cxgb3i_hba *hba;
117 struct cxgb3i_conn *cconn;
118};
119
120/**
121 * struct cxgb3i_task_data - private iscsi task data
122 *
123 * @nr_frags: # of coalesced page frags (from scsi sgl)
124 * @frags: coalesced page frags (from scsi sgl)
125 * @skb: tx pdu skb
126 * @offset: data offset for the next pdu
127 * @count: max. possible pdu payload
128 * @sgoffset: offset to the first sg entry for a given offset
129 */
130#define MAX_PDU_FRAGS ((ULP2_MAX_PDU_PAYLOAD + 512 - 1) / 512)
131struct cxgb3i_task_data {
132 unsigned short nr_frags;
133 skb_frag_t frags[MAX_PDU_FRAGS];
134 struct sk_buff *skb;
135 unsigned int offset;
136 unsigned int count;
137 unsigned int sgoffset;
138};
139
140int cxgb3i_iscsi_init(void);
141void cxgb3i_iscsi_cleanup(void);
142
143struct cxgb3i_adapter *cxgb3i_adapter_find_by_tdev(struct t3cdev *);
144void cxgb3i_adapter_open(struct t3cdev *);
145void cxgb3i_adapter_close(struct t3cdev *);
146
147struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *,
148 struct net_device *);
149void cxgb3i_hba_host_remove(struct cxgb3i_hba *);
150
151int cxgb3i_pdu_init(void);
152void cxgb3i_pdu_cleanup(void);
153void cxgb3i_conn_cleanup_task(struct iscsi_task *);
154int cxgb3i_conn_alloc_pdu(struct iscsi_task *, u8);
155int cxgb3i_conn_init_pdu(struct iscsi_task *, unsigned int, unsigned int);
156int cxgb3i_conn_xmit_pdu(struct iscsi_task *);
157
158void cxgb3i_release_itt(struct iscsi_task *task, itt_t hdr_itt);
159int cxgb3i_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt);
160
161#endif
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.c b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
deleted file mode 100644
index be0e23042c76..000000000000
--- a/drivers/scsi/cxgb3i/cxgb3i_ddp.c
+++ /dev/null
@@ -1,773 +0,0 @@
1/*
2 * cxgb3i_ddp.c: Chelsio S3xx iSCSI DDP Manager.
3 *
4 * Copyright (c) 2008 Chelsio Communications, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Written by: Karen Xie (kxie@chelsio.com)
11 */
12
13#include <linux/slab.h>
14#include <linux/skbuff.h>
15#include <linux/scatterlist.h>
16
17/* from cxgb3 LLD */
18#include "common.h"
19#include "t3_cpl.h"
20#include "t3cdev.h"
21#include "cxgb3_ctl_defs.h"
22#include "cxgb3_offload.h"
23#include "firmware_exports.h"
24
25#include "cxgb3i_ddp.h"
26
27#define ddp_log_error(fmt...) printk(KERN_ERR "cxgb3i_ddp: ERR! " fmt)
28#define ddp_log_warn(fmt...) printk(KERN_WARNING "cxgb3i_ddp: WARN! " fmt)
29#define ddp_log_info(fmt...) printk(KERN_INFO "cxgb3i_ddp: " fmt)
30
31#ifdef __DEBUG_CXGB3I_DDP__
32#define ddp_log_debug(fmt, args...) \
33 printk(KERN_INFO "cxgb3i_ddp: %s - " fmt, __func__ , ## args)
34#else
35#define ddp_log_debug(fmt...)
36#endif
37
38/*
39 * iSCSI Direct Data Placement
40 *
41 * T3 h/w can directly place the iSCSI Data-In or Data-Out PDU's payload into
42 * pre-posted final destination host-memory buffers based on the Initiator
43 * Task Tag (ITT) in Data-In or Target Task Tag (TTT) in Data-Out PDUs.
44 *
45 * The host memory address is programmed into h/w in the format of pagepod
46 * entries.
47 * The location of the pagepod entry is encoded into ddp tag which is used or
48 * is the base for ITT/TTT.
49 */
50
51#define DDP_PGIDX_MAX 4
52#define DDP_THRESHOLD 2048
53static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4};
54static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16};
55static unsigned char page_idx = DDP_PGIDX_MAX;
56
57/*
58 * functions to program the pagepod in h/w
59 */
60static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr)
61{
62 struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head;
63
64 req->wr.wr_lo = 0;
65 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS));
66 req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) |
67 V_ULPTX_CMD(ULP_MEM_WRITE));
68 req->len = htonl(V_ULP_MEMIO_DATA_LEN(PPOD_SIZE >> 5) |
69 V_ULPTX_NFLITS((PPOD_SIZE >> 3) + 1));
70}
71
72static int set_ddp_map(struct cxgb3i_ddp_info *ddp, struct pagepod_hdr *hdr,
73 unsigned int idx, unsigned int npods,
74 struct cxgb3i_gather_list *gl)
75{
76 unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
77 int i;
78
79 for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
80 struct sk_buff *skb = ddp->gl_skb[idx];
81 struct pagepod *ppod;
82 int j, pidx;
83
84 /* hold on to the skb until we clear the ddp mapping */
85 skb_get(skb);
86
87 ulp_mem_io_set_hdr(skb, pm_addr);
88 ppod = (struct pagepod *)
89 (skb->head + sizeof(struct ulp_mem_io));
90 memcpy(&(ppod->hdr), hdr, sizeof(struct pagepod));
91 for (pidx = 4 * i, j = 0; j < 5; ++j, ++pidx)
92 ppod->addr[j] = pidx < gl->nelem ?
93 cpu_to_be64(gl->phys_addr[pidx]) : 0UL;
94
95 skb->priority = CPL_PRIORITY_CONTROL;
96 cxgb3_ofld_send(ddp->tdev, skb);
97 }
98 return 0;
99}
100
101static void clear_ddp_map(struct cxgb3i_ddp_info *ddp, unsigned int tag,
102 unsigned int idx, unsigned int npods)
103{
104 unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
105 int i;
106
107 for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
108 struct sk_buff *skb = ddp->gl_skb[idx];
109
110 if (!skb) {
111 ddp_log_error("ddp tag 0x%x, 0x%x, %d/%u, skb NULL.\n",
112 tag, idx, i, npods);
113 continue;
114 }
115 ddp->gl_skb[idx] = NULL;
116 memset((skb->head + sizeof(struct ulp_mem_io)), 0, PPOD_SIZE);
117 ulp_mem_io_set_hdr(skb, pm_addr);
118 skb->priority = CPL_PRIORITY_CONTROL;
119 cxgb3_ofld_send(ddp->tdev, skb);
120 }
121}
122
123static inline int ddp_find_unused_entries(struct cxgb3i_ddp_info *ddp,
124 unsigned int start, unsigned int max,
125 unsigned int count,
126 struct cxgb3i_gather_list *gl)
127{
128 unsigned int i, j, k;
129
130 /* not enough entries */
131 if ((max - start) < count)
132 return -EBUSY;
133
134 max -= count;
135 spin_lock(&ddp->map_lock);
136 for (i = start; i < max;) {
137 for (j = 0, k = i; j < count; j++, k++) {
138 if (ddp->gl_map[k])
139 break;
140 }
141 if (j == count) {
142 for (j = 0, k = i; j < count; j++, k++)
143 ddp->gl_map[k] = gl;
144 spin_unlock(&ddp->map_lock);
145 return i;
146 }
147 i += j + 1;
148 }
149 spin_unlock(&ddp->map_lock);
150 return -EBUSY;
151}
152
153static inline void ddp_unmark_entries(struct cxgb3i_ddp_info *ddp,
154 int start, int count)
155{
156 spin_lock(&ddp->map_lock);
157 memset(&ddp->gl_map[start], 0,
158 count * sizeof(struct cxgb3i_gather_list *));
159 spin_unlock(&ddp->map_lock);
160}
161
162static inline void ddp_free_gl_skb(struct cxgb3i_ddp_info *ddp,
163 int idx, int count)
164{
165 int i;
166
167 for (i = 0; i < count; i++, idx++)
168 if (ddp->gl_skb[idx]) {
169 kfree_skb(ddp->gl_skb[idx]);
170 ddp->gl_skb[idx] = NULL;
171 }
172}
173
174static inline int ddp_alloc_gl_skb(struct cxgb3i_ddp_info *ddp, int idx,
175 int count, gfp_t gfp)
176{
177 int i;
178
179 for (i = 0; i < count; i++) {
180 struct sk_buff *skb = alloc_skb(sizeof(struct ulp_mem_io) +
181 PPOD_SIZE, gfp);
182 if (skb) {
183 ddp->gl_skb[idx + i] = skb;
184 skb_put(skb, sizeof(struct ulp_mem_io) + PPOD_SIZE);
185 } else {
186 ddp_free_gl_skb(ddp, idx, i);
187 return -ENOMEM;
188 }
189 }
190 return 0;
191}
192
193/**
194 * cxgb3i_ddp_find_page_index - return ddp page index for a given page size
195 * @pgsz: page size
196 * return the ddp page index, if no match is found return DDP_PGIDX_MAX.
197 */
198int cxgb3i_ddp_find_page_index(unsigned long pgsz)
199{
200 int i;
201
202 for (i = 0; i < DDP_PGIDX_MAX; i++) {
203 if (pgsz == (1UL << ddp_page_shift[i]))
204 return i;
205 }
206 ddp_log_debug("ddp page size 0x%lx not supported.\n", pgsz);
207 return DDP_PGIDX_MAX;
208}
209
210/**
211 * cxgb3i_ddp_adjust_page_table - adjust page table with PAGE_SIZE
212 * return the ddp page index, if no match is found return DDP_PGIDX_MAX.
213 */
214int cxgb3i_ddp_adjust_page_table(void)
215{
216 int i;
217 unsigned int base_order, order;
218
219 if (PAGE_SIZE < (1UL << ddp_page_shift[0])) {
220 ddp_log_info("PAGE_SIZE 0x%lx too small, min. 0x%lx.\n",
221 PAGE_SIZE, 1UL << ddp_page_shift[0]);
222 return -EINVAL;
223 }
224
225 base_order = get_order(1UL << ddp_page_shift[0]);
226 order = get_order(1 << PAGE_SHIFT);
227 for (i = 0; i < DDP_PGIDX_MAX; i++) {
228 /* first is the kernel page size, then just doubling the size */
229 ddp_page_order[i] = order - base_order + i;
230 ddp_page_shift[i] = PAGE_SHIFT + i;
231 }
232 return 0;
233}
234
235static inline void ddp_gl_unmap(struct pci_dev *pdev,
236 struct cxgb3i_gather_list *gl)
237{
238 int i;
239
240 for (i = 0; i < gl->nelem; i++)
241 pci_unmap_page(pdev, gl->phys_addr[i], PAGE_SIZE,
242 PCI_DMA_FROMDEVICE);
243}
244
245static inline int ddp_gl_map(struct pci_dev *pdev,
246 struct cxgb3i_gather_list *gl)
247{
248 int i;
249
250 for (i = 0; i < gl->nelem; i++) {
251 gl->phys_addr[i] = pci_map_page(pdev, gl->pages[i], 0,
252 PAGE_SIZE,
253 PCI_DMA_FROMDEVICE);
254 if (unlikely(pci_dma_mapping_error(pdev, gl->phys_addr[i])))
255 goto unmap;
256 }
257
258 return i;
259
260unmap:
261 if (i) {
262 unsigned int nelem = gl->nelem;
263
264 gl->nelem = i;
265 ddp_gl_unmap(pdev, gl);
266 gl->nelem = nelem;
267 }
268 return -ENOMEM;
269}
270
/**
 * cxgb3i_ddp_make_gl - build ddp page buffer list
 * @xferlen: total buffer length
 * @sgl: page buffer scatter-gather list
 * @sgcnt: # of page buffers
 * @pdev: pci_dev, used for pci map
 * @gfp: allocation mode
 *
 * construct a ddp page buffer list from the scsi scattergather list.
 * coalesce buffers as much as possible, and obtain dma addresses for
 * each page.
 *
 * Return the cxgb3i_gather_list constructed from the page buffers if the
 * memory can be used for ddp. Return NULL otherwise.
 */
struct cxgb3i_gather_list *cxgb3i_ddp_make_gl(unsigned int xferlen,
					      struct scatterlist *sgl,
					      unsigned int sgcnt,
					      struct pci_dev *pdev,
					      gfp_t gfp)
{
	struct cxgb3i_gather_list *gl;
	struct scatterlist *sg = sgl;
	struct page *sgpage = sg_page(sg);
	unsigned int sglen = sg->length;
	unsigned int sgoffset = sg->offset;
	/* worst-case page count to cover xferlen starting at sgoffset */
	unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >>
			      PAGE_SHIFT;
	int i = 1, j = 0;

	/* ddp only pays off for sufficiently large transfers */
	if (xferlen < DDP_THRESHOLD) {
		ddp_log_debug("xfer %u < threshold %u, no ddp.\n",
			      xferlen, DDP_THRESHOLD);
		return NULL;
	}

	/* one zeroed allocation holds the header, phys_addr[npages] and the
	 * pages[] array placed immediately after it */
	gl = kzalloc(sizeof(struct cxgb3i_gather_list) +
		     npages * (sizeof(dma_addr_t) + sizeof(struct page *)),
		     gfp);
	if (!gl)
		return NULL;

	gl->pages = (struct page **)&gl->phys_addr[npages];
	gl->length = xferlen;
	gl->offset = sgoffset;
	gl->pages[0] = sgpage;

	sg = sg_next(sg);
	while (sg) {
		struct page *page = sg_page(sg);

		/* adjacent entries on the same page are coalesced */
		if (sgpage == page && sg->offset == sgoffset + sglen)
			sglen += sg->length;
		else {
			/* make sure the sgl is fit for ddp:
			 * each has the same page size, and
			 * all of the middle pages are used completely
			 */
			if ((j && sgoffset) ||
			    ((i != sgcnt - 1) &&
			     ((sglen + sgoffset) & ~PAGE_MASK)))
				goto error_out;

			j++;
			/* NOTE(review): gl->nelem is still 0 here (kzalloc)
			 * and is only set after the loop, so the
			 * j == gl->nelem test can never trigger; only the
			 * sg->offset check is effective — confirm intent */
			if (j == gl->nelem || sg->offset)
				goto error_out;
			gl->pages[j] = page;
			sglen = sg->length;
			sgoffset = sg->offset;
			sgpage = page;
		}
		i++;
		sg = sg_next(sg);
	}
	gl->nelem = ++j;

	if (ddp_gl_map(pdev, gl) < 0)
		goto error_out;

	return gl;

error_out:
	kfree(gl);
	return NULL;
}
356
/**
 * cxgb3i_ddp_release_gl - release a page buffer list
 * @gl: a ddp page buffer list
 * @pdev: pci_dev used for pci_unmap
 * free a ddp page buffer list resulted from cxgb3i_ddp_make_gl().
 */
void cxgb3i_ddp_release_gl(struct cxgb3i_gather_list *gl,
			   struct pci_dev *pdev)
{
	/* undo the dma mappings first, then drop the list itself */
	ddp_gl_unmap(pdev, gl);
	kfree(gl);
}
369
370/**
371 * cxgb3i_ddp_tag_reserve - set up ddp for a data transfer
372 * @tdev: t3cdev adapter
373 * @tid: connection id
374 * @tformat: tag format
375 * @tagp: contains s/w tag initially, will be updated with ddp/hw tag
376 * @gl: the page momory list
377 * @gfp: allocation mode
378 *
379 * ddp setup for a given page buffer list and construct the ddp tag.
380 * return 0 if success, < 0 otherwise.
381 */
382int cxgb3i_ddp_tag_reserve(struct t3cdev *tdev, unsigned int tid,
383 struct cxgb3i_tag_format *tformat, u32 *tagp,
384 struct cxgb3i_gather_list *gl, gfp_t gfp)
385{
386 struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
387 struct pagepod_hdr hdr;
388 unsigned int npods;
389 int idx = -1;
390 int err = -ENOMEM;
391 u32 sw_tag = *tagp;
392 u32 tag;
393
394 if (page_idx >= DDP_PGIDX_MAX || !ddp || !gl || !gl->nelem ||
395 gl->length < DDP_THRESHOLD) {
396 ddp_log_debug("pgidx %u, xfer %u/%u, NO ddp.\n",
397 page_idx, gl->length, DDP_THRESHOLD);
398 return -EINVAL;
399 }
400
401 npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
402
403 if (ddp->idx_last == ddp->nppods)
404 idx = ddp_find_unused_entries(ddp, 0, ddp->nppods, npods, gl);
405 else {
406 idx = ddp_find_unused_entries(ddp, ddp->idx_last + 1,
407 ddp->nppods, npods, gl);
408 if (idx < 0 && ddp->idx_last >= npods) {
409 idx = ddp_find_unused_entries(ddp, 0,
410 min(ddp->idx_last + npods, ddp->nppods),
411 npods, gl);
412 }
413 }
414 if (idx < 0) {
415 ddp_log_debug("xferlen %u, gl %u, npods %u NO DDP.\n",
416 gl->length, gl->nelem, npods);
417 return idx;
418 }
419
420 err = ddp_alloc_gl_skb(ddp, idx, npods, gfp);
421 if (err < 0)
422 goto unmark_entries;
423
424 tag = cxgb3i_ddp_tag_base(tformat, sw_tag);
425 tag |= idx << PPOD_IDX_SHIFT;
426
427 hdr.rsvd = 0;
428 hdr.vld_tid = htonl(F_PPOD_VALID | V_PPOD_TID(tid));
429 hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask);
430 hdr.maxoffset = htonl(gl->length);
431 hdr.pgoffset = htonl(gl->offset);
432
433 err = set_ddp_map(ddp, &hdr, idx, npods, gl);
434 if (err < 0)
435 goto free_gl_skb;
436
437 ddp->idx_last = idx;
438 ddp_log_debug("xfer %u, gl %u,%u, tid 0x%x, 0x%x -> 0x%x(%u,%u).\n",
439 gl->length, gl->nelem, gl->offset, tid, sw_tag, tag,
440 idx, npods);
441 *tagp = tag;
442 return 0;
443
444free_gl_skb:
445 ddp_free_gl_skb(ddp, idx, npods);
446unmark_entries:
447 ddp_unmark_entries(ddp, idx, npods);
448 return err;
449}
450
451/**
452 * cxgb3i_ddp_tag_release - release a ddp tag
453 * @tdev: t3cdev adapter
454 * @tag: ddp tag
455 * ddp cleanup for a given ddp tag and release all the resources held
456 */
457void cxgb3i_ddp_tag_release(struct t3cdev *tdev, u32 tag)
458{
459 struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
460 u32 idx;
461
462 if (!ddp) {
463 ddp_log_error("release ddp tag 0x%x, ddp NULL.\n", tag);
464 return;
465 }
466
467 idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask;
468 if (idx < ddp->nppods) {
469 struct cxgb3i_gather_list *gl = ddp->gl_map[idx];
470 unsigned int npods;
471
472 if (!gl || !gl->nelem) {
473 ddp_log_error("release 0x%x, idx 0x%x, gl 0x%p, %u.\n",
474 tag, idx, gl, gl ? gl->nelem : 0);
475 return;
476 }
477 npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
478 ddp_log_debug("ddp tag 0x%x, release idx 0x%x, npods %u.\n",
479 tag, idx, npods);
480 clear_ddp_map(ddp, tag, idx, npods);
481 ddp_unmark_entries(ddp, idx, npods);
482 cxgb3i_ddp_release_gl(gl, ddp->pdev);
483 } else
484 ddp_log_error("ddp tag 0x%x, idx 0x%x > max 0x%x.\n",
485 tag, idx, ddp->nppods);
486}
487
488static int setup_conn_pgidx(struct t3cdev *tdev, unsigned int tid, int pg_idx,
489 int reply)
490{
491 struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
492 GFP_KERNEL);
493 struct cpl_set_tcb_field *req;
494 u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0;
495
496 if (!skb)
497 return -ENOMEM;
498
499 /* set up ulp submode and page size */
500 req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
501 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
502 req->wr.wr_lo = 0;
503 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
504 req->reply = V_NO_REPLY(reply ? 0 : 1);
505 req->cpu_idx = 0;
506 req->word = htons(31);
507 req->mask = cpu_to_be64(0xF0000000);
508 req->val = cpu_to_be64(val << 28);
509 skb->priority = CPL_PRIORITY_CONTROL;
510
511 cxgb3_ofld_send(tdev, skb);
512 return 0;
513}
514
/**
 * cxgb3i_setup_conn_host_pagesize - setup the conn.'s ddp page size
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @reply: request reply from h/w
 * set up the ddp page size based on the host PAGE_SIZE for a connection
 * identified by tid
 */
int cxgb3i_setup_conn_host_pagesize(struct t3cdev *tdev, unsigned int tid,
				    int reply)
{
	/* page_idx caches the ddp page index matching the host PAGE_SIZE;
	 * it is resolved once in cxgb3i_ddp_init() */
	return setup_conn_pgidx(tdev, tid, page_idx, reply);
}
528
/**
 * cxgb3i_setup_conn_pagesize - setup the conn.'s ddp page size
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @reply: request reply from h/w
 * @pgsz: ddp page size
 * set up the ddp page size for a connection identified by tid
 */
int cxgb3i_setup_conn_pagesize(struct t3cdev *tdev, unsigned int tid,
			       int reply, unsigned long pgsz)
{
	/* map the requested page size to its ddp page index */
	return setup_conn_pgidx(tdev, tid, cxgb3i_ddp_find_page_index(pgsz),
				reply);
}
544
545/**
546 * cxgb3i_setup_conn_digest - setup conn. digest setting
547 * @tdev: t3cdev adapter
548 * @tid: connection id
549 * @hcrc: header digest enabled
550 * @dcrc: data digest enabled
551 * @reply: request reply from h/w
552 * set up the iscsi digest settings for a connection identified by tid
553 */
554int cxgb3i_setup_conn_digest(struct t3cdev *tdev, unsigned int tid,
555 int hcrc, int dcrc, int reply)
556{
557 struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
558 GFP_KERNEL);
559 struct cpl_set_tcb_field *req;
560 u64 val = (hcrc ? 1 : 0) | (dcrc ? 2 : 0);
561
562 if (!skb)
563 return -ENOMEM;
564
565 /* set up ulp submode and page size */
566 req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
567 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
568 req->wr.wr_lo = 0;
569 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
570 req->reply = V_NO_REPLY(reply ? 0 : 1);
571 req->cpu_idx = 0;
572 req->word = htons(31);
573 req->mask = cpu_to_be64(0x0F000000);
574 req->val = cpu_to_be64(val << 24);
575 skb->priority = CPL_PRIORITY_CONTROL;
576
577 cxgb3_ofld_send(tdev, skb);
578 return 0;
579}
580
581
582/**
583 * cxgb3i_adapter_ddp_info - read the adapter's ddp information
584 * @tdev: t3cdev adapter
585 * @tformat: tag format
586 * @txsz: max tx pdu payload size, filled in by this func.
587 * @rxsz: max rx pdu payload size, filled in by this func.
588 * setup the tag format for a given iscsi entity
589 */
590int cxgb3i_adapter_ddp_info(struct t3cdev *tdev,
591 struct cxgb3i_tag_format *tformat,
592 unsigned int *txsz, unsigned int *rxsz)
593{
594 struct cxgb3i_ddp_info *ddp;
595 unsigned char idx_bits;
596
597 if (!tformat)
598 return -EINVAL;
599
600 if (!tdev->ulp_iscsi)
601 return -EINVAL;
602
603 ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi;
604
605 idx_bits = 32 - tformat->sw_bits;
606 tformat->rsvd_bits = ddp->idx_bits;
607 tformat->rsvd_shift = PPOD_IDX_SHIFT;
608 tformat->rsvd_mask = (1 << tformat->rsvd_bits) - 1;
609
610 ddp_log_info("tag format: sw %u, rsvd %u,%u, mask 0x%x.\n",
611 tformat->sw_bits, tformat->rsvd_bits,
612 tformat->rsvd_shift, tformat->rsvd_mask);
613
614 *txsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
615 ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN);
616 *rxsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
617 ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN);
618 ddp_log_info("max payload size: %u/%u, %u/%u.\n",
619 *txsz, ddp->max_txsz, *rxsz, ddp->max_rxsz);
620 return 0;
621}
622
623/**
624 * cxgb3i_ddp_cleanup - release the cxgb3 adapter's ddp resource
625 * @tdev: t3cdev adapter
626 * release all the resource held by the ddp pagepod manager for a given
627 * adapter if needed
628 */
629
630static void ddp_cleanup(struct kref *kref)
631{
632 struct cxgb3i_ddp_info *ddp = container_of(kref,
633 struct cxgb3i_ddp_info,
634 refcnt);
635 int i = 0;
636
637 ddp_log_info("kref release ddp 0x%p, t3dev 0x%p.\n", ddp, ddp->tdev);
638
639 ddp->tdev->ulp_iscsi = NULL;
640 while (i < ddp->nppods) {
641 struct cxgb3i_gather_list *gl = ddp->gl_map[i];
642 if (gl) {
643 int npods = (gl->nelem + PPOD_PAGES_MAX - 1)
644 >> PPOD_PAGES_SHIFT;
645 ddp_log_info("t3dev 0x%p, ddp %d + %d.\n",
646 ddp->tdev, i, npods);
647 kfree(gl);
648 ddp_free_gl_skb(ddp, i, npods);
649 i += npods;
650 } else
651 i++;
652 }
653 cxgb3i_free_big_mem(ddp);
654}
655
656void cxgb3i_ddp_cleanup(struct t3cdev *tdev)
657{
658 struct cxgb3i_ddp_info *ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi;
659
660 ddp_log_info("t3dev 0x%p, release ddp 0x%p.\n", tdev, ddp);
661 if (ddp)
662 kref_put(&ddp->refcnt, ddp_cleanup);
663}
664
/**
 * ddp_init - initialize the cxgb3 adapter's ddp resource
 * @tdev: t3cdev adapter
 * initialize the ddp pagepod manager for a given adapter
 */
static void ddp_init(struct t3cdev *tdev)
{
	struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
	struct ulp_iscsi_info uinfo;
	unsigned int ppmax, bits;
	int i, err;

	/* already set up: just take another reference */
	if (ddp) {
		kref_get(&ddp->refcnt);
		ddp_log_warn("t3dev 0x%p, ddp 0x%p already set up.\n",
			     tdev, tdev->ulp_iscsi);
		return;
	}

	err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo);
	if (err < 0) {
		ddp_log_error("%s, failed to get iscsi param err=%d.\n",
			      tdev->name, err);
		return;
	}

	/* # of pagepods that fit in the adapter's pagepod memory window */
	ppmax = (uinfo.ulimit - uinfo.llimit + 1) >> PPOD_SIZE_SHIFT;
	bits = __ilog2_u32(ppmax) + 1;
	if (bits > PPOD_IDX_MAX_SIZE)
		bits = PPOD_IDX_MAX_SIZE;
	/* NOTE(review): this shrinks ppmax to (2^(bits-1) - 1) so an index
	 * always fits in the tag field — confirm the extra halving
	 * (vs. (1 << bits) - 1) is intentional */
	ppmax = (1 << (bits - 1)) - 1;

	/* one chunk: ddp info struct + gl_map[ppmax] + gl_skb[ppmax] */
	ddp = cxgb3i_alloc_big_mem(sizeof(struct cxgb3i_ddp_info) +
				   ppmax *
				   (sizeof(struct cxgb3i_gather_list *) +
				    sizeof(struct sk_buff *)),
				   GFP_KERNEL);
	if (!ddp) {
		ddp_log_warn("%s unable to alloc ddp 0x%d, ddp disabled.\n",
			     tdev->name, ppmax);
		return;
	}
	/* carve the two arrays out of the tail of the allocation */
	ddp->gl_map = (struct cxgb3i_gather_list **)(ddp + 1);
	ddp->gl_skb = (struct sk_buff **)(((char *)ddp->gl_map) +
					  ppmax *
					  sizeof(struct cxgb3i_gather_list *));
	spin_lock_init(&ddp->map_lock);
	kref_init(&ddp->refcnt);

	ddp->tdev = tdev;
	ddp->pdev = uinfo.pdev;
	ddp->max_txsz = min_t(unsigned int, uinfo.max_txsz, ULP2_MAX_PKT_SIZE);
	ddp->max_rxsz = min_t(unsigned int, uinfo.max_rxsz, ULP2_MAX_PKT_SIZE);
	ddp->llimit = uinfo.llimit;
	ddp->ulimit = uinfo.ulimit;
	ddp->nppods = ppmax;
	ddp->idx_last = ppmax;	/* == nppods forces first search from 0 */
	ddp->idx_bits = bits;
	ddp->idx_mask = (1 << bits) - 1;
	ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 1;

	/* push the tag mask, page sizes and trimmed window to the adapter */
	uinfo.tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
	for (i = 0; i < DDP_PGIDX_MAX; i++)
		uinfo.pgsz_factor[i] = ddp_page_order[i];
	uinfo.ulimit = uinfo.llimit + (ppmax << PPOD_SIZE_SHIFT);

	err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo);
	if (err < 0) {
		ddp_log_warn("%s unable to set iscsi param err=%d, "
			     "ddp disabled.\n", tdev->name, err);
		goto free_ddp_map;
	}

	tdev->ulp_iscsi = ddp;

	ddp_log_info("tdev 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u,"
		     " %u/%u.\n",
		     tdev, ppmax, ddp->idx_bits, ddp->idx_mask,
		     ddp->rsvd_tag_mask, ddp->max_txsz, uinfo.max_txsz,
		     ddp->max_rxsz, uinfo.max_rxsz);
	return;

free_ddp_map:
	cxgb3i_free_big_mem(ddp);
}
750
751/**
752 * cxgb3i_ddp_init - initialize ddp functions
753 */
754void cxgb3i_ddp_init(struct t3cdev *tdev)
755{
756 if (page_idx == DDP_PGIDX_MAX) {
757 page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE);
758
759 if (page_idx == DDP_PGIDX_MAX) {
760 ddp_log_info("system PAGE_SIZE %lu, update hw.\n",
761 PAGE_SIZE);
762 if (cxgb3i_ddp_adjust_page_table() < 0) {
763 ddp_log_info("PAGE_SIZE %lu, ddp disabled.\n",
764 PAGE_SIZE);
765 return;
766 }
767 page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE);
768 }
769 ddp_log_info("system PAGE_SIZE %lu, ddp idx %u.\n",
770 PAGE_SIZE, page_idx);
771 }
772 ddp_init(tdev);
773}
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.h b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
deleted file mode 100644
index 6761b329124d..000000000000
--- a/drivers/scsi/cxgb3i/cxgb3i_ddp.h
+++ /dev/null
@@ -1,312 +0,0 @@
1/*
2 * cxgb3i_ddp.h: Chelsio S3xx iSCSI DDP Manager.
3 *
4 * Copyright (c) 2008 Chelsio Communications, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Written by: Karen Xie (kxie@chelsio.com)
11 */
12
13#ifndef __CXGB3I_ULP2_DDP_H__
14#define __CXGB3I_ULP2_DDP_H__
15
16#include <linux/slab.h>
17#include <linux/vmalloc.h>
18
19/**
20 * struct cxgb3i_tag_format - cxgb3i ulp tag format for an iscsi entity
21 *
22 * @sw_bits: # of bits used by iscsi software layer
23 * @rsvd_bits: # of bits used by h/w
24 * @rsvd_shift: h/w bits shift left
25 * @rsvd_mask: reserved bit mask
26 */
struct cxgb3i_tag_format {
	unsigned char sw_bits;		/* # of tag bits owned by iscsi s/w */
	unsigned char rsvd_bits;	/* # of tag bits owned by h/w */
	unsigned char rsvd_shift;	/* left shift of the h/w bits */
	unsigned char filler[1];	/* pad to 4-byte alignment */
	u32 rsvd_mask;			/* mask of rsvd_bits (unshifted) */
};
34
35/**
36 * struct cxgb3i_gather_list - cxgb3i direct data placement memory
37 *
38 * @tag: ddp tag
39 * @length: total data buffer length
40 * @offset: initial offset to the 1st page
41 * @nelem: # of pages
42 * @pages: page pointers
43 * @phys_addr: physical address
44 */
struct cxgb3i_gather_list {
	u32 tag;			/* ddp tag for this buffer list */
	unsigned int length;		/* total data buffer length */
	unsigned int offset;		/* initial offset into the 1st page */
	unsigned int nelem;		/* # of pages */
	struct page **pages;		/* points just past phys_addr[nelem] */
	dma_addr_t phys_addr[0];	/* trailing dma address per page */
};
53
54/**
55 * struct cxgb3i_ddp_info - cxgb3i direct data placement for pdu payload
56 *
57 * @list: list head to link elements
58 * @refcnt: ref. count
59 * @tdev: pointer to t3cdev used by cxgb3 driver
60 * @max_txsz: max tx packet size for ddp
61 * @max_rxsz: max rx packet size for ddp
62 * @llimit: lower bound of the page pod memory
63 * @ulimit: upper bound of the page pod memory
64 * @nppods: # of page pod entries
65 * @idx_last: page pod entry last used
66 * @idx_bits: # of bits the pagepod index would take
67 * @idx_mask: pagepod index mask
68 * @rsvd_tag_mask: tag mask
69 * @map_lock: lock to synchonize access to the page pod map
70 * @gl_map: ddp memory gather list
71 * @gl_skb: skb used to program the pagepod
72 */
struct cxgb3i_ddp_info {
	struct list_head list;			/* list head linkage */
	struct kref refcnt;			/* released via ddp_cleanup() */
	struct t3cdev *tdev;			/* owning offload device */
	struct pci_dev *pdev;			/* for dma map/unmap */
	unsigned int max_txsz;			/* max tx pkt size for ddp */
	unsigned int max_rxsz;			/* max rx pkt size for ddp */
	unsigned int llimit;			/* pagepod memory lower bound */
	unsigned int ulimit;			/* pagepod memory upper bound */
	unsigned int nppods;			/* # of pagepod entries */
	unsigned int idx_last;			/* last pagepod entry used */
	unsigned char idx_bits;			/* bits a pod index takes */
	unsigned char filler[3];		/* pad to 4-byte alignment */
	u32 idx_mask;				/* pagepod index mask */
	u32 rsvd_tag_mask;			/* h/w-reserved tag bits */
	spinlock_t map_lock;			/* guards the pagepod map */
	struct cxgb3i_gather_list **gl_map;	/* gather list per pod index */
	struct sk_buff **gl_skb;		/* pod-programming skb cache */
};
92
93#define ISCSI_PDU_NONPAYLOAD_LEN 312 /* bhs(48) + ahs(256) + digest(8) */
94#define ULP2_MAX_PKT_SIZE 16224
95#define ULP2_MAX_PDU_PAYLOAD (ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_LEN)
96#define PPOD_PAGES_MAX 4
97#define PPOD_PAGES_SHIFT 2 /* 4 pages per pod */
98
99/*
100 * struct pagepod_hdr, pagepod - pagepod format
101 */
struct pagepod_hdr {
	u32 vld_tid;		/* F_PPOD_VALID | V_PPOD_TID(tid) */
	u32 pgsz_tag_clr;	/* page size / reserved tag bits */
	u32 maxoffset;		/* total buffer length */
	u32 pgoffset;		/* offset into the first page */
	u64 rsvd;		/* reserved, written as 0 */
};
109
struct pagepod {
	struct pagepod_hdr hdr;
	/* NOTE(review): PPOD_PAGES_MAX + 1 entries — presumably the extra
	 * slot overlaps the next pod's first page; confirm against the
	 * adapter's pagepod spec */
	u64 addr[PPOD_PAGES_MAX + 1];
};
114
115#define PPOD_SIZE sizeof(struct pagepod) /* 64 */
116#define PPOD_SIZE_SHIFT 6
117
118#define PPOD_COLOR_SHIFT 0
119#define PPOD_COLOR_SIZE 6
120#define PPOD_COLOR_MASK ((1 << PPOD_COLOR_SIZE) - 1)
121
122#define PPOD_IDX_SHIFT PPOD_COLOR_SIZE
123#define PPOD_IDX_MAX_SIZE 24
124
125#define S_PPOD_TID 0
126#define M_PPOD_TID 0xFFFFFF
127#define V_PPOD_TID(x) ((x) << S_PPOD_TID)
128
129#define S_PPOD_VALID 24
130#define V_PPOD_VALID(x) ((x) << S_PPOD_VALID)
131#define F_PPOD_VALID V_PPOD_VALID(1U)
132
133#define S_PPOD_COLOR 0
134#define M_PPOD_COLOR 0x3F
135#define V_PPOD_COLOR(x) ((x) << S_PPOD_COLOR)
136
137#define S_PPOD_TAG 6
138#define M_PPOD_TAG 0xFFFFFF
139#define V_PPOD_TAG(x) ((x) << S_PPOD_TAG)
140
141#define S_PPOD_PGSZ 30
142#define M_PPOD_PGSZ 0x3
143#define V_PPOD_PGSZ(x) ((x) << S_PPOD_PGSZ)
144
145/*
146 * large memory chunk allocation/release
147 * use vmalloc() if kmalloc() fails
148 */
149static inline void *cxgb3i_alloc_big_mem(unsigned int size,
150 gfp_t gfp)
151{
152 void *p = kmalloc(size, gfp);
153 if (!p)
154 p = vmalloc(size);
155 if (p)
156 memset(p, 0, size);
157 return p;
158}
159
/* release memory obtained from cxgb3i_alloc_big_mem(), picking the
 * matching deallocator for the kmalloc/vmalloc fallback */
static inline void cxgb3i_free_big_mem(void *addr)
{
	if (!is_vmalloc_addr(addr))
		kfree(addr);
	else
		vfree(addr);
}
167
168/*
169 * cxgb3i ddp tag are 32 bits, it consists of reserved bits used by h/w and
170 * non-reserved bits that can be used by the iscsi s/w.
171 * The reserved bits are identified by the rsvd_bits and rsvd_shift fields
172 * in struct cxgb3i_tag_format.
173 *
174 * The upper most reserved bit can be used to check if a tag is ddp tag or not:
175 * if the bit is 0, the tag is a valid ddp tag
176 */
177
178/**
179 * cxgb3i_is_ddp_tag - check if a given tag is a hw/ddp tag
180 * @tformat: tag format information
181 * @tag: tag to be checked
182 *
183 * return true if the tag is a ddp tag, false otherwise.
184 */
185static inline int cxgb3i_is_ddp_tag(struct cxgb3i_tag_format *tformat, u32 tag)
186{
187 return !(tag & (1 << (tformat->rsvd_bits + tformat->rsvd_shift - 1)));
188}
189
190/**
191 * cxgb3i_sw_tag_usable - check if s/w tag has enough bits left for hw bits
192 * @tformat: tag format information
193 * @sw_tag: s/w tag to be checked
194 *
195 * return true if the tag can be used for hw ddp tag, false otherwise.
196 */
197static inline int cxgb3i_sw_tag_usable(struct cxgb3i_tag_format *tformat,
198 u32 sw_tag)
199{
200 sw_tag >>= (32 - tformat->rsvd_bits);
201 return !sw_tag;
202}
203
/**
 * cxgb3i_set_non_ddp_tag - mark a given s/w tag as an invalid ddp tag
 * @tformat: tag format information
 * @sw_tag: s/w tag to be checked
 *
 * insert 1 at the upper most reserved bit to mark it as an invalid ddp tag.
 */
static inline u32 cxgb3i_set_non_ddp_tag(struct cxgb3i_tag_format *tformat,
					 u32 sw_tag)
{
	/* shift = position of the upper-most reserved (marker) bit */
	unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1;
	u32 mask = (1 << shift) - 1;

	if (sw_tag && (sw_tag & ~mask)) {
		/* tag has bits at/above the marker position: split it and
		 * move the upper half up to make room for the marker bit */
		u32 v1 = sw_tag & ((1 << shift) - 1);
		u32 v2 = (sw_tag >> (shift - 1)) << shift;
		/* NOTE(review): the shift by (shift - 1) rather than shift
		 * is asymmetric; cxgb3i_tag_nonrsvd_bits() undoes it with
		 * (tag >> 1) — confirm the two stay exact inverses */

		return v2 | v1 | 1 << shift;
	}
	return sw_tag | 1 << shift;
}
225
226/**
227 * cxgb3i_ddp_tag_base - shift s/w tag bits so that reserved bits are not used
228 * @tformat: tag format information
229 * @sw_tag: s/w tag to be checked
230 */
231static inline u32 cxgb3i_ddp_tag_base(struct cxgb3i_tag_format *tformat,
232 u32 sw_tag)
233{
234 u32 mask = (1 << tformat->rsvd_shift) - 1;
235
236 if (sw_tag && (sw_tag & ~mask)) {
237 u32 v1 = sw_tag & mask;
238 u32 v2 = sw_tag >> tformat->rsvd_shift;
239
240 v2 <<= tformat->rsvd_shift + tformat->rsvd_bits;
241 return v2 | v1;
242 }
243 return sw_tag;
244}
245
246/**
247 * cxgb3i_tag_rsvd_bits - get the reserved bits used by the h/w
248 * @tformat: tag format information
249 * @tag: tag to be checked
250 *
251 * return the reserved bits in the tag
252 */
253static inline u32 cxgb3i_tag_rsvd_bits(struct cxgb3i_tag_format *tformat,
254 u32 tag)
255{
256 if (cxgb3i_is_ddp_tag(tformat, tag))
257 return (tag >> tformat->rsvd_shift) & tformat->rsvd_mask;
258 return 0;
259}
260
/**
 * cxgb3i_tag_nonrsvd_bits - get the non-reserved bits used by the s/w
 * @tformat: tag format information
 * @tag: tag to be checked
 *
 * return the non-reserved bits in the tag.
 */
static inline u32 cxgb3i_tag_nonrsvd_bits(struct cxgb3i_tag_format *tformat,
					  u32 tag)
{
	/* shift = position of the upper-most reserved bit */
	unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1;
	u32 v1, v2;

	if (cxgb3i_is_ddp_tag(tformat, tag)) {
		/* ddp tag: s/w bits sit below rsvd_shift and above the
		 * reserved field — inverse of cxgb3i_ddp_tag_base() */
		v1 = tag & ((1 << tformat->rsvd_shift) - 1);
		v2 = (tag >> (shift + 1)) << tformat->rsvd_shift;
	} else {
		/* non-ddp tag: drop the marker bit inserted by
		 * cxgb3i_set_non_ddp_tag() and rejoin the two halves */
		u32 mask = (1 << shift) - 1;

		tag &= ~(1 << shift);
		v1 = tag & mask;
		v2 = (tag >> 1) & ~mask;
	}
	return v1 | v2;
}
286
287int cxgb3i_ddp_tag_reserve(struct t3cdev *, unsigned int tid,
288 struct cxgb3i_tag_format *, u32 *tag,
289 struct cxgb3i_gather_list *, gfp_t gfp);
290void cxgb3i_ddp_tag_release(struct t3cdev *, u32 tag);
291
292struct cxgb3i_gather_list *cxgb3i_ddp_make_gl(unsigned int xferlen,
293 struct scatterlist *sgl,
294 unsigned int sgcnt,
295 struct pci_dev *pdev,
296 gfp_t gfp);
297void cxgb3i_ddp_release_gl(struct cxgb3i_gather_list *gl,
298 struct pci_dev *pdev);
299
300int cxgb3i_setup_conn_host_pagesize(struct t3cdev *, unsigned int tid,
301 int reply);
302int cxgb3i_setup_conn_pagesize(struct t3cdev *, unsigned int tid, int reply,
303 unsigned long pgsz);
304int cxgb3i_setup_conn_digest(struct t3cdev *, unsigned int tid,
305 int hcrc, int dcrc, int reply);
306int cxgb3i_ddp_find_page_index(unsigned long pgsz);
307int cxgb3i_adapter_ddp_info(struct t3cdev *, struct cxgb3i_tag_format *,
308 unsigned int *txsz, unsigned int *rxsz);
309
310void cxgb3i_ddp_init(struct t3cdev *);
311void cxgb3i_ddp_cleanup(struct t3cdev *);
312#endif
diff --git a/drivers/scsi/cxgb3i/cxgb3i_init.c b/drivers/scsi/cxgb3i/cxgb3i_init.c
deleted file mode 100644
index 685af3698518..000000000000
--- a/drivers/scsi/cxgb3i/cxgb3i_init.c
+++ /dev/null
@@ -1,132 +0,0 @@
1/* cxgb3i_init.c: Chelsio S3xx iSCSI driver.
2 *
3 * Copyright (c) 2008 Chelsio Communications, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Karen Xie (kxie@chelsio.com)
10 */
11
12#include "cxgb3i.h"
13
14#define DRV_MODULE_NAME "cxgb3i"
15#define DRV_MODULE_VERSION "1.0.2"
16#define DRV_MODULE_RELDATE "Mar. 2009"
17
18static char version[] =
19 "Chelsio S3xx iSCSI Driver " DRV_MODULE_NAME
20 " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
21
22MODULE_AUTHOR("Karen Xie <kxie@chelsio.com>");
23MODULE_DESCRIPTION("Chelsio S3xx iSCSI Driver");
24MODULE_LICENSE("GPL");
25MODULE_VERSION(DRV_MODULE_VERSION);
26
27static void open_s3_dev(struct t3cdev *);
28static void close_s3_dev(struct t3cdev *);
29static void s3_event_handler(struct t3cdev *tdev, u32 event, u32 port);
30
/* CPL receive handler table; passed to cxgb3i_sdev_init() from
 * cxgb3i_init_module() to be populated */
static cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS];
/* registration info handed to the cxgb3 LLD via cxgb3_register_client() */
static struct cxgb3_client t3c_client = {
	.name = "iscsi_cxgb3",
	.handlers = cxgb3i_cpl_handlers,
	.add = open_s3_dev,
	.remove = close_s3_dev,
	.event_handler = s3_event_handler,
};
39
40/**
41 * open_s3_dev - register with cxgb3 LLD
42 * @t3dev: cxgb3 adapter instance
43 */
44static void open_s3_dev(struct t3cdev *t3dev)
45{
46 static int vers_printed;
47
48 if (!vers_printed) {
49 printk(KERN_INFO "%s", version);
50 vers_printed = 1;
51 }
52
53 cxgb3i_ddp_init(t3dev);
54 cxgb3i_sdev_add(t3dev, &t3c_client);
55 cxgb3i_adapter_open(t3dev);
56}
57
/**
 * close_s3_dev - de-register with cxgb3 LLD
 * @t3dev: cxgb3 adapter instance
 */
static void close_s3_dev(struct t3cdev *t3dev)
{
	/* tear down in reverse order of open_s3_dev() */
	cxgb3i_adapter_close(t3dev);
	cxgb3i_sdev_remove(t3dev);
	cxgb3i_ddp_cleanup(t3dev);
}
68
69static void s3_event_handler(struct t3cdev *tdev, u32 event, u32 port)
70{
71 struct cxgb3i_adapter *snic = cxgb3i_adapter_find_by_tdev(tdev);
72
73 cxgb3i_log_info("snic 0x%p, tdev 0x%p, event 0x%x, port 0x%x.\n",
74 snic, tdev, event, port);
75 if (!snic)
76 return;
77
78 switch (event) {
79 case OFFLOAD_STATUS_DOWN:
80 snic->flags |= CXGB3I_ADAPTER_FLAG_RESET;
81 break;
82 case OFFLOAD_STATUS_UP:
83 snic->flags &= ~CXGB3I_ADAPTER_FLAG_RESET;
84 break;
85 }
86}
87
88/**
89 * cxgb3i_init_module - module init entry point
90 *
91 * initialize any driver wide global data structures and register itself
92 * with the cxgb3 module
93 */
94static int __init cxgb3i_init_module(void)
95{
96 int err;
97
98 err = cxgb3i_sdev_init(cxgb3i_cpl_handlers);
99 if (err < 0)
100 return err;
101
102 err = cxgb3i_iscsi_init();
103 if (err < 0)
104 return err;
105
106 err = cxgb3i_pdu_init();
107 if (err < 0) {
108 cxgb3i_iscsi_cleanup();
109 return err;
110 }
111
112 cxgb3_register_client(&t3c_client);
113
114 return 0;
115}
116
/**
 * cxgb3i_exit_module - module cleanup/exit entry point
 *
 * go through the driver hba list and for each hba, release any resource held.
 * and unregisters iscsi transport and the cxgb3 module
 */
static void __exit cxgb3i_exit_module(void)
{
	/* detach from cxgb3 first so no new adapters arrive, then unwind
	 * in reverse of cxgb3i_init_module() */
	cxgb3_unregister_client(&t3c_client);
	cxgb3i_pdu_cleanup();
	cxgb3i_iscsi_cleanup();
	cxgb3i_sdev_cleanup();
}
130
131module_init(cxgb3i_init_module);
132module_exit(cxgb3i_exit_module);
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
deleted file mode 100644
index 7b686abaae64..000000000000
--- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
+++ /dev/null
@@ -1,1018 +0,0 @@
1/* cxgb3i_iscsi.c: Chelsio S3xx iSCSI driver.
2 *
3 * Copyright (c) 2008 Chelsio Communications, Inc.
4 * Copyright (c) 2008 Mike Christie
5 * Copyright (c) 2008 Red Hat, Inc. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
10 *
11 * Written by: Karen Xie (kxie@chelsio.com)
12 */
13
14#include <linux/inet.h>
15#include <linux/slab.h>
16#include <linux/crypto.h>
17#include <linux/if_vlan.h>
18#include <net/dst.h>
19#include <net/tcp.h>
20#include <scsi/scsi_cmnd.h>
21#include <scsi/scsi_device.h>
22#include <scsi/scsi_eh.h>
23#include <scsi/scsi_host.h>
24#include <scsi/scsi.h>
25#include <scsi/iscsi_proto.h>
26#include <scsi/libiscsi.h>
27#include <scsi/scsi_transport_iscsi.h>
28
29#include "cxgb3i.h"
30#include "cxgb3i_pdu.h"
31
32#ifdef __DEBUG_CXGB3I_TAG__
33#define cxgb3i_tag_debug cxgb3i_log_debug
34#else
35#define cxgb3i_tag_debug(fmt...)
36#endif
37
38#ifdef __DEBUG_CXGB3I_API__
39#define cxgb3i_api_debug cxgb3i_log_debug
40#else
41#define cxgb3i_api_debug(fmt...)
42#endif
43
44/*
45 * align pdu size to multiple of 512 for better performance
46 */
47#define align_pdu_size(n) do { n = (n) & (~511); } while (0)
48
49static struct scsi_transport_template *cxgb3i_scsi_transport;
50static struct scsi_host_template cxgb3i_host_template;
51static struct iscsi_transport cxgb3i_iscsi_transport;
52static unsigned char sw_tag_idx_bits;
53static unsigned char sw_tag_age_bits;
54
55static LIST_HEAD(cxgb3i_snic_list);
56static DEFINE_RWLOCK(cxgb3i_snic_rwlock);
57
58/**
59 * cxgb3i_adpater_find_by_tdev - find the cxgb3i_adapter structure via t3cdev
60 * @tdev: t3cdev pointer
61 */
62struct cxgb3i_adapter *cxgb3i_adapter_find_by_tdev(struct t3cdev *tdev)
63{
64 struct cxgb3i_adapter *snic;
65
66 read_lock(&cxgb3i_snic_rwlock);
67 list_for_each_entry(snic, &cxgb3i_snic_list, list_head) {
68 if (snic->tdev == tdev) {
69 read_unlock(&cxgb3i_snic_rwlock);
70 return snic;
71 }
72 }
73 read_unlock(&cxgb3i_snic_rwlock);
74 return NULL;
75}
76
/*
 * adapter_update - refresh an existing adapter's cached DDP parameters
 * @snic: the cxgb3i adapter
 *
 * Re-reads the DDP tag format and max tx/rx payload sizes from the
 * t3cdev; used by cxgb3i_adapter_open() when the adapter is already known.
 */
static inline int adapter_update(struct cxgb3i_adapter *snic)
{
	cxgb3i_log_info("snic 0x%p, t3dev 0x%p, updating.\n",
			snic, snic->tdev);
	return cxgb3i_adapter_ddp_info(snic->tdev, &snic->tag_format,
					&snic->tx_max_size,
					&snic->rx_max_size);
}
85
86static int adapter_add(struct cxgb3i_adapter *snic)
87{
88 struct t3cdev *t3dev = snic->tdev;
89 struct adapter *adapter = tdev2adap(t3dev);
90 int i, err;
91
92 snic->pdev = adapter->pdev;
93 snic->tag_format.sw_bits = sw_tag_idx_bits + sw_tag_age_bits;
94
95 err = cxgb3i_adapter_ddp_info(t3dev, &snic->tag_format,
96 &snic->tx_max_size,
97 &snic->rx_max_size);
98 if (err < 0)
99 return err;
100
101 for_each_port(adapter, i) {
102 snic->hba[i] = cxgb3i_hba_host_add(snic, adapter->port[i]);
103 if (!snic->hba[i])
104 return -EINVAL;
105 }
106 snic->hba_cnt = adapter->params.nports;
107
108 /* add to the list */
109 write_lock(&cxgb3i_snic_rwlock);
110 list_add_tail(&snic->list_head, &cxgb3i_snic_list);
111 write_unlock(&cxgb3i_snic_rwlock);
112
113 cxgb3i_log_info("t3dev 0x%p open, snic 0x%p, %u scsi hosts added.\n",
114 t3dev, snic, snic->hba_cnt);
115 return 0;
116}
117
118/**
119 * cxgb3i_adapter_open - init a s3 adapter structure and any h/w settings
120 * @t3dev: t3cdev adapter
121 */
122void cxgb3i_adapter_open(struct t3cdev *t3dev)
123{
124 struct cxgb3i_adapter *snic = cxgb3i_adapter_find_by_tdev(t3dev);
125 int err;
126
127 if (snic)
128 err = adapter_update(snic);
129 else {
130 snic = kzalloc(sizeof(*snic), GFP_KERNEL);
131 if (snic) {
132 spin_lock_init(&snic->lock);
133 snic->tdev = t3dev;
134 err = adapter_add(snic);
135 } else
136 err = -ENOMEM;
137 }
138
139 if (err < 0) {
140 cxgb3i_log_info("snic 0x%p, f 0x%x, t3dev 0x%p open, err %d.\n",
141 snic, snic ? snic->flags : 0, t3dev, err);
142 if (snic) {
143 snic->flags &= ~CXGB3I_ADAPTER_FLAG_RESET;
144 cxgb3i_adapter_close(t3dev);
145 }
146 }
147}
148
/**
 * cxgb3i_adapter_close - release the resources held and cleanup h/w settings
 * @t3dev: t3cdev adapter
 */
void cxgb3i_adapter_close(struct t3cdev *t3dev)
{
	struct cxgb3i_adapter *snic = cxgb3i_adapter_find_by_tdev(t3dev);
	int i;

	/* nothing to do for an unknown adapter; keep the snic alive while
	 * it is only resetting (RESET flag set by s3_event_handler) so
	 * cxgb3i_adapter_open() can reuse it via adapter_update() */
	if (!snic || snic->flags & CXGB3I_ADAPTER_FLAG_RESET) {
		cxgb3i_log_info("t3dev 0x%p close, snic 0x%p, f 0x%x.\n",
				t3dev, snic, snic ? snic->flags : 0);
		return;
	}

	/* remove from the list */
	write_lock(&cxgb3i_snic_rwlock);
	list_del(&snic->list_head);
	write_unlock(&cxgb3i_snic_rwlock);

	/* unregister every scsi host added by adapter_add() */
	for (i = 0; i < snic->hba_cnt; i++) {
		if (snic->hba[i]) {
			cxgb3i_hba_host_remove(snic->hba[i]);
			snic->hba[i] = NULL;
		}
	}
	cxgb3i_log_info("t3dev 0x%p close, snic 0x%p, %u scsi hosts removed.\n",
			t3dev, snic, snic->hba_cnt);
	kfree(snic);
}
179
180/**
181 * cxgb3i_hba_find_by_netdev - find the cxgb3i_hba structure via net_device
182 * @t3dev: t3cdev adapter
183 */
184static struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev)
185{
186 struct cxgb3i_adapter *snic;
187 int i;
188
189 if (ndev->priv_flags & IFF_802_1Q_VLAN)
190 ndev = vlan_dev_real_dev(ndev);
191
192 read_lock(&cxgb3i_snic_rwlock);
193 list_for_each_entry(snic, &cxgb3i_snic_list, list_head) {
194 for (i = 0; i < snic->hba_cnt; i++) {
195 if (snic->hba[i]->ndev == ndev) {
196 read_unlock(&cxgb3i_snic_rwlock);
197 return snic->hba[i];
198 }
199 }
200 }
201 read_unlock(&cxgb3i_snic_rwlock);
202 return NULL;
203}
204
205/**
206 * cxgb3i_hba_host_add - register a new host with scsi/iscsi
207 * @snic: the cxgb3i adapter
208 * @ndev: associated net_device
209 */
210struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *snic,
211 struct net_device *ndev)
212{
213 struct cxgb3i_hba *hba;
214 struct Scsi_Host *shost;
215 int err;
216
217 shost = iscsi_host_alloc(&cxgb3i_host_template,
218 sizeof(struct cxgb3i_hba), 1);
219 if (!shost) {
220 cxgb3i_log_info("snic 0x%p, ndev 0x%p, host_alloc failed.\n",
221 snic, ndev);
222 return NULL;
223 }
224
225 shost->transportt = cxgb3i_scsi_transport;
226 shost->max_lun = CXGB3I_MAX_LUN;
227 shost->max_id = CXGB3I_MAX_TARGET;
228 shost->max_channel = 0;
229 shost->max_cmd_len = 16;
230
231 hba = iscsi_host_priv(shost);
232 hba->snic = snic;
233 hba->ndev = ndev;
234 hba->shost = shost;
235
236 pci_dev_get(snic->pdev);
237 err = iscsi_host_add(shost, &snic->pdev->dev);
238 if (err) {
239 cxgb3i_log_info("snic 0x%p, ndev 0x%p, host_add failed.\n",
240 snic, ndev);
241 goto pci_dev_put;
242 }
243
244 cxgb3i_api_debug("shost 0x%p, hba 0x%p, no %u.\n",
245 shost, hba, shost->host_no);
246
247 return hba;
248
249pci_dev_put:
250 pci_dev_put(snic->pdev);
251 scsi_host_put(shost);
252 return NULL;
253}
254
/**
 * cxgb3i_hba_host_remove - de-register the host with scsi/iscsi
 * @hba: the cxgb3i hba
 *
 * Reverses cxgb3i_hba_host_add(): unregisters the scsi host, drops the
 * pci device reference taken at add time, then frees the host (which
 * also frees @hba, allocated as the host's private data).
 */
void cxgb3i_hba_host_remove(struct cxgb3i_hba *hba)
{
	cxgb3i_api_debug("shost 0x%p, hba 0x%p, no %u.\n",
			 hba->shost, hba, hba->shost->host_no);
	iscsi_host_remove(hba->shost);
	pci_dev_put(hba->snic->pdev);
	iscsi_host_free(hba->shost);
}
267
/**
 * cxgb3i_ep_connect - establish TCP connection to target portal
 * @shost: scsi host to use
 * @dst_addr: target IP address
 * @non_blocking: blocking or non-blocking call
 *
 * Initiates a TCP/IP connection to the dst_addr
 */
static struct iscsi_endpoint *cxgb3i_ep_connect(struct Scsi_Host *shost,
						struct sockaddr *dst_addr,
						int non_blocking)
{
	struct iscsi_endpoint *ep;
	struct cxgb3i_endpoint *cep;
	struct cxgb3i_hba *hba = NULL;
	struct s3_conn *c3cn = NULL;
	int err = 0;

	/* @shost is optional; when given, the route must end up on it */
	if (shost)
		hba = iscsi_host_priv(shost);

	cxgb3i_api_debug("shost 0x%p, hba 0x%p.\n", shost, hba);

	c3cn = cxgb3i_c3cn_create();
	if (!c3cn) {
		cxgb3i_log_info("ep connect OOM.\n");
		err = -ENOMEM;
		goto release_conn;
	}

	/* NOTE(review): dst_addr is cast straight to sockaddr_in — only
	 * IPv4 targets appear to be handled here */
	err = cxgb3i_c3cn_connect(hba ? hba->ndev : NULL, c3cn,
				  (struct sockaddr_in *)dst_addr);
	if (err < 0) {
		cxgb3i_log_info("ep connect failed.\n");
		goto release_conn;
	}

	/* the route lookup picked the egress device; map it to our hba */
	hba = cxgb3i_hba_find_by_netdev(c3cn->dst_cache->dev);
	if (!hba) {
		err = -ENOSPC;
		cxgb3i_log_info("NOT going through cxgbi device.\n");
		goto release_conn;
	}

	/* the connection must not route away from the requested host */
	if (shost && hba != iscsi_host_priv(shost)) {
		err = -ENOSPC;
		cxgb3i_log_info("Could not connect through request host%u\n",
				shost->host_no);
		goto release_conn;
	}

	/* the offloaded connection may already be going down */
	if (c3cn_is_closing(c3cn)) {
		err = -ENOSPC;
		cxgb3i_log_info("ep connect unable to connect.\n");
		goto release_conn;
	}

	ep = iscsi_create_endpoint(sizeof(*cep));
	if (!ep) {
		err = -ENOMEM;
		cxgb3i_log_info("iscsi alloc ep, OOM.\n");
		goto release_conn;
	}
	cep = ep->dd_data;
	cep->c3cn = c3cn;
	cep->hba = hba;

	cxgb3i_api_debug("ep 0x%p, 0x%p, c3cn 0x%p, hba 0x%p.\n",
			 ep, cep, c3cn, hba);
	return ep;

release_conn:
	cxgb3i_api_debug("conn 0x%p failed, release.\n", c3cn);
	if (c3cn)
		cxgb3i_c3cn_release(c3cn);
	return ERR_PTR(err);
}
345
346/**
347 * cxgb3i_ep_poll - polls for TCP connection establishement
348 * @ep: TCP connection (endpoint) handle
349 * @timeout_ms: timeout value in milli secs
350 *
351 * polls for TCP connect request to complete
352 */
353static int cxgb3i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
354{
355 struct cxgb3i_endpoint *cep = ep->dd_data;
356 struct s3_conn *c3cn = cep->c3cn;
357
358 if (!c3cn_is_established(c3cn))
359 return 0;
360 cxgb3i_api_debug("ep 0x%p, c3cn 0x%p established.\n", ep, c3cn);
361 return 1;
362}
363
/**
 * cxgb3i_ep_disconnect - teardown TCP connection
 * @ep: TCP connection (endpoint) handle
 *
 * teardown TCP connection
 */
static void cxgb3i_ep_disconnect(struct iscsi_endpoint *ep)
{
	struct cxgb3i_endpoint *cep = ep->dd_data;
	/* cconn is only non-NULL once the ep was bound to an iscsi
	 * connection in cxgb3i_conn_bind() */
	struct cxgb3i_conn *cconn = cep->cconn;

	cxgb3i_api_debug("ep 0x%p, cep 0x%p.\n", ep, cep);

	if (cconn && cconn->conn) {
		/*
		 * stop the xmit path so the xmit_pdu function is
		 * not being called
		 */
		iscsi_suspend_tx(cconn->conn);

		/* unhook the c3cn from the iscsi connection under the
		 * callback lock so the rx callbacks see a consistent state */
		write_lock_bh(&cep->c3cn->callback_lock);
		cep->c3cn->user_data = NULL;
		cconn->cep = NULL;
		write_unlock_bh(&cep->c3cn->callback_lock);
	}

	cxgb3i_api_debug("ep 0x%p, cep 0x%p, release c3cn 0x%p.\n",
			 ep, cep, cep->c3cn);
	cxgb3i_c3cn_release(cep->c3cn);
	iscsi_destroy_endpoint(ep);
}
395
/**
 * cxgb3i_session_create - create a new iscsi session
 * @ep: connected endpoint; mandatory, as it carries the hba/scsi host
 *      the session must be bound to
 * @cmds_max: max # of commands
 * @qdepth: scsi queue depth
 * @initial_cmdsn: initial iscsi CMDSN for this session
 *
 * Creates a new iSCSI session
 */
static struct iscsi_cls_session *
cxgb3i_session_create(struct iscsi_endpoint *ep, u16 cmds_max, u16 qdepth,
		      u32 initial_cmdsn)
{
	struct cxgb3i_endpoint *cep;
	struct cxgb3i_hba *hba;
	struct Scsi_Host *shost;
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *session;

	/* unlike software iscsi, an offloaded session cannot be created
	 * without an endpoint to locate the adapter */
	if (!ep) {
		cxgb3i_log_error("%s, missing endpoint.\n", __func__);
		return NULL;
	}

	cep = ep->dd_data;
	hba = cep->hba;
	shost = hba->shost;
	cxgb3i_api_debug("ep 0x%p, cep 0x%p, hba 0x%p.\n", ep, cep, hba);
	BUG_ON(hba != iscsi_host_priv(shost));

	/* per-task private area holds the libiscsi tcp task plus the
	 * cxgb3i task data */
	cls_session = iscsi_session_setup(&cxgb3i_iscsi_transport, shost,
					  cmds_max, 0,
					  sizeof(struct iscsi_tcp_task) +
					  sizeof(struct cxgb3i_task_data),
					  initial_cmdsn, ISCSI_MAX_TARGET);
	if (!cls_session)
		return NULL;
	session = cls_session->dd_data;
	if (iscsi_tcp_r2tpool_alloc(session))
		goto remove_session;

	return cls_session;

remove_session:
	iscsi_session_teardown(cls_session);
	return NULL;
}
442
443/**
444 * cxgb3i_session_destroy - destroys iscsi session
445 * @cls_session: pointer to iscsi cls session
446 *
447 * Destroys an iSCSI session instance and releases its all resources held
448 */
449static void cxgb3i_session_destroy(struct iscsi_cls_session *cls_session)
450{
451 cxgb3i_api_debug("sess 0x%p.\n", cls_session);
452 iscsi_tcp_r2tpool_free(cls_session->dd_data);
453 iscsi_session_teardown(cls_session);
454}
455
/**
 * cxgb3i_conn_max_xmit_dlength -- calc the max. xmit pdu segment size
 * @conn: iscsi connection
 * check the max. xmit pdu payload, reduce it if needed
 */
static inline int cxgb3i_conn_max_xmit_dlength(struct iscsi_conn *conn)

{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgb3i_conn *cconn = tcp_conn->dd_data;
	/* candidate limit derived from skb geometry (frag capacity vs.
	 * tx headroom); capped below by the adapter's tx limit */
	unsigned int max = max(512 * MAX_SKB_FRAGS, SKB_TX_HEADROOM);

	max = min(cconn->hba->snic->tx_max_size, max);
	if (conn->max_xmit_dlength)
		/* an already-negotiated value can only be reduced */
		conn->max_xmit_dlength = min(conn->max_xmit_dlength, max);
	else
		conn->max_xmit_dlength = max;
	/* round down to a multiple of 512 for better performance */
	align_pdu_size(conn->max_xmit_dlength);
	cxgb3i_api_debug("conn 0x%p, max xmit %u.\n",
			 conn, conn->max_xmit_dlength);
	return 0;
}
478
/**
 * cxgb3i_conn_max_recv_dlength -- check the max. recv pdu segment size
 * @conn: iscsi connection
 * return 0 if the value is valid, < 0 otherwise.
 */
static inline int cxgb3i_conn_max_recv_dlength(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgb3i_conn *cconn = tcp_conn->dd_data;
	/* hard limit: the adapter's rx capacity, 512-aligned */
	unsigned int max = cconn->hba->snic->rx_max_size;

	align_pdu_size(max);
	if (conn->max_recv_dlength) {
		/* a negotiated value may be reduced but never raised
		 * beyond what the hardware can receive */
		if (conn->max_recv_dlength > max) {
			cxgb3i_log_error("MaxRecvDataSegmentLength %u too big."
					 " Need to be <= %u.\n",
					 conn->max_recv_dlength, max);
			return -EINVAL;
		}
		conn->max_recv_dlength = min(conn->max_recv_dlength, max);
		align_pdu_size(conn->max_recv_dlength);
	} else
		conn->max_recv_dlength = max;
	cxgb3i_api_debug("conn 0x%p, max recv %u.\n",
			 conn, conn->max_recv_dlength);
	return 0;
}
506
507/**
508 * cxgb3i_conn_create - create iscsi connection instance
509 * @cls_session: pointer to iscsi cls session
510 * @cid: iscsi cid
511 *
512 * Creates a new iSCSI connection instance for a given session
513 */
514static struct iscsi_cls_conn *cxgb3i_conn_create(struct iscsi_cls_session
515 *cls_session, u32 cid)
516{
517 struct iscsi_cls_conn *cls_conn;
518 struct iscsi_conn *conn;
519 struct iscsi_tcp_conn *tcp_conn;
520 struct cxgb3i_conn *cconn;
521
522 cxgb3i_api_debug("sess 0x%p, cid %u.\n", cls_session, cid);
523
524 cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid);
525 if (!cls_conn)
526 return NULL;
527 conn = cls_conn->dd_data;
528 tcp_conn = conn->dd_data;
529 cconn = tcp_conn->dd_data;
530
531 cconn->conn = conn;
532 return cls_conn;
533}
534
/**
 * cxgb3i_conn_bind - binds iscsi sess, conn and endpoint together
 * @cls_session: pointer to iscsi cls session
 * @cls_conn: pointer to iscsi cls conn
 * @transport_eph: 64-bit EP handle
 * @is_leading: leading connection on this session?
 *
 * Binds together an iSCSI session, an iSCSI connection and a
 * TCP connection. This routine returns error code if the TCP
 * connection does not belong on the device iSCSI sess/conn is bound
 */

static int cxgb3i_conn_bind(struct iscsi_cls_session *cls_session,
			    struct iscsi_cls_conn *cls_conn,
			    u64 transport_eph, int is_leading)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgb3i_conn *cconn = tcp_conn->dd_data;
	struct cxgb3i_adapter *snic;
	struct iscsi_endpoint *ep;
	struct cxgb3i_endpoint *cep;
	struct s3_conn *c3cn;
	int err;

	ep = iscsi_lookup_endpoint(transport_eph);
	if (!ep)
		return -EINVAL;

	/* setup ddp pagesize */
	cep = ep->dd_data;
	c3cn = cep->c3cn;
	snic = cep->hba->snic;
	err = cxgb3i_setup_conn_host_pagesize(snic->tdev, c3cn->tid, 0);
	if (err < 0)
		return err;

	cxgb3i_api_debug("ep 0x%p, cls sess 0x%p, cls conn 0x%p.\n",
			 ep, cls_session, cls_conn);

	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
	if (err)
		return -EINVAL;

	/* calculate the tag idx bits needed for this conn based on cmds_max */
	cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1;
	cxgb3i_api_debug("session cmds_max 0x%x, bits %u.\n",
			 conn->session->cmds_max, cconn->task_idx_bits);

	/* cross-link the offloaded connection and the iscsi connection
	 * under the callback lock so rx callbacks see a consistent state */
	read_lock(&c3cn->callback_lock);
	c3cn->user_data = conn;
	cconn->hba = cep->hba;
	cconn->cep = cep;
	cep->cconn = cconn;
	read_unlock(&c3cn->callback_lock);

	/* clamp the negotiated pdu limits to the adapter's capabilities */
	cxgb3i_conn_max_xmit_dlength(conn);
	cxgb3i_conn_max_recv_dlength(conn);

	/* publish the portal address/port for cxgb3i_conn_get_param() */
	spin_lock_bh(&conn->session->lock);
	sprintf(conn->portal_address, "%pI4", &c3cn->daddr.sin_addr.s_addr);
	conn->portal_port = ntohs(c3cn->daddr.sin_port);
	spin_unlock_bh(&conn->session->lock);

	/* init recv engine */
	iscsi_tcp_hdr_recv_prep(tcp_conn);

	return 0;
}
604
605/**
606 * cxgb3i_conn_get_param - return iscsi connection parameter to caller
607 * @cls_conn: pointer to iscsi cls conn
608 * @param: parameter type identifier
609 * @buf: buffer pointer
610 *
611 * returns iSCSI connection parameters
612 */
613static int cxgb3i_conn_get_param(struct iscsi_cls_conn *cls_conn,
614 enum iscsi_param param, char *buf)
615{
616 struct iscsi_conn *conn = cls_conn->dd_data;
617 int len;
618
619 cxgb3i_api_debug("cls_conn 0x%p, param %d.\n", cls_conn, param);
620
621 switch (param) {
622 case ISCSI_PARAM_CONN_PORT:
623 spin_lock_bh(&conn->session->lock);
624 len = sprintf(buf, "%hu\n", conn->portal_port);
625 spin_unlock_bh(&conn->session->lock);
626 break;
627 case ISCSI_PARAM_CONN_ADDRESS:
628 spin_lock_bh(&conn->session->lock);
629 len = sprintf(buf, "%s\n", conn->portal_address);
630 spin_unlock_bh(&conn->session->lock);
631 break;
632 default:
633 return iscsi_conn_get_param(cls_conn, param, buf);
634 }
635
636 return len;
637}
638
639/**
640 * cxgb3i_conn_set_param - set iscsi connection parameter
641 * @cls_conn: pointer to iscsi cls conn
642 * @param: parameter type identifier
643 * @buf: buffer pointer
644 * @buflen: buffer length
645 *
646 * set iSCSI connection parameters
647 */
648static int cxgb3i_conn_set_param(struct iscsi_cls_conn *cls_conn,
649 enum iscsi_param param, char *buf, int buflen)
650{
651 struct iscsi_conn *conn = cls_conn->dd_data;
652 struct iscsi_session *session = conn->session;
653 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
654 struct cxgb3i_conn *cconn = tcp_conn->dd_data;
655 struct cxgb3i_adapter *snic = cconn->hba->snic;
656 struct s3_conn *c3cn = cconn->cep->c3cn;
657 int value, err = 0;
658
659 switch (param) {
660 case ISCSI_PARAM_HDRDGST_EN:
661 err = iscsi_set_param(cls_conn, param, buf, buflen);
662 if (!err && conn->hdrdgst_en)
663 err = cxgb3i_setup_conn_digest(snic->tdev, c3cn->tid,
664 conn->hdrdgst_en,
665 conn->datadgst_en, 0);
666 break;
667 case ISCSI_PARAM_DATADGST_EN:
668 err = iscsi_set_param(cls_conn, param, buf, buflen);
669 if (!err && conn->datadgst_en)
670 err = cxgb3i_setup_conn_digest(snic->tdev, c3cn->tid,
671 conn->hdrdgst_en,
672 conn->datadgst_en, 0);
673 break;
674 case ISCSI_PARAM_MAX_R2T:
675 sscanf(buf, "%d", &value);
676 if (value <= 0 || !is_power_of_2(value))
677 return -EINVAL;
678 if (session->max_r2t == value)
679 break;
680 iscsi_tcp_r2tpool_free(session);
681 err = iscsi_set_param(cls_conn, param, buf, buflen);
682 if (!err && iscsi_tcp_r2tpool_alloc(session))
683 return -ENOMEM;
684 case ISCSI_PARAM_MAX_RECV_DLENGTH:
685 err = iscsi_set_param(cls_conn, param, buf, buflen);
686 if (!err)
687 err = cxgb3i_conn_max_recv_dlength(conn);
688 break;
689 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
690 err = iscsi_set_param(cls_conn, param, buf, buflen);
691 if (!err)
692 err = cxgb3i_conn_max_xmit_dlength(conn);
693 break;
694 default:
695 return iscsi_set_param(cls_conn, param, buf, buflen);
696 }
697 return err;
698}
699
/**
 * cxgb3i_host_set_param - configure host (adapter) related parameters
 * @shost: scsi host pointer
 * @param: parameter type identifier
 * @buf: buffer pointer
 * @buflen: buffer length
 */
static int cxgb3i_host_set_param(struct Scsi_Host *shost,
				 enum iscsi_host_param param,
				 char *buf, int buflen)
{
	struct cxgb3i_hba *hba = iscsi_host_priv(shost);

	if (!hba->ndev) {
		shost_printk(KERN_ERR, shost, "Could not set host param. "
			     "Netdev for host not set.\n");
		return -ENODEV;
	}

	cxgb3i_api_debug("param %d, buf %s.\n", param, buf);

	switch (param) {
	case ISCSI_HOST_PARAM_IPADDRESS:
	{
		/* store the iscsi source address on the net_device */
		__be32 addr = in_aton(buf);
		cxgb3i_set_private_ipv4addr(hba->ndev, addr);
		return 0;
	}
	case ISCSI_HOST_PARAM_HWADDRESS:
	case ISCSI_HOST_PARAM_NETDEV_NAME:
		/* ignore: these are derived from the net_device on reads
		 * (see cxgb3i_host_get_param) and cannot be set */
		return 0;
	default:
		return iscsi_host_set_param(shost, param, buf, buflen);
	}
}
735
736/**
737 * cxgb3i_host_get_param - returns host (adapter) related parameters
738 * @shost: scsi host pointer
739 * @param: parameter type identifier
740 * @buf: buffer pointer
741 */
742static int cxgb3i_host_get_param(struct Scsi_Host *shost,
743 enum iscsi_host_param param, char *buf)
744{
745 struct cxgb3i_hba *hba = iscsi_host_priv(shost);
746 int len = 0;
747
748 if (!hba->ndev) {
749 shost_printk(KERN_ERR, shost, "Could not set host param. "
750 "Netdev for host not set.\n");
751 return -ENODEV;
752 }
753
754 cxgb3i_api_debug("hba %s, param %d.\n", hba->ndev->name, param);
755
756 switch (param) {
757 case ISCSI_HOST_PARAM_HWADDRESS:
758 len = sysfs_format_mac(buf, hba->ndev->dev_addr, 6);
759 break;
760 case ISCSI_HOST_PARAM_NETDEV_NAME:
761 len = sprintf(buf, "%s\n", hba->ndev->name);
762 break;
763 case ISCSI_HOST_PARAM_IPADDRESS:
764 {
765 __be32 addr;
766
767 addr = cxgb3i_get_private_ipv4addr(hba->ndev);
768 len = sprintf(buf, "%pI4", &addr);
769 break;
770 }
771 default:
772 return iscsi_host_get_param(shost, param, buf);
773 }
774 return len;
775}
776
777/**
778 * cxgb3i_conn_get_stats - returns iSCSI stats
779 * @cls_conn: pointer to iscsi cls conn
780 * @stats: pointer to iscsi statistic struct
781 */
782static void cxgb3i_conn_get_stats(struct iscsi_cls_conn *cls_conn,
783 struct iscsi_stats *stats)
784{
785 struct iscsi_conn *conn = cls_conn->dd_data;
786
787 stats->txdata_octets = conn->txdata_octets;
788 stats->rxdata_octets = conn->rxdata_octets;
789 stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
790 stats->dataout_pdus = conn->dataout_pdus_cnt;
791 stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
792 stats->datain_pdus = conn->datain_pdus_cnt;
793 stats->r2t_pdus = conn->r2t_pdus_cnt;
794 stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
795 stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
796 stats->digest_err = 0;
797 stats->timeout_err = 0;
798 stats->custom_length = 1;
799 strcpy(stats->custom[0].desc, "eh_abort_cnt");
800 stats->custom[0].value = conn->eh_abort_cnt;
801}
802
/**
 * cxgb3i_parse_itt - get the idx and age bits from a given tag
 * @conn: iscsi connection
 * @itt: itt tag
 * @idx: task index, filled in by this function
 * @age: session age, filled in by this function
 */
static void cxgb3i_parse_itt(struct iscsi_conn *conn, itt_t itt,
			     int *idx, int *age)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgb3i_conn *cconn = tcp_conn->dd_data;
	struct cxgb3i_adapter *snic = cconn->hba->snic;
	/* the itt travels on the wire in big-endian order
	 * (see cxgb3i_reserve_itt) */
	u32 tag = ntohl((__force u32) itt);
	u32 sw_bits;

	/* strip any ddp bits, keeping only the software tag, which is
	 * laid out as (age << task_idx_bits) | task index */
	sw_bits = cxgb3i_tag_nonrsvd_bits(&snic->tag_format, tag);
	if (idx)
		*idx = sw_bits & ((1 << cconn->task_idx_bits) - 1);
	if (age)
		*age = (sw_bits >> cconn->task_idx_bits) & ISCSI_AGE_MASK;

	cxgb3i_tag_debug("parse tag 0x%x/0x%x, sw 0x%x, itt 0x%x, age 0x%x.\n",
			 tag, itt, sw_bits, idx ? *idx : 0xFFFFF,
			 age ? *age : 0xFF);
}
829
/**
 * cxgb3i_reserve_itt - generate tag for a give task
 * @task: iscsi task
 * @hdr_itt: tag, filled in by this function
 * Set up ddp for scsi read tasks if possible.
 */
int cxgb3i_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt)
{
	struct scsi_cmnd *sc = task->sc;
	struct iscsi_conn *conn = task->conn;
	struct iscsi_session *sess = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgb3i_conn *cconn = tcp_conn->dd_data;
	struct cxgb3i_adapter *snic = cconn->hba->snic;
	struct cxgb3i_tag_format *tformat = &snic->tag_format;
	/* software tag layout: session age above the task index bits */
	u32 sw_tag = (sess->age << cconn->task_idx_bits) | task->itt;
	u32 tag;
	int err = -EINVAL;

	/* attempt ddp only for commands with a data-in phase whose sw
	 * tag fits the adapter's tag format */
	if (sc &&
	    (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) &&
	    cxgb3i_sw_tag_usable(tformat, sw_tag)) {
		struct s3_conn *c3cn = cconn->cep->c3cn;
		struct cxgb3i_gather_list *gl;

		/* build a gather list over the command's data buffer */
		gl = cxgb3i_ddp_make_gl(scsi_in(sc)->length,
					scsi_in(sc)->table.sgl,
					scsi_in(sc)->table.nents,
					snic->pdev,
					GFP_ATOMIC);
		if (gl) {
			tag = sw_tag;
			err = cxgb3i_ddp_tag_reserve(snic->tdev, c3cn->tid,
						     tformat, &tag,
						     gl, GFP_ATOMIC);
			if (err < 0)
				cxgb3i_ddp_release_gl(gl, snic->pdev);
		}
	}

	/* no ddp (or reservation failed): fall back to a plain tag */
	if (err < 0)
		tag = cxgb3i_set_non_ddp_tag(tformat, sw_tag);
	/* the itt need to sent in big-endian order */
	*hdr_itt = (__force itt_t)htonl(tag);

	cxgb3i_tag_debug("new tag 0x%x/0x%x (itt 0x%x, age 0x%x).\n",
			 tag, *hdr_itt, task->itt, sess->age);
	return 0;
}
879
/**
 * cxgb3i_release_itt - release the tag for a given task
 * @task: iscsi task
 * @hdr_itt: tag
 * If the tag is a ddp tag, release the ddp setup
 */
void cxgb3i_release_itt(struct iscsi_task *task, itt_t hdr_itt)
{
	struct scsi_cmnd *sc = task->sc;
	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
	struct cxgb3i_conn *cconn = tcp_conn->dd_data;
	struct cxgb3i_adapter *snic = cconn->hba->snic;
	struct cxgb3i_tag_format *tformat = &snic->tag_format;
	/* the wire itt is big-endian (see cxgb3i_reserve_itt) */
	u32 tag = ntohl((__force u32)hdr_itt);

	cxgb3i_tag_debug("release tag 0x%x.\n", tag);

	/* mirrors the condition under which cxgb3i_reserve_itt may
	 * have set up ddp for this task */
	if (sc &&
	    (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) &&
	    cxgb3i_is_ddp_tag(tformat, tag))
		cxgb3i_ddp_tag_release(snic->tdev, tag);
}
902
/**
 * cxgb3i_host_template -- Scsi_Host_Template structure
 * used when registering with the scsi mid layer
 *
 * All command handling and error recovery is delegated to libiscsi;
 * only the queue/transfer limits are cxgb3i specific.
 */
static struct scsi_host_template cxgb3i_host_template = {
	.module = THIS_MODULE,
	.name = "Chelsio S3xx iSCSI Initiator",
	.proc_name = "cxgb3i",
	.queuecommand = iscsi_queuecommand,
	.change_queue_depth = iscsi_change_queue_depth,
	.can_queue = CXGB3I_SCSI_HOST_QDEPTH,
	.sg_tablesize = SG_ALL,
	.max_sectors = 0xFFFF,
	.cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
	/* error recovery handled by the generic iscsi eh */
	.eh_abort_handler = iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.target_alloc = iscsi_target_alloc,
	.use_clustering = DISABLE_CLUSTERING,
	.this_id = -1,
};
924
/*
 * cxgb3i_iscsi_transport - iscsi_transport ops for this driver.
 * Session/connection bookkeeping is mostly delegated to libiscsi and
 * libiscsi_tcp; pdu handling and TCP connect/disconnect go through the
 * cxgb3i_* callbacks, which drive the offload hardware.
 */
static struct iscsi_transport cxgb3i_iscsi_transport = {
	.owner = THIS_MODULE,
	.name = "cxgb3i",
	.caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
		| CAP_DATADGST | CAP_DIGEST_OFFLOAD |
		CAP_PADDING_OFFLOAD,
	.param_mask = ISCSI_MAX_RECV_DLENGTH |
		ISCSI_MAX_XMIT_DLENGTH |
		ISCSI_HDRDGST_EN |
		ISCSI_DATADGST_EN |
		ISCSI_INITIAL_R2T_EN |
		ISCSI_MAX_R2T |
		ISCSI_IMM_DATA_EN |
		ISCSI_FIRST_BURST |
		ISCSI_MAX_BURST |
		ISCSI_PDU_INORDER_EN |
		ISCSI_DATASEQ_INORDER_EN |
		ISCSI_ERL |
		ISCSI_CONN_PORT |
		ISCSI_CONN_ADDRESS |
		ISCSI_EXP_STATSN |
		ISCSI_PERSISTENT_PORT |
		ISCSI_PERSISTENT_ADDRESS |
		ISCSI_TARGET_NAME | ISCSI_TPGT |
		ISCSI_USERNAME | ISCSI_PASSWORD |
		ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
		ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
		ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
		ISCSI_PING_TMO | ISCSI_RECV_TMO |
		ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
	.host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
		ISCSI_HOST_INITIATOR_NAME | ISCSI_HOST_NETDEV_NAME,
	.get_host_param = cxgb3i_host_get_param,
	.set_host_param = cxgb3i_host_set_param,
	/* session management */
	.create_session = cxgb3i_session_create,
	.destroy_session = cxgb3i_session_destroy,
	.get_session_param = iscsi_session_get_param,
	/* connection management */
	.create_conn = cxgb3i_conn_create,
	.bind_conn = cxgb3i_conn_bind,
	.destroy_conn = iscsi_tcp_conn_teardown,
	.start_conn = iscsi_conn_start,
	.stop_conn = iscsi_conn_stop,
	.get_conn_param = cxgb3i_conn_get_param,
	.set_param = cxgb3i_conn_set_param,
	.get_stats = cxgb3i_conn_get_stats,
	/* pdu xmit req. from user space */
	.send_pdu = iscsi_conn_send_pdu,
	/* task */
	.init_task = iscsi_tcp_task_init,
	.xmit_task = iscsi_tcp_task_xmit,
	.cleanup_task = cxgb3i_conn_cleanup_task,

	/* pdu */
	.alloc_pdu = cxgb3i_conn_alloc_pdu,
	.init_pdu = cxgb3i_conn_init_pdu,
	.xmit_pdu = cxgb3i_conn_xmit_pdu,
	.parse_pdu_itt = cxgb3i_parse_itt,

	/* TCP connect/disconnect */
	.ep_connect = cxgb3i_ep_connect,
	.ep_poll = cxgb3i_ep_poll,
	.ep_disconnect = cxgb3i_ep_disconnect,
	/* Error recovery timeout call */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};
992
/*
 * cxgb3i_iscsi_init - set up the iscsi-transport side of the driver
 *
 * Computes the software tag layout (task-index and session-age bit
 * widths, sized from the libiscsi ITT/AGE masks) and registers the
 * cxgb3i transport.  Returns 0 on success, -ENODEV on registration
 * failure.
 */
int cxgb3i_iscsi_init(void)
{
	sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1;
	sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1;
	cxgb3i_log_info("tag itt 0x%x, %u bits, age 0x%x, %u bits.\n",
			ISCSI_ITT_MASK, sw_tag_idx_bits,
			ISCSI_AGE_MASK, sw_tag_age_bits);

	cxgb3i_scsi_transport =
	    iscsi_register_transport(&cxgb3i_iscsi_transport);
	if (!cxgb3i_scsi_transport) {
		cxgb3i_log_error("Could not register cxgb3i transport.\n");
		return -ENODEV;
	}
	cxgb3i_api_debug("cxgb3i transport 0x%p.\n", cxgb3i_scsi_transport);
	return 0;
}
1010
1011void cxgb3i_iscsi_cleanup(void)
1012{
1013 if (cxgb3i_scsi_transport) {
1014 cxgb3i_api_debug("cxgb3i transport 0x%p.\n",
1015 cxgb3i_scsi_transport);
1016 iscsi_unregister_transport(&cxgb3i_iscsi_transport);
1017 }
1018}
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c
deleted file mode 100644
index 3ee13cf9556b..000000000000
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.c
+++ /dev/null
@@ -1,1944 +0,0 @@
1/*
2 * cxgb3i_offload.c: Chelsio S3xx iscsi offloaded tcp connection management
3 *
4 * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
5 *
6 * This program is distributed in the hope that it will be useful, but WITHOUT
7 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
9 * release for licensing terms and conditions.
10 *
11 * Written by: Dimitris Michailidis (dm@chelsio.com)
12 * Karen Xie (kxie@chelsio.com)
13 */
14
15#include <linux/if_vlan.h>
16#include <linux/slab.h>
17#include <linux/version.h>
18
19#include "cxgb3_defs.h"
20#include "cxgb3_ctl_defs.h"
21#include "firmware_exports.h"
22#include "cxgb3i_offload.h"
23#include "cxgb3i_pdu.h"
24#include "cxgb3i_ddp.h"
25
26#ifdef __DEBUG_C3CN_CONN__
27#define c3cn_conn_debug cxgb3i_log_debug
28#else
29#define c3cn_conn_debug(fmt...)
30#endif
31
32#ifdef __DEBUG_C3CN_TX__
33#define c3cn_tx_debug cxgb3i_log_debug
34#else
35#define c3cn_tx_debug(fmt...)
36#endif
37
38#ifdef __DEBUG_C3CN_RX__
39#define c3cn_rx_debug cxgb3i_log_debug
40#else
41#define c3cn_rx_debug(fmt...)
42#endif
43
44/*
45 * module parameters releated to offloaded iscsi connection
46 */
47static int cxgb3_rcv_win = 256 * 1024;
48module_param(cxgb3_rcv_win, int, 0644);
49MODULE_PARM_DESC(cxgb3_rcv_win, "TCP receive window in bytes (default=256KB)");
50
51static int cxgb3_snd_win = 128 * 1024;
52module_param(cxgb3_snd_win, int, 0644);
53MODULE_PARM_DESC(cxgb3_snd_win, "TCP send window in bytes (default=128KB)");
54
55static int cxgb3_rx_credit_thres = 10 * 1024;
56module_param(cxgb3_rx_credit_thres, int, 0644);
57MODULE_PARM_DESC(rx_credit_thres,
58 "RX credits return threshold in bytes (default=10KB)");
59
60static unsigned int cxgb3_max_connect = 8 * 1024;
61module_param(cxgb3_max_connect, uint, 0644);
62MODULE_PARM_DESC(cxgb3_max_connect, "Max. # of connections (default=8092)");
63
64static unsigned int cxgb3_sport_base = 20000;
65module_param(cxgb3_sport_base, uint, 0644);
66MODULE_PARM_DESC(cxgb3_sport_base, "starting port number (default=20000)");
67
68/*
69 * cxgb3i tcp connection data(per adapter) list
70 */
71static LIST_HEAD(cdata_list);
72static DEFINE_RWLOCK(cdata_rwlock);
73
74static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion);
75static void c3cn_release_offload_resources(struct s3_conn *c3cn);
76
77/*
78 * iscsi source port management
79 *
80 * Find a free source port in the port allocation map. We use a very simple
81 * rotor scheme to look for the next free port.
82 *
83 * If a source port has been specified make sure that it doesn't collide with
84 * our normal source port allocation map. If it's outside the range of our
85 * allocation/deallocation scheme just let them use it.
86 *
87 * If the source port is outside our allocation range, the caller is
88 * responsible for keeping track of their port usage.
89 */
90static int c3cn_get_port(struct s3_conn *c3cn, struct cxgb3i_sdev_data *cdata)
91{
92 unsigned int start;
93 int idx;
94
95 if (!cdata)
96 goto error_out;
97
98 if (c3cn->saddr.sin_port) {
99 cxgb3i_log_error("connect, sin_port NON-ZERO %u.\n",
100 c3cn->saddr.sin_port);
101 return -EADDRINUSE;
102 }
103
104 spin_lock_bh(&cdata->lock);
105 start = idx = cdata->sport_next;
106 do {
107 if (++idx >= cxgb3_max_connect)
108 idx = 0;
109 if (!cdata->sport_conn[idx]) {
110 c3cn->saddr.sin_port = htons(cxgb3_sport_base + idx);
111 cdata->sport_next = idx;
112 cdata->sport_conn[idx] = c3cn;
113 spin_unlock_bh(&cdata->lock);
114
115 c3cn_conn_debug("%s reserve port %u.\n",
116 cdata->cdev->name,
117 cxgb3_sport_base + idx);
118 return 0;
119 }
120 } while (idx != start);
121 spin_unlock_bh(&cdata->lock);
122
123error_out:
124 return -EADDRNOTAVAIL;
125}
126
127static void c3cn_put_port(struct s3_conn *c3cn)
128{
129 if (!c3cn->cdev)
130 return;
131
132 if (c3cn->saddr.sin_port) {
133 struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(c3cn->cdev);
134 int idx = ntohs(c3cn->saddr.sin_port) - cxgb3_sport_base;
135
136 c3cn->saddr.sin_port = 0;
137 if (idx < 0 || idx >= cxgb3_max_connect)
138 return;
139 spin_lock_bh(&cdata->lock);
140 cdata->sport_conn[idx] = NULL;
141 spin_unlock_bh(&cdata->lock);
142 c3cn_conn_debug("%s, release port %u.\n",
143 cdata->cdev->name, cxgb3_sport_base + idx);
144 }
145}
146
147static inline void c3cn_set_flag(struct s3_conn *c3cn, enum c3cn_flags flag)
148{
149 __set_bit(flag, &c3cn->flags);
150 c3cn_conn_debug("c3cn 0x%p, set %d, s %u, f 0x%lx.\n",
151 c3cn, flag, c3cn->state, c3cn->flags);
152}
153
154static inline void c3cn_clear_flag(struct s3_conn *c3cn, enum c3cn_flags flag)
155{
156 __clear_bit(flag, &c3cn->flags);
157 c3cn_conn_debug("c3cn 0x%p, clear %d, s %u, f 0x%lx.\n",
158 c3cn, flag, c3cn->state, c3cn->flags);
159}
160
161static inline int c3cn_flag(struct s3_conn *c3cn, enum c3cn_flags flag)
162{
163 if (c3cn == NULL)
164 return 0;
165 return test_bit(flag, &c3cn->flags);
166}
167
168static void c3cn_set_state(struct s3_conn *c3cn, int state)
169{
170 c3cn_conn_debug("c3cn 0x%p state -> %u.\n", c3cn, state);
171 c3cn->state = state;
172}
173
174static inline void c3cn_hold(struct s3_conn *c3cn)
175{
176 atomic_inc(&c3cn->refcnt);
177}
178
179static inline void c3cn_put(struct s3_conn *c3cn)
180{
181 if (atomic_dec_and_test(&c3cn->refcnt)) {
182 c3cn_conn_debug("free c3cn 0x%p, s %u, f 0x%lx.\n",
183 c3cn, c3cn->state, c3cn->flags);
184 kfree(c3cn);
185 }
186}
187
188static void c3cn_closed(struct s3_conn *c3cn)
189{
190 c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
191 c3cn, c3cn->state, c3cn->flags);
192
193 c3cn_put_port(c3cn);
194 c3cn_release_offload_resources(c3cn);
195 c3cn_set_state(c3cn, C3CN_STATE_CLOSED);
196 cxgb3i_conn_closing(c3cn);
197}
198
199/*
200 * CPL (Chelsio Protocol Language) defines a message passing interface between
201 * the host driver and T3 asic.
202 * The section below implments CPLs that related to iscsi tcp connection
203 * open/close/abort and data send/receive.
204 */
205
206/*
207 * CPL connection active open request: host ->
208 */
209static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
210{
211 int i = 0;
212
213 while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
214 ++i;
215 return i;
216}
217
218static unsigned int select_mss(struct s3_conn *c3cn, unsigned int pmtu)
219{
220 unsigned int idx;
221 struct dst_entry *dst = c3cn->dst_cache;
222 struct t3cdev *cdev = c3cn->cdev;
223 const struct t3c_data *td = T3C_DATA(cdev);
224 u16 advmss = dst_metric(dst, RTAX_ADVMSS);
225
226 if (advmss > pmtu - 40)
227 advmss = pmtu - 40;
228 if (advmss < td->mtus[0] - 40)
229 advmss = td->mtus[0] - 40;
230 idx = find_best_mtu(td, advmss + 40);
231 return idx;
232}
233
/*
 * compute_wscale - smallest TCP window-scale shift covering @win.
 * Returns the least s in [0, 14] with 65535 << s >= win (14 if none).
 */
static inline int compute_wscale(int win)
{
    int shift;

    for (shift = 0; shift < 14; shift++)
        if ((65535 << shift) >= win)
            break;
    return shift;
}
241
242static inline unsigned int calc_opt0h(struct s3_conn *c3cn)
243{
244 int wscale = compute_wscale(cxgb3_rcv_win);
245 return V_KEEP_ALIVE(1) |
246 F_TCAM_BYPASS |
247 V_WND_SCALE(wscale) |
248 V_MSS_IDX(c3cn->mss_idx);
249}
250
251static inline unsigned int calc_opt0l(struct s3_conn *c3cn)
252{
253 return V_ULP_MODE(ULP_MODE_ISCSI) |
254 V_RCV_BUFSIZ(cxgb3_rcv_win>>10);
255}
256
257static void make_act_open_req(struct s3_conn *c3cn, struct sk_buff *skb,
258 unsigned int atid, const struct l2t_entry *e)
259{
260 struct cpl_act_open_req *req;
261
262 c3cn_conn_debug("c3cn 0x%p, atid 0x%x.\n", c3cn, atid);
263
264 skb->priority = CPL_PRIORITY_SETUP;
265 req = (struct cpl_act_open_req *)__skb_put(skb, sizeof(*req));
266 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
267 req->wr.wr_lo = 0;
268 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, atid));
269 req->local_port = c3cn->saddr.sin_port;
270 req->peer_port = c3cn->daddr.sin_port;
271 req->local_ip = c3cn->saddr.sin_addr.s_addr;
272 req->peer_ip = c3cn->daddr.sin_addr.s_addr;
273 req->opt0h = htonl(calc_opt0h(c3cn) | V_L2T_IDX(e->idx) |
274 V_TX_CHANNEL(e->smt_idx));
275 req->opt0l = htonl(calc_opt0l(c3cn));
276 req->params = 0;
277 req->opt2 = 0;
278}
279
280static void fail_act_open(struct s3_conn *c3cn, int errno)
281{
282 c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
283 c3cn, c3cn->state, c3cn->flags);
284 c3cn->err = errno;
285 c3cn_closed(c3cn);
286}
287
288static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
289{
290 struct s3_conn *c3cn = (struct s3_conn *)skb->sk;
291
292 c3cn_conn_debug("c3cn 0x%p, state %u.\n", c3cn, c3cn->state);
293
294 c3cn_hold(c3cn);
295 spin_lock_bh(&c3cn->lock);
296 if (c3cn->state == C3CN_STATE_CONNECTING)
297 fail_act_open(c3cn, -EHOSTUNREACH);
298 spin_unlock_bh(&c3cn->lock);
299 c3cn_put(c3cn);
300 __kfree_skb(skb);
301}
302
303/*
304 * CPL connection close request: host ->
305 *
306 * Close a connection by sending a CPL_CLOSE_CON_REQ message and queue it to
307 * the write queue (i.e., after any unsent txt data).
308 */
309static void skb_entail(struct s3_conn *c3cn, struct sk_buff *skb,
310 int flags)
311{
312 skb_tcp_seq(skb) = c3cn->write_seq;
313 skb_flags(skb) = flags;
314 __skb_queue_tail(&c3cn->write_queue, skb);
315}
316
317static void send_close_req(struct s3_conn *c3cn)
318{
319 struct sk_buff *skb = c3cn->cpl_close;
320 struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
321 unsigned int tid = c3cn->tid;
322
323 c3cn_conn_debug("c3cn 0x%p, state 0x%x, flag 0x%lx.\n",
324 c3cn, c3cn->state, c3cn->flags);
325
326 c3cn->cpl_close = NULL;
327
328 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
329 req->wr.wr_lo = htonl(V_WR_TID(tid));
330 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
331 req->rsvd = htonl(c3cn->write_seq);
332
333 skb_entail(c3cn, skb, C3CB_FLAG_NO_APPEND);
334 if (c3cn->state != C3CN_STATE_CONNECTING)
335 c3cn_push_tx_frames(c3cn, 1);
336}
337
338/*
339 * CPL connection abort request: host ->
340 *
341 * Send an ABORT_REQ message. Makes sure we do not send multiple ABORT_REQs
342 * for the same connection and also that we do not try to send a message
343 * after the connection has closed.
344 */
345static void abort_arp_failure(struct t3cdev *cdev, struct sk_buff *skb)
346{
347 struct cpl_abort_req *req = cplhdr(skb);
348
349 c3cn_conn_debug("tdev 0x%p.\n", cdev);
350
351 req->cmd = CPL_ABORT_NO_RST;
352 cxgb3_ofld_send(cdev, skb);
353}
354
355static inline void c3cn_purge_write_queue(struct s3_conn *c3cn)
356{
357 struct sk_buff *skb;
358
359 while ((skb = __skb_dequeue(&c3cn->write_queue)))
360 __kfree_skb(skb);
361}
362
363static void send_abort_req(struct s3_conn *c3cn)
364{
365 struct sk_buff *skb = c3cn->cpl_abort_req;
366 struct cpl_abort_req *req;
367 unsigned int tid = c3cn->tid;
368
369 if (unlikely(c3cn->state == C3CN_STATE_ABORTING) || !skb ||
370 !c3cn->cdev)
371 return;
372
373 c3cn_set_state(c3cn, C3CN_STATE_ABORTING);
374
375 c3cn_conn_debug("c3cn 0x%p, flag ABORT_RPL + ABORT_SHUT.\n", c3cn);
376
377 c3cn_set_flag(c3cn, C3CN_ABORT_RPL_PENDING);
378
379 /* Purge the send queue so we don't send anything after an abort. */
380 c3cn_purge_write_queue(c3cn);
381
382 c3cn->cpl_abort_req = NULL;
383 req = (struct cpl_abort_req *)skb->head;
384 memset(req, 0, sizeof(*req));
385
386 skb->priority = CPL_PRIORITY_DATA;
387 set_arp_failure_handler(skb, abort_arp_failure);
388
389 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
390 req->wr.wr_lo = htonl(V_WR_TID(tid));
391 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
392 req->rsvd0 = htonl(c3cn->snd_nxt);
393 req->rsvd1 = !c3cn_flag(c3cn, C3CN_TX_DATA_SENT);
394 req->cmd = CPL_ABORT_SEND_RST;
395
396 l2t_send(c3cn->cdev, skb, c3cn->l2t);
397}
398
399/*
400 * CPL connection abort reply: host ->
401 *
402 * Send an ABORT_RPL message in response of the ABORT_REQ received.
403 */
404static void send_abort_rpl(struct s3_conn *c3cn, int rst_status)
405{
406 struct sk_buff *skb = c3cn->cpl_abort_rpl;
407 struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;
408
409 c3cn->cpl_abort_rpl = NULL;
410
411 skb->priority = CPL_PRIORITY_DATA;
412 memset(rpl, 0, sizeof(*rpl));
413 rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
414 rpl->wr.wr_lo = htonl(V_WR_TID(c3cn->tid));
415 OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, c3cn->tid));
416 rpl->cmd = rst_status;
417
418 cxgb3_ofld_send(c3cn->cdev, skb);
419}
420
421/*
422 * CPL connection rx data ack: host ->
423 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
424 * credits sent.
425 */
426static u32 send_rx_credits(struct s3_conn *c3cn, u32 credits, u32 dack)
427{
428 struct sk_buff *skb;
429 struct cpl_rx_data_ack *req;
430
431 skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
432 if (!skb)
433 return 0;
434
435 req = (struct cpl_rx_data_ack *)__skb_put(skb, sizeof(*req));
436 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
437 req->wr.wr_lo = 0;
438 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, c3cn->tid));
439 req->credit_dack = htonl(dack | V_RX_CREDITS(credits));
440 skb->priority = CPL_PRIORITY_ACK;
441 cxgb3_ofld_send(c3cn->cdev, skb);
442 return credits;
443}
444
445/*
446 * CPL connection tx data: host ->
447 *
448 * Send iscsi PDU via TX_DATA CPL message. Returns the number of
449 * credits sent.
450 * Each TX_DATA consumes work request credit (wrs), so we need to keep track of
451 * how many we've used so far and how many are pending (i.e., yet ack'ed by T3).
452 */
453
454/*
455 * For ULP connections HW may inserts digest bytes into the pdu. Those digest
456 * bytes are not sent by the host but are part of the TCP payload and therefore
457 * consume TCP sequence space.
458 */
/* Extra TCP payload bytes the HW inserts per ULP submode (digests). */
static const unsigned int cxgb3_ulp_extra_len[] = { 0, 4, 4, 8 };

/* Digest bytes the HW will append for this skb's ULP submode. */
static inline unsigned int ulp_extra_len(const struct sk_buff *skb)
{
    return cxgb3_ulp_extra_len[skb_ulp_mode(skb) & 3];
}
464
465static unsigned int wrlen __read_mostly;
466
467/*
468 * The number of WRs needed for an skb depends on the number of fragments
469 * in the skb and whether it has any payload in its main body. This maps the
470 * length of the gather list represented by an skb into the # of necessary WRs.
471 * The extra two fragments are for iscsi bhs and payload padding.
472 */
473#define SKB_WR_LIST_SIZE (MAX_SKB_FRAGS + 2)
474static unsigned int skb_wrs[SKB_WR_LIST_SIZE] __read_mostly;
475
476static void s3_init_wr_tab(unsigned int wr_len)
477{
478 int i;
479
480 if (skb_wrs[1]) /* already initialized */
481 return;
482
483 for (i = 1; i < SKB_WR_LIST_SIZE; i++) {
484 int sgl_len = (3 * i) / 2 + (i & 1);
485
486 sgl_len += 3;
487 skb_wrs[i] = (sgl_len <= wr_len
488 ? 1 : 1 + (sgl_len - 2) / (wr_len - 1));
489 }
490
491 wrlen = wr_len * 8;
492}
493
494static inline void reset_wr_list(struct s3_conn *c3cn)
495{
496 c3cn->wr_pending_head = c3cn->wr_pending_tail = NULL;
497}
498
499/*
500 * Add a WR to a connections's list of pending WRs. This is a singly-linked
501 * list of sk_buffs operating as a FIFO. The head is kept in wr_pending_head
502 * and the tail in wr_pending_tail.
503 */
504static inline void enqueue_wr(struct s3_conn *c3cn,
505 struct sk_buff *skb)
506{
507 skb_tx_wr_next(skb) = NULL;
508
509 /*
510 * We want to take an extra reference since both us and the driver
511 * need to free the packet before it's really freed. We know there's
512 * just one user currently so we use atomic_set rather than skb_get
513 * to avoid the atomic op.
514 */
515 atomic_set(&skb->users, 2);
516
517 if (!c3cn->wr_pending_head)
518 c3cn->wr_pending_head = skb;
519 else
520 skb_tx_wr_next(c3cn->wr_pending_tail) = skb;
521 c3cn->wr_pending_tail = skb;
522}
523
524static int count_pending_wrs(struct s3_conn *c3cn)
525{
526 int n = 0;
527 const struct sk_buff *skb = c3cn->wr_pending_head;
528
529 while (skb) {
530 n += skb->csum;
531 skb = skb_tx_wr_next(skb);
532 }
533 return n;
534}
535
536static inline struct sk_buff *peek_wr(const struct s3_conn *c3cn)
537{
538 return c3cn->wr_pending_head;
539}
540
541static inline void free_wr_skb(struct sk_buff *skb)
542{
543 kfree_skb(skb);
544}
545
546static inline struct sk_buff *dequeue_wr(struct s3_conn *c3cn)
547{
548 struct sk_buff *skb = c3cn->wr_pending_head;
549
550 if (likely(skb)) {
551 /* Don't bother clearing the tail */
552 c3cn->wr_pending_head = skb_tx_wr_next(skb);
553 skb_tx_wr_next(skb) = NULL;
554 }
555 return skb;
556}
557
558static void purge_wr_queue(struct s3_conn *c3cn)
559{
560 struct sk_buff *skb;
561 while ((skb = dequeue_wr(c3cn)) != NULL)
562 free_wr_skb(skb);
563}
564
565static inline void make_tx_data_wr(struct s3_conn *c3cn, struct sk_buff *skb,
566 int len, int req_completion)
567{
568 struct tx_data_wr *req;
569
570 skb_reset_transport_header(skb);
571 req = (struct tx_data_wr *)__skb_push(skb, sizeof(*req));
572 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA) |
573 (req_completion ? F_WR_COMPL : 0));
574 req->wr_lo = htonl(V_WR_TID(c3cn->tid));
575 req->sndseq = htonl(c3cn->snd_nxt);
576 /* len includes the length of any HW ULP additions */
577 req->len = htonl(len);
578 req->param = htonl(V_TX_PORT(c3cn->l2t->smt_idx));
579 /* V_TX_ULP_SUBMODE sets both the mode and submode */
580 req->flags = htonl(V_TX_ULP_SUBMODE(skb_ulp_mode(skb)) |
581 V_TX_SHOVE((skb_peek(&c3cn->write_queue) ? 0 : 1)));
582
583 if (!c3cn_flag(c3cn, C3CN_TX_DATA_SENT)) {
584 req->flags |= htonl(V_TX_ACK_PAGES(2) | F_TX_INIT |
585 V_TX_CPU_IDX(c3cn->qset));
586 /* Sendbuffer is in units of 32KB. */
587 req->param |= htonl(V_TX_SNDBUF(cxgb3_snd_win >> 15));
588 c3cn_set_flag(c3cn, C3CN_TX_DATA_SENT);
589 }
590}
591
592/**
593 * c3cn_push_tx_frames -- start transmit
594 * @c3cn: the offloaded connection
595 * @req_completion: request wr_ack or not
596 *
597 * Prepends TX_DATA_WR or CPL_CLOSE_CON_REQ headers to buffers waiting in a
598 * connection's send queue and sends them on to T3. Must be called with the
599 * connection's lock held. Returns the amount of send buffer space that was
600 * freed as a result of sending queued data to T3.
601 */
602static void arp_failure_discard(struct t3cdev *cdev, struct sk_buff *skb)
603{
604 kfree_skb(skb);
605}
606
607static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion)
608{
609 int total_size = 0;
610 struct sk_buff *skb;
611 struct t3cdev *cdev;
612 struct cxgb3i_sdev_data *cdata;
613
614 if (unlikely(c3cn->state == C3CN_STATE_CONNECTING ||
615 c3cn->state == C3CN_STATE_CLOSE_WAIT_1 ||
616 c3cn->state >= C3CN_STATE_ABORTING)) {
617 c3cn_tx_debug("c3cn 0x%p, in closing state %u.\n",
618 c3cn, c3cn->state);
619 return 0;
620 }
621
622 cdev = c3cn->cdev;
623 cdata = CXGB3_SDEV_DATA(cdev);
624
625 while (c3cn->wr_avail
626 && (skb = skb_peek(&c3cn->write_queue)) != NULL) {
627 int len = skb->len; /* length before skb_push */
628 int frags = skb_shinfo(skb)->nr_frags + (len != skb->data_len);
629 int wrs_needed = skb_wrs[frags];
630
631 if (wrs_needed > 1 && len + sizeof(struct tx_data_wr) <= wrlen)
632 wrs_needed = 1;
633
634 WARN_ON(frags >= SKB_WR_LIST_SIZE || wrs_needed < 1);
635
636 if (c3cn->wr_avail < wrs_needed) {
637 c3cn_tx_debug("c3cn 0x%p, skb len %u/%u, frag %u, "
638 "wr %d < %u.\n",
639 c3cn, skb->len, skb->data_len, frags,
640 wrs_needed, c3cn->wr_avail);
641 break;
642 }
643
644 __skb_unlink(skb, &c3cn->write_queue);
645 skb->priority = CPL_PRIORITY_DATA;
646 skb->csum = wrs_needed; /* remember this until the WR_ACK */
647 c3cn->wr_avail -= wrs_needed;
648 c3cn->wr_unacked += wrs_needed;
649 enqueue_wr(c3cn, skb);
650
651 c3cn_tx_debug("c3cn 0x%p, enqueue, skb len %u/%u, frag %u, "
652 "wr %d, left %u, unack %u.\n",
653 c3cn, skb->len, skb->data_len, frags,
654 wrs_needed, c3cn->wr_avail, c3cn->wr_unacked);
655
656
657 if (likely(skb_flags(skb) & C3CB_FLAG_NEED_HDR)) {
658 if ((req_completion &&
659 c3cn->wr_unacked == wrs_needed) ||
660 (skb_flags(skb) & C3CB_FLAG_COMPL) ||
661 c3cn->wr_unacked >= c3cn->wr_max / 2) {
662 req_completion = 1;
663 c3cn->wr_unacked = 0;
664 }
665 len += ulp_extra_len(skb);
666 make_tx_data_wr(c3cn, skb, len, req_completion);
667 c3cn->snd_nxt += len;
668 skb_flags(skb) &= ~C3CB_FLAG_NEED_HDR;
669 }
670
671 total_size += skb->truesize;
672 set_arp_failure_handler(skb, arp_failure_discard);
673 l2t_send(cdev, skb, c3cn->l2t);
674 }
675 return total_size;
676}
677
678/*
679 * process_cpl_msg: -> host
680 * Top-level CPL message processing used by most CPL messages that
681 * pertain to connections.
682 */
683static inline void process_cpl_msg(void (*fn)(struct s3_conn *,
684 struct sk_buff *),
685 struct s3_conn *c3cn,
686 struct sk_buff *skb)
687{
688 spin_lock_bh(&c3cn->lock);
689 fn(c3cn, skb);
690 spin_unlock_bh(&c3cn->lock);
691}
692
693/*
694 * process_cpl_msg_ref: -> host
695 * Similar to process_cpl_msg() but takes an extra connection reference around
696 * the call to the handler. Should be used if the handler may drop a
697 * connection reference.
698 */
699static inline void process_cpl_msg_ref(void (*fn) (struct s3_conn *,
700 struct sk_buff *),
701 struct s3_conn *c3cn,
702 struct sk_buff *skb)
703{
704 c3cn_hold(c3cn);
705 process_cpl_msg(fn, c3cn, skb);
706 c3cn_put(c3cn);
707}
708
709/*
710 * Process a CPL_ACT_ESTABLISH message: -> host
711 * Updates connection state from an active establish CPL message. Runs with
712 * the connection lock held.
713 */
714
/* Release an active-open tid and drop the reference it held. */
static inline void s3_free_atid(struct t3cdev *cdev, unsigned int tid)
{
    struct s3_conn *c3cn = cxgb3_free_atid(cdev, tid);

    if (c3cn)
        c3cn_put(c3cn);
}
721
722static void c3cn_established(struct s3_conn *c3cn, u32 snd_isn,
723 unsigned int opt)
724{
725 c3cn_conn_debug("c3cn 0x%p, state %u.\n", c3cn, c3cn->state);
726
727 c3cn->write_seq = c3cn->snd_nxt = c3cn->snd_una = snd_isn;
728
729 /*
730 * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
731 * pass through opt0.
732 */
733 if (cxgb3_rcv_win > (M_RCV_BUFSIZ << 10))
734 c3cn->rcv_wup -= cxgb3_rcv_win - (M_RCV_BUFSIZ << 10);
735
736 dst_confirm(c3cn->dst_cache);
737
738 smp_mb();
739
740 c3cn_set_state(c3cn, C3CN_STATE_ESTABLISHED);
741}
742
743static void process_act_establish(struct s3_conn *c3cn, struct sk_buff *skb)
744{
745 struct cpl_act_establish *req = cplhdr(skb);
746 u32 rcv_isn = ntohl(req->rcv_isn); /* real RCV_ISN + 1 */
747
748 c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
749 c3cn, c3cn->state, c3cn->flags);
750
751 if (unlikely(c3cn->state != C3CN_STATE_CONNECTING))
752 cxgb3i_log_error("TID %u expected SYN_SENT, got EST., s %u\n",
753 c3cn->tid, c3cn->state);
754
755 c3cn->copied_seq = c3cn->rcv_wup = c3cn->rcv_nxt = rcv_isn;
756 c3cn_established(c3cn, ntohl(req->snd_isn), ntohs(req->tcp_opt));
757
758 __kfree_skb(skb);
759
760 if (unlikely(c3cn_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED)))
761 /* upper layer has requested closing */
762 send_abort_req(c3cn);
763 else {
764 if (skb_queue_len(&c3cn->write_queue))
765 c3cn_push_tx_frames(c3cn, 1);
766 cxgb3i_conn_tx_open(c3cn);
767 }
768}
769
770static int do_act_establish(struct t3cdev *cdev, struct sk_buff *skb,
771 void *ctx)
772{
773 struct cpl_act_establish *req = cplhdr(skb);
774 unsigned int tid = GET_TID(req);
775 unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
776 struct s3_conn *c3cn = ctx;
777 struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(cdev);
778
779 c3cn_conn_debug("rcv, tid 0x%x, c3cn 0x%p, s %u, f 0x%lx.\n",
780 tid, c3cn, c3cn->state, c3cn->flags);
781
782 c3cn->tid = tid;
783 c3cn_hold(c3cn);
784 cxgb3_insert_tid(cdata->cdev, cdata->client, c3cn, tid);
785 s3_free_atid(cdev, atid);
786
787 c3cn->qset = G_QNUM(ntohl(skb->csum));
788
789 process_cpl_msg(process_act_establish, c3cn, skb);
790 return 0;
791}
792
793/*
794 * Process a CPL_ACT_OPEN_RPL message: -> host
795 * Handle active open failures.
796 */
/* Map a CPL_ACT_OPEN_RPL status to a negative errno for the caller. */
static int act_open_rpl_status_to_errno(int status)
{
    switch (status) {
    case CPL_ERR_CONN_RESET:
        return -ECONNREFUSED;
    case CPL_ERR_ARP_MISS:
        return -EHOSTUNREACH;
    case CPL_ERR_CONN_TIMEDOUT:
        return -ETIMEDOUT;
    case CPL_ERR_TCAM_FULL:
        return -ENOMEM;
    case CPL_ERR_CONN_EXIST:
        /* the 4-tuple collided with an existing offloaded connection */
        cxgb3i_log_error("ACTIVE_OPEN_RPL: 4-tuple in use\n");
        return -EADDRINUSE;
    default:
        return -EIO;
    }
}
815
816static void act_open_retry_timer(unsigned long data)
817{
818 struct sk_buff *skb;
819 struct s3_conn *c3cn = (struct s3_conn *)data;
820
821 c3cn_conn_debug("c3cn 0x%p, state %u.\n", c3cn, c3cn->state);
822
823 spin_lock_bh(&c3cn->lock);
824 skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_ATOMIC);
825 if (!skb)
826 fail_act_open(c3cn, -ENOMEM);
827 else {
828 skb->sk = (struct sock *)c3cn;
829 set_arp_failure_handler(skb, act_open_req_arp_failure);
830 make_act_open_req(c3cn, skb, c3cn->tid, c3cn->l2t);
831 l2t_send(c3cn->cdev, skb, c3cn->l2t);
832 }
833 spin_unlock_bh(&c3cn->lock);
834 c3cn_put(c3cn);
835}
836
837static void process_act_open_rpl(struct s3_conn *c3cn, struct sk_buff *skb)
838{
839 struct cpl_act_open_rpl *rpl = cplhdr(skb);
840
841 c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
842 c3cn, c3cn->state, c3cn->flags);
843
844 if (rpl->status == CPL_ERR_CONN_EXIST &&
845 c3cn->retry_timer.function != act_open_retry_timer) {
846 c3cn->retry_timer.function = act_open_retry_timer;
847 if (!mod_timer(&c3cn->retry_timer, jiffies + HZ / 2))
848 c3cn_hold(c3cn);
849 } else
850 fail_act_open(c3cn, act_open_rpl_status_to_errno(rpl->status));
851 __kfree_skb(skb);
852}
853
854static int do_act_open_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
855{
856 struct s3_conn *c3cn = ctx;
857 struct cpl_act_open_rpl *rpl = cplhdr(skb);
858
859 c3cn_conn_debug("rcv, status 0x%x, c3cn 0x%p, s %u, f 0x%lx.\n",
860 rpl->status, c3cn, c3cn->state, c3cn->flags);
861
862 if (rpl->status != CPL_ERR_TCAM_FULL &&
863 rpl->status != CPL_ERR_CONN_EXIST &&
864 rpl->status != CPL_ERR_ARP_MISS)
865 cxgb3_queue_tid_release(cdev, GET_TID(rpl));
866
867 process_cpl_msg_ref(process_act_open_rpl, c3cn, skb);
868 return 0;
869}
870
871/*
872 * Process PEER_CLOSE CPL messages: -> host
873 * Handle peer FIN.
874 */
875static void process_peer_close(struct s3_conn *c3cn, struct sk_buff *skb)
876{
877 c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
878 c3cn, c3cn->state, c3cn->flags);
879
880 if (c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING))
881 goto out;
882
883 switch (c3cn->state) {
884 case C3CN_STATE_ESTABLISHED:
885 c3cn_set_state(c3cn, C3CN_STATE_PASSIVE_CLOSE);
886 break;
887 case C3CN_STATE_ACTIVE_CLOSE:
888 c3cn_set_state(c3cn, C3CN_STATE_CLOSE_WAIT_2);
889 break;
890 case C3CN_STATE_CLOSE_WAIT_1:
891 c3cn_closed(c3cn);
892 break;
893 case C3CN_STATE_ABORTING:
894 break;
895 default:
896 cxgb3i_log_error("%s: peer close, TID %u in bad state %u\n",
897 c3cn->cdev->name, c3cn->tid, c3cn->state);
898 }
899
900 cxgb3i_conn_closing(c3cn);
901out:
902 __kfree_skb(skb);
903}
904
905static int do_peer_close(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
906{
907 struct s3_conn *c3cn = ctx;
908
909 c3cn_conn_debug("rcv, c3cn 0x%p, s %u, f 0x%lx.\n",
910 c3cn, c3cn->state, c3cn->flags);
911 process_cpl_msg_ref(process_peer_close, c3cn, skb);
912 return 0;
913}
914
915/*
916 * Process CLOSE_CONN_RPL CPL message: -> host
917 * Process a peer ACK to our FIN.
918 */
919static void process_close_con_rpl(struct s3_conn *c3cn, struct sk_buff *skb)
920{
921 struct cpl_close_con_rpl *rpl = cplhdr(skb);
922
923 c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
924 c3cn, c3cn->state, c3cn->flags);
925
926 c3cn->snd_una = ntohl(rpl->snd_nxt) - 1; /* exclude FIN */
927
928 if (c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING))
929 goto out;
930
931 switch (c3cn->state) {
932 case C3CN_STATE_ACTIVE_CLOSE:
933 c3cn_set_state(c3cn, C3CN_STATE_CLOSE_WAIT_1);
934 break;
935 case C3CN_STATE_CLOSE_WAIT_1:
936 case C3CN_STATE_CLOSE_WAIT_2:
937 c3cn_closed(c3cn);
938 break;
939 case C3CN_STATE_ABORTING:
940 break;
941 default:
942 cxgb3i_log_error("%s: close_rpl, TID %u in bad state %u\n",
943 c3cn->cdev->name, c3cn->tid, c3cn->state);
944 }
945
946out:
947 kfree_skb(skb);
948}
949
950static int do_close_con_rpl(struct t3cdev *cdev, struct sk_buff *skb,
951 void *ctx)
952{
953 struct s3_conn *c3cn = ctx;
954
955 c3cn_conn_debug("rcv, c3cn 0x%p, s %u, f 0x%lx.\n",
956 c3cn, c3cn->state, c3cn->flags);
957
958 process_cpl_msg_ref(process_close_con_rpl, c3cn, skb);
959 return 0;
960}
961
962/*
963 * Process ABORT_REQ_RSS CPL message: -> host
964 * Process abort requests. If we are waiting for an ABORT_RPL we ignore this
965 * request except that we need to reply to it.
966 */
967
968static int abort_status_to_errno(struct s3_conn *c3cn, int abort_reason,
969 int *need_rst)
970{
971 switch (abort_reason) {
972 case CPL_ERR_BAD_SYN: /* fall through */
973 case CPL_ERR_CONN_RESET:
974 return c3cn->state > C3CN_STATE_ESTABLISHED ?
975 -EPIPE : -ECONNRESET;
976 case CPL_ERR_XMIT_TIMEDOUT:
977 case CPL_ERR_PERSIST_TIMEDOUT:
978 case CPL_ERR_FINWAIT2_TIMEDOUT:
979 case CPL_ERR_KEEPALIVE_TIMEDOUT:
980 return -ETIMEDOUT;
981 default:
982 return -EIO;
983 }
984}
985
986static void process_abort_req(struct s3_conn *c3cn, struct sk_buff *skb)
987{
988 int rst_status = CPL_ABORT_NO_RST;
989 const struct cpl_abort_req_rss *req = cplhdr(skb);
990
991 c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
992 c3cn, c3cn->state, c3cn->flags);
993
994 if (!c3cn_flag(c3cn, C3CN_ABORT_REQ_RCVD)) {
995 c3cn_set_flag(c3cn, C3CN_ABORT_REQ_RCVD);
996 c3cn_set_state(c3cn, C3CN_STATE_ABORTING);
997 __kfree_skb(skb);
998 return;
999 }
1000
1001 c3cn_clear_flag(c3cn, C3CN_ABORT_REQ_RCVD);
1002 send_abort_rpl(c3cn, rst_status);
1003
1004 if (!c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING)) {
1005 c3cn->err =
1006 abort_status_to_errno(c3cn, req->status, &rst_status);
1007 c3cn_closed(c3cn);
1008 }
1009}
1010
1011static int do_abort_req(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
1012{
1013 const struct cpl_abort_req_rss *req = cplhdr(skb);
1014 struct s3_conn *c3cn = ctx;
1015
1016 c3cn_conn_debug("rcv, c3cn 0x%p, s 0x%x, f 0x%lx.\n",
1017 c3cn, c3cn->state, c3cn->flags);
1018
1019 if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
1020 req->status == CPL_ERR_PERSIST_NEG_ADVICE) {
1021 __kfree_skb(skb);
1022 return 0;
1023 }
1024
1025 process_cpl_msg_ref(process_abort_req, c3cn, skb);
1026 return 0;
1027}
1028
1029/*
1030 * Process ABORT_RPL_RSS CPL message: -> host
1031 * Process abort replies. We only process these messages if we anticipate
1032 * them as the coordination between SW and HW in this area is somewhat lacking
1033 * and sometimes we get ABORT_RPLs after we are done with the connection that
1034 * originated the ABORT_REQ.
1035 */
1036static void process_abort_rpl(struct s3_conn *c3cn, struct sk_buff *skb)
1037{
1038 c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
1039 c3cn, c3cn->state, c3cn->flags);
1040
1041 if (c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING)) {
1042 if (!c3cn_flag(c3cn, C3CN_ABORT_RPL_RCVD))
1043 c3cn_set_flag(c3cn, C3CN_ABORT_RPL_RCVD);
1044 else {
1045 c3cn_clear_flag(c3cn, C3CN_ABORT_RPL_RCVD);
1046 c3cn_clear_flag(c3cn, C3CN_ABORT_RPL_PENDING);
1047 if (c3cn_flag(c3cn, C3CN_ABORT_REQ_RCVD))
1048 cxgb3i_log_error("%s tid %u, ABORT_RPL_RSS\n",
1049 c3cn->cdev->name, c3cn->tid);
1050 c3cn_closed(c3cn);
1051 }
1052 }
1053 __kfree_skb(skb);
1054}
1055
1056static int do_abort_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
1057{
1058 struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
1059 struct s3_conn *c3cn = ctx;
1060
1061 c3cn_conn_debug("rcv, status 0x%x, c3cn 0x%p, s %u, 0x%lx.\n",
1062 rpl->status, c3cn, c3cn ? c3cn->state : 0,
1063 c3cn ? c3cn->flags : 0UL);
1064
1065 /*
1066 * Ignore replies to post-close aborts indicating that the abort was
1067 * requested too late. These connections are terminated when we get
1068 * PEER_CLOSE or CLOSE_CON_RPL and by the time the abort_rpl_rss
1069 * arrives the TID is either no longer used or it has been recycled.
1070 */
1071 if (rpl->status == CPL_ERR_ABORT_FAILED)
1072 goto discard;
1073
1074 /*
1075 * Sometimes we've already closed the connection, e.g., a post-close
1076 * abort races with ABORT_REQ_RSS, the latter frees the connection
1077 * expecting the ABORT_REQ will fail with CPL_ERR_ABORT_FAILED,
1078 * but FW turns the ABORT_REQ into a regular one and so we get
1079 * ABORT_RPL_RSS with status 0 and no connection.
1080 */
1081 if (!c3cn)
1082 goto discard;
1083
1084 process_cpl_msg_ref(process_abort_rpl, c3cn, skb);
1085 return 0;
1086
1087discard:
1088 __kfree_skb(skb);
1089 return 0;
1090}
1091
/*
 * Process RX_ISCSI_HDR CPL message: -> host
 * Handle received PDUs, the payload could be DDP'ed. If not, the payload
 * follow after the bhs.
 */
static void process_rx_iscsi_hdr(struct s3_conn *c3cn, struct sk_buff *skb)
{
	struct cpl_iscsi_hdr *hdr_cpl = cplhdr(skb);
	struct cpl_iscsi_hdr_norss data_cpl;
	struct cpl_rx_data_ddp_norss ddp_cpl;
	unsigned int hdr_len, data_len, status;
	unsigned int len;
	int err;

	/* connection is already going down: abort it (unless an abort is
	 * already in flight) and drop the PDU */
	if (unlikely(c3cn->state >= C3CN_STATE_PASSIVE_CLOSE)) {
		if (c3cn->state != C3CN_STATE_ABORTING)
			send_abort_req(c3cn);
		__kfree_skb(skb);
		return;
	}

	/* stash the tcp sequence number in the skb control block */
	skb_tcp_seq(skb) = ntohl(hdr_cpl->seq);
	skb_flags(skb) = 0;

	/* strip the CPL header so the skb data starts at the iscsi bhs */
	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(struct cpl_iscsi_hdr));

	len = hdr_len = ntohs(hdr_cpl->len);
	/* msg coalesce is off or not enough data received */
	if (skb->len <= hdr_len) {
		cxgb3i_log_error("%s: TID %u, ISCSI_HDR, skb len %u < %u.\n",
				 c3cn->cdev->name, c3cn->tid,
				 skb->len, hdr_len);
		goto abort_conn;
	}

	/* the RX_DATA_DDP status cpl is appended at the tail of the skb */
	err = skb_copy_bits(skb, skb->len - sizeof(ddp_cpl), &ddp_cpl,
			    sizeof(ddp_cpl));
	if (err < 0)
		goto abort_conn;

	skb_ulp_mode(skb) = ULP2_FLAG_DATA_READY;
	skb_rx_pdulen(skb) = ntohs(ddp_cpl.len);
	skb_rx_ddigest(skb) = ntohl(ddp_cpl.ulp_crc);
	status = ntohl(ddp_cpl.ddp_status);

	c3cn_rx_debug("rx skb 0x%p, len %u, pdulen %u, ddp status 0x%x.\n",
		      skb, skb->len, skb_rx_pdulen(skb), status);

	/* propagate the h/w digest / padding error bits to the skb so the
	 * iscsi layer can react to them */
	if (status & (1 << RX_DDP_STATUS_HCRC_SHIFT))
		skb_ulp_mode(skb) |= ULP2_FLAG_HCRC_ERROR;
	if (status & (1 << RX_DDP_STATUS_DCRC_SHIFT))
		skb_ulp_mode(skb) |= ULP2_FLAG_DCRC_ERROR;
	if (status & (1 << RX_DDP_STATUS_PAD_SHIFT))
		skb_ulp_mode(skb) |= ULP2_FLAG_PAD_ERROR;

	/* payload delivered inline (not DDP'ed): account for the embedded
	 * data cpl + payload; otherwise just note the payload was DDP'ed */
	if (skb->len > (hdr_len + sizeof(ddp_cpl))) {
		err = skb_copy_bits(skb, hdr_len, &data_cpl, sizeof(data_cpl));
		if (err < 0)
			goto abort_conn;
		data_len = ntohs(data_cpl.len);
		len += sizeof(data_cpl) + data_len;
	} else if (status & (1 << RX_DDP_STATUS_DDP_SHIFT))
		skb_ulp_mode(skb) |= ULP2_FLAG_DATA_DDPED;

	/* advance rcv_nxt by the recovered pdu length, trim the skb down to
	 * the pdu bytes actually present, and hand it to the iscsi layer */
	c3cn->rcv_nxt = ntohl(ddp_cpl.seq) + skb_rx_pdulen(skb);
	__pskb_trim(skb, len);
	__skb_queue_tail(&c3cn->receive_queue, skb);
	cxgb3i_conn_pdu_ready(c3cn);

	return;

abort_conn:
	send_abort_req(c3cn);
	__kfree_skb(skb);
}
1168
1169static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx)
1170{
1171 struct s3_conn *c3cn = ctx;
1172
1173 process_cpl_msg(process_rx_iscsi_hdr, c3cn, skb);
1174 return 0;
1175}
1176
1177/*
1178 * Process TX_DATA_ACK CPL messages: -> host
1179 * Process an acknowledgment of WR completion. Advance snd_una and send the
1180 * next batch of work requests from the write queue.
1181 */
1182static void check_wr_invariants(struct s3_conn *c3cn)
1183{
1184 int pending = count_pending_wrs(c3cn);
1185
1186 if (unlikely(c3cn->wr_avail + pending != c3cn->wr_max))
1187 cxgb3i_log_error("TID %u: credit imbalance: avail %u, "
1188 "pending %u, total should be %u\n",
1189 c3cn->tid, c3cn->wr_avail, pending,
1190 c3cn->wr_max);
1191}
1192
/*
 * Handle a TX_DATA_ACK: return the acked credits to the connection, retire
 * the pending WR skbs those credits cover, advance snd_una and, when the
 * send window opened up, push more data / re-open the tx path.
 */
static void process_wr_ack(struct s3_conn *c3cn, struct sk_buff *skb)
{
	struct cpl_wr_ack *hdr = cplhdr(skb);
	unsigned int credits = ntohs(hdr->credits);
	u32 snd_una = ntohl(hdr->snd_una);

	c3cn_tx_debug("%u WR credits, avail %u, unack %u, TID %u, state %u.\n",
			credits, c3cn->wr_avail, c3cn->wr_unacked,
			c3cn->tid, c3cn->state);

	c3cn->wr_avail += credits;
	/* unacked can never exceed the number of WRs still in flight */
	if (c3cn->wr_unacked > c3cn->wr_max - c3cn->wr_avail)
		c3cn->wr_unacked = c3cn->wr_max - c3cn->wr_avail;

	/* retire pending WR skbs covered by the acked credits; each skb's
	 * csum field holds the number of credits that WR consumed */
	while (credits) {
		struct sk_buff *p = peek_wr(c3cn);

		if (unlikely(!p)) {
			cxgb3i_log_error("%u WR_ACK credits for TID %u with "
					 "nothing pending, state %u\n",
					 credits, c3cn->tid, c3cn->state);
			break;
		}
		if (unlikely(credits < p->csum)) {
			/* partially acked WR: remember the remainder and
			 * stop — the rest will come in a later ack */
			struct tx_data_wr *w = cplhdr(p);
			cxgb3i_log_error("TID %u got %u WR credits need %u, "
					 "len %u, main body %u, frags %u, "
					 "seq # %u, ACK una %u, ACK nxt %u, "
					 "WR_AVAIL %u, WRs pending %u\n",
					 c3cn->tid, credits, p->csum, p->len,
					 p->len - p->data_len,
					 skb_shinfo(p)->nr_frags,
					 ntohl(w->sndseq), snd_una,
					 ntohl(hdr->snd_nxt), c3cn->wr_avail,
					 count_pending_wrs(c3cn) - credits);
			p->csum -= credits;
			break;
		} else {
			/* fully acked: drop the WR skb */
			dequeue_wr(c3cn);
			credits -= p->csum;
			free_wr_skb(p);
		}
	}

	check_wr_invariants(c3cn);

	/* an ack must never move snd_una backwards */
	if (unlikely(before(snd_una, c3cn->snd_una))) {
		cxgb3i_log_error("TID %u, unexpected sequence # %u in WR_ACK "
				 "snd_una %u\n",
				 c3cn->tid, snd_una, c3cn->snd_una);
		goto out_free;
	}

	if (c3cn->snd_una != snd_una) {
		c3cn->snd_una = snd_una;
		/* forward progress confirms the neighbour entry is alive */
		dst_confirm(c3cn->dst_cache);
	}

	/* push queued data if any, and always let the upper layer know the
	 * tx path has room again */
	if (skb_queue_len(&c3cn->write_queue)) {
		if (c3cn_push_tx_frames(c3cn, 0))
			cxgb3i_conn_tx_open(c3cn);
	} else
		cxgb3i_conn_tx_open(c3cn);
out_free:
	__kfree_skb(skb);
}
1259
1260static int do_wr_ack(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
1261{
1262 struct s3_conn *c3cn = ctx;
1263
1264 process_cpl_msg(process_wr_ack, c3cn, skb);
1265 return 0;
1266}
1267
1268/*
1269 * for each connection, pre-allocate skbs needed for close/abort requests. So
1270 * that we can service the request right away.
1271 */
1272static void c3cn_free_cpl_skbs(struct s3_conn *c3cn)
1273{
1274 if (c3cn->cpl_close)
1275 kfree_skb(c3cn->cpl_close);
1276 if (c3cn->cpl_abort_req)
1277 kfree_skb(c3cn->cpl_abort_req);
1278 if (c3cn->cpl_abort_rpl)
1279 kfree_skb(c3cn->cpl_abort_rpl);
1280}
1281
1282static int c3cn_alloc_cpl_skbs(struct s3_conn *c3cn)
1283{
1284 c3cn->cpl_close = alloc_skb(sizeof(struct cpl_close_con_req),
1285 GFP_KERNEL);
1286 if (!c3cn->cpl_close)
1287 return -ENOMEM;
1288 skb_put(c3cn->cpl_close, sizeof(struct cpl_close_con_req));
1289
1290 c3cn->cpl_abort_req = alloc_skb(sizeof(struct cpl_abort_req),
1291 GFP_KERNEL);
1292 if (!c3cn->cpl_abort_req)
1293 goto free_cpl_skbs;
1294 skb_put(c3cn->cpl_abort_req, sizeof(struct cpl_abort_req));
1295
1296 c3cn->cpl_abort_rpl = alloc_skb(sizeof(struct cpl_abort_rpl),
1297 GFP_KERNEL);
1298 if (!c3cn->cpl_abort_rpl)
1299 goto free_cpl_skbs;
1300 skb_put(c3cn->cpl_abort_rpl, sizeof(struct cpl_abort_rpl));
1301
1302 return 0;
1303
1304free_cpl_skbs:
1305 c3cn_free_cpl_skbs(c3cn);
1306 return -ENOMEM;
1307}
1308
/**
 * c3cn_release_offload_resources - release offload resource
 * @c3cn: the offloaded iscsi tcp connection.
 * Release resources held by an offload connection (TID, L2T entry, etc.)
 */
static void c3cn_release_offload_resources(struct s3_conn *c3cn)
{
	struct t3cdev *cdev = c3cn->cdev;
	unsigned int tid = c3cn->tid;

	c3cn->qset = 0;
	c3cn_free_cpl_skbs(c3cn);

	/* WRs still un-acked by hardware: drop the pending list */
	if (c3cn->wr_avail != c3cn->wr_max) {
		purge_wr_queue(c3cn);
		reset_wr_list(c3cn);
	}

	if (cdev) {
		if (c3cn->l2t) {
			l2t_release(L2DATA(cdev), c3cn->l2t);
			c3cn->l2t = NULL;
		}
		if (c3cn->state == C3CN_STATE_CONNECTING)
			/* we have ATID */
			s3_free_atid(cdev, tid);
		else {
			/* we have TID */
			cxgb3_remove_tid(cdev, (void *)c3cn, tid);
			/* drop the reference held by the TID table entry —
			 * NOTE(review): confirm the matching c3cn_hold() on
			 * the active-open/insert path */
			c3cn_put(c3cn);
		}
	}

	/* detach from route and adapter; NOTE(review): dst_cache is cleared
	 * without a dst_release() here — verify the route reference is
	 * dropped elsewhere on teardown */
	c3cn->dst_cache = NULL;
	c3cn->cdev = NULL;
}
1345
1346/**
1347 * cxgb3i_c3cn_create - allocate and initialize an s3_conn structure
1348 * returns the s3_conn structure allocated.
1349 */
1350struct s3_conn *cxgb3i_c3cn_create(void)
1351{
1352 struct s3_conn *c3cn;
1353
1354 c3cn = kzalloc(sizeof(*c3cn), GFP_KERNEL);
1355 if (!c3cn)
1356 return NULL;
1357
1358 /* pre-allocate close/abort cpl, so we don't need to wait for memory
1359 when close/abort is requested. */
1360 if (c3cn_alloc_cpl_skbs(c3cn) < 0)
1361 goto free_c3cn;
1362
1363 c3cn_conn_debug("alloc c3cn 0x%p.\n", c3cn);
1364
1365 c3cn->flags = 0;
1366 spin_lock_init(&c3cn->lock);
1367 atomic_set(&c3cn->refcnt, 1);
1368 skb_queue_head_init(&c3cn->receive_queue);
1369 skb_queue_head_init(&c3cn->write_queue);
1370 setup_timer(&c3cn->retry_timer, NULL, (unsigned long)c3cn);
1371 rwlock_init(&c3cn->callback_lock);
1372
1373 return c3cn;
1374
1375free_c3cn:
1376 kfree(c3cn);
1377 return NULL;
1378}
1379
/*
 * Initiate a local close: purge unread rx data and, depending on the
 * current state, either defer the close, send a close request, or abort
 * the connection outright if data was thrown away.
 */
static void c3cn_active_close(struct s3_conn *c3cn)
{
	int data_lost;
	int close_req = 0;

	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
			 c3cn, c3cn->state, c3cn->flags);

	dst_confirm(c3cn->dst_cache);

	/* hold a temporary reference so the connection cannot vanish while
	 * we operate on it under the lock */
	c3cn_hold(c3cn);
	spin_lock_bh(&c3cn->lock);

	/* anything still queued for the ULP is lost from here on */
	data_lost = skb_queue_len(&c3cn->receive_queue);
	__skb_queue_purge(&c3cn->receive_queue);

	switch (c3cn->state) {
	case C3CN_STATE_CLOSED:
	case C3CN_STATE_ACTIVE_CLOSE:
	case C3CN_STATE_CLOSE_WAIT_1:
	case C3CN_STATE_CLOSE_WAIT_2:
	case C3CN_STATE_ABORTING:
		/* nothing need to be done */
		break;
	case C3CN_STATE_CONNECTING:
		/* defer until cpl_act_open_rpl or cpl_act_establish */
		c3cn_set_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED);
		break;
	case C3CN_STATE_ESTABLISHED:
		close_req = 1;
		c3cn_set_state(c3cn, C3CN_STATE_ACTIVE_CLOSE);
		break;
	case C3CN_STATE_PASSIVE_CLOSE:
		close_req = 1;
		c3cn_set_state(c3cn, C3CN_STATE_CLOSE_WAIT_2);
		break;
	}

	if (close_req) {
		if (data_lost)
			/* Unread data was tossed, zap the connection. */
			send_abort_req(c3cn);
		else
			send_close_req(c3cn);
	}

	spin_unlock_bh(&c3cn->lock);
	c3cn_put(c3cn);
}
1429
1430/**
1431 * cxgb3i_c3cn_release - close and release an iscsi tcp connection and any
1432 * resource held
1433 * @c3cn: the iscsi tcp connection
1434 */
1435void cxgb3i_c3cn_release(struct s3_conn *c3cn)
1436{
1437 c3cn_conn_debug("c3cn 0x%p, s %u, f 0x%lx.\n",
1438 c3cn, c3cn->state, c3cn->flags);
1439 if (unlikely(c3cn->state == C3CN_STATE_CONNECTING))
1440 c3cn_set_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED);
1441 else if (likely(c3cn->state != C3CN_STATE_CLOSED))
1442 c3cn_active_close(c3cn);
1443 c3cn_put(c3cn);
1444}
1445
1446static int is_cxgb3_dev(struct net_device *dev)
1447{
1448 struct cxgb3i_sdev_data *cdata;
1449 struct net_device *ndev = dev;
1450
1451 if (dev->priv_flags & IFF_802_1Q_VLAN)
1452 ndev = vlan_dev_real_dev(dev);
1453
1454 write_lock(&cdata_rwlock);
1455 list_for_each_entry(cdata, &cdata_list, list) {
1456 struct adap_ports *ports = &cdata->ports;
1457 int i;
1458
1459 for (i = 0; i < ports->nports; i++)
1460 if (ndev == ports->lldevs[i]) {
1461 write_unlock(&cdata_rwlock);
1462 return 1;
1463 }
1464 }
1465 write_unlock(&cdata_rwlock);
1466 return 0;
1467}
1468
1469/**
1470 * cxgb3_egress_dev - return the cxgb3 egress device
1471 * @root_dev: the root device anchoring the search
1472 * @c3cn: the connection used to determine egress port in bonding mode
1473 * @context: in bonding mode, indicates a connection set up or failover
1474 *
1475 * Return egress device or NULL if the egress device isn't one of our ports.
1476 */
1477static struct net_device *cxgb3_egress_dev(struct net_device *root_dev,
1478 struct s3_conn *c3cn,
1479 int context)
1480{
1481 while (root_dev) {
1482 if (root_dev->priv_flags & IFF_802_1Q_VLAN)
1483 root_dev = vlan_dev_real_dev(root_dev);
1484 else if (is_cxgb3_dev(root_dev))
1485 return root_dev;
1486 else
1487 return NULL;
1488 }
1489 return NULL;
1490}
1491
1492static struct rtable *find_route(struct net_device *dev,
1493 __be32 saddr, __be32 daddr,
1494 __be16 sport, __be16 dport)
1495{
1496 struct rtable *rt;
1497 struct flowi fl = {
1498 .oif = dev ? dev->ifindex : 0,
1499 .nl_u = {
1500 .ip4_u = {
1501 .daddr = daddr,
1502 .saddr = saddr,
1503 .tos = 0 } },
1504 .proto = IPPROTO_TCP,
1505 .uli_u = {
1506 .ports = {
1507 .sport = sport,
1508 .dport = dport } } };
1509
1510 if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
1511 return NULL;
1512 return rt;
1513}
1514
1515/*
1516 * Assign offload parameters to some connection fields.
1517 */
1518static void init_offload_conn(struct s3_conn *c3cn,
1519 struct t3cdev *cdev,
1520 struct dst_entry *dst)
1521{
1522 BUG_ON(c3cn->cdev != cdev);
1523 c3cn->wr_max = c3cn->wr_avail = T3C_DATA(cdev)->max_wrs - 1;
1524 c3cn->wr_unacked = 0;
1525 c3cn->mss_idx = select_mss(c3cn, dst_mtu(dst));
1526
1527 reset_wr_list(c3cn);
1528}
1529
/*
 * Send an active open request for the connection: allocate the ATID and
 * L2T entry, build the CPL_ACT_OPEN_REQ and hand it to the hardware.
 * Returns 0 if the request was sent, -EINVAL on any resource failure
 * (resources acquired so far are rolled back).
 */
static int initiate_act_open(struct s3_conn *c3cn, struct net_device *dev)
{
	struct cxgb3i_sdev_data *cdata = NDEV2CDATA(dev);
	struct t3cdev *cdev = cdata->cdev;
	struct dst_entry *dst = c3cn->dst_cache;
	struct sk_buff *skb;

	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);
	/*
	 * Initialize connection data. Note that the flags and ULP mode are
	 * initialized higher up ...
	 */
	c3cn->dev = dev;
	c3cn->cdev = cdev;
	c3cn->tid = cxgb3_alloc_atid(cdev, cdata->client, c3cn);
	if (c3cn->tid < 0)
		goto out_err;

	c3cn->qset = 0;
	c3cn->l2t = t3_l2t_get(cdev, dst->neighbour, dev);
	if (!c3cn->l2t)
		goto free_tid;

	skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_KERNEL);
	if (!skb)
		goto free_l2t;

	/* the arp failure handler must be able to find the connection */
	skb->sk = (struct sock *)c3cn;
	set_arp_failure_handler(skb, act_open_req_arp_failure);

	/* extra reference for the hardware TID entry — NOTE(review):
	 * confirm this is the reference dropped by c3cn_put() in
	 * c3cn_release_offload_resources() */
	c3cn_hold(c3cn);

	init_offload_conn(c3cn, cdev, dst);
	c3cn->err = 0;

	make_act_open_req(c3cn, skb, c3cn->tid, c3cn->l2t);
	l2t_send(cdev, skb, c3cn->l2t);
	return 0;

free_l2t:
	l2t_release(L2DATA(cdev), c3cn->l2t);
free_tid:
	s3_free_atid(cdev, c3cn->tid);
	c3cn->tid = 0;
out_err:
	return -EINVAL;
}
1578
1579/**
1580 * cxgb3i_find_dev - find the interface associated with the given address
1581 * @ipaddr: ip address
1582 */
1583static struct net_device *
1584cxgb3i_find_dev(struct net_device *dev, __be32 ipaddr)
1585{
1586 struct flowi fl;
1587 int err;
1588 struct rtable *rt;
1589
1590 memset(&fl, 0, sizeof(fl));
1591 fl.nl_u.ip4_u.daddr = ipaddr;
1592
1593 err = ip_route_output_key(dev ? dev_net(dev) : &init_net, &rt, &fl);
1594 if (!err)
1595 return (&rt->dst)->dev;
1596
1597 return NULL;
1598}
1599
/**
 * cxgb3i_c3cn_connect - initiates an iscsi tcp connection to a given address
 * @dev: optional net device hint for the outgoing interface
 * @c3cn: the iscsi tcp connection
 * @usin: destination address
 *
 * return 0 if active open request is sent, < 0 otherwise.
 */
int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn,
			struct sockaddr_in *usin)
{
	struct rtable *rt;
	struct cxgb3i_sdev_data *cdata;
	struct t3cdev *cdev;
	__be32 sipv4;
	struct net_device *dstdev;
	int err;

	c3cn_conn_debug("c3cn 0x%p, dev 0x%p.\n", c3cn, dev);

	/* only ipv4 is supported */
	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	c3cn->daddr.sin_port = usin->sin_port;
	c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr;

	/* the destination must be reachable through one of our ports */
	dstdev = cxgb3i_find_dev(dev, usin->sin_addr.s_addr);
	if (!dstdev || !is_cxgb3_dev(dstdev))
		return -ENETUNREACH;

	if (dstdev->priv_flags & IFF_802_1Q_VLAN)
		dev = dstdev;

	rt = find_route(dev, c3cn->saddr.sin_addr.s_addr,
			c3cn->daddr.sin_addr.s_addr,
			c3cn->saddr.sin_port,
			c3cn->daddr.sin_port);
	if (rt == NULL) {
		c3cn_conn_debug("NO route to 0x%x, port %u, dev %s.\n",
				c3cn->daddr.sin_addr.s_addr,
				ntohs(c3cn->daddr.sin_port),
				dev ? dev->name : "any");
		return -ENETUNREACH;
	}

	/* offload only makes sense for unicast destinations */
	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		c3cn_conn_debug("multi-cast route to 0x%x, port %u, dev %s.\n",
				c3cn->daddr.sin_addr.s_addr,
				ntohs(c3cn->daddr.sin_port),
				dev ? dev->name : "any");
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!c3cn->saddr.sin_addr.s_addr)
		c3cn->saddr.sin_addr.s_addr = rt->rt_src;

	/* now commit destination to connection */
	/* NOTE(review): from here on the route reference is held via
	 * dst_cache; the error returns below do not ip_rt_put() it —
	 * verify the reference is released on connection teardown */
	c3cn->dst_cache = &rt->dst;

	/* try to establish an offloaded connection */
	dev = cxgb3_egress_dev(c3cn->dst_cache->dev, c3cn, 0);
	if (dev == NULL) {
		c3cn_conn_debug("c3cn 0x%p, egress dev NULL.\n", c3cn);
		return -ENETUNREACH;
	}
	cdata = NDEV2CDATA(dev);
	cdev = cdata->cdev;

	/* get a source port if one hasn't been provided */
	err = c3cn_get_port(c3cn, cdata);
	if (err)
		return err;

	c3cn_conn_debug("c3cn 0x%p get port %u.\n",
			c3cn, ntohs(c3cn->saddr.sin_port));

	/* use the port's private iscsi ip as source address, registering
	 * ours with the port if none was configured yet */
	sipv4 = cxgb3i_get_private_ipv4addr(dev);
	if (!sipv4) {
		c3cn_conn_debug("c3cn 0x%p, iscsi ip not configured.\n", c3cn);
		sipv4 = c3cn->saddr.sin_addr.s_addr;
		cxgb3i_set_private_ipv4addr(dev, sipv4);
	} else
		c3cn->saddr.sin_addr.s_addr = sipv4;

	c3cn_conn_debug("c3cn 0x%p, %pI4,%u-%pI4,%u SYN_SENT.\n",
			c3cn,
			&c3cn->saddr.sin_addr.s_addr,
			ntohs(c3cn->saddr.sin_port),
			&c3cn->daddr.sin_addr.s_addr,
			ntohs(c3cn->daddr.sin_port));

	c3cn_set_state(c3cn, C3CN_STATE_CONNECTING);
	if (!initiate_act_open(c3cn, dev))
		return 0;

	/*
	 * If we get here, we don't have an offload connection so simply
	 * return a failure.
	 */
	err = -ENOTSUPP;

	/*
	 * This trashes the connection and releases the local port,
	 * if necessary.
	 */
	c3cn_conn_debug("c3cn 0x%p -> CLOSED.\n", c3cn);
	c3cn_set_state(c3cn, C3CN_STATE_CLOSED);
	ip_rt_put(rt);
	c3cn_put_port(c3cn);
	return err;
}
1711
1712/**
1713 * cxgb3i_c3cn_rx_credits - ack received tcp data.
1714 * @c3cn: iscsi tcp connection
1715 * @copied: # of bytes processed
1716 *
1717 * Called after some received data has been read. It returns RX credits
1718 * to the HW for the amount of data processed.
1719 */
1720void cxgb3i_c3cn_rx_credits(struct s3_conn *c3cn, int copied)
1721{
1722 struct t3cdev *cdev;
1723 int must_send;
1724 u32 credits, dack = 0;
1725
1726 if (c3cn->state != C3CN_STATE_ESTABLISHED)
1727 return;
1728
1729 credits = c3cn->copied_seq - c3cn->rcv_wup;
1730 if (unlikely(!credits))
1731 return;
1732
1733 cdev = c3cn->cdev;
1734
1735 if (unlikely(cxgb3_rx_credit_thres == 0))
1736 return;
1737
1738 dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1);
1739
1740 /*
1741 * For coalescing to work effectively ensure the receive window has
1742 * at least 16KB left.
1743 */
1744 must_send = credits + 16384 >= cxgb3_rcv_win;
1745
1746 if (must_send || credits >= cxgb3_rx_credit_thres)
1747 c3cn->rcv_wup += send_rx_credits(c3cn, credits, dack);
1748}
1749
/**
 * cxgb3i_c3cn_send_pdus - send the skbs containing iscsi pdus
 * @c3cn: iscsi tcp connection
 * @skb: skb contains the iscsi pdu
 *
 * Add a list of skbs to a connection send queue. The skbs must comply with
 * the max size limit of the device and have a headroom of at least
 * TX_HEADER_LEN bytes.
 * Return # of bytes queued.
 */
int cxgb3i_c3cn_send_pdus(struct s3_conn *c3cn, struct sk_buff *skb)
{
	struct sk_buff *next;
	int err, copied = 0;

	spin_lock_bh(&c3cn->lock);

	if (c3cn->state != C3CN_STATE_ESTABLISHED) {
		c3cn_tx_debug("c3cn 0x%p, not in est. state %u.\n",
			      c3cn, c3cn->state);
		err = -EAGAIN;
		goto out_err;
	}

	if (c3cn->err) {
		c3cn_tx_debug("c3cn 0x%p, err %d.\n", c3cn, c3cn->err);
		err = -EPIPE;
		goto out_err;
	}

	/* refuse to queue beyond the send window */
	if (c3cn->write_seq - c3cn->snd_una >= cxgb3_snd_win) {
		c3cn_tx_debug("c3cn 0x%p, snd %u - %u > %u.\n",
			      c3cn, c3cn->write_seq, c3cn->snd_una,
			      cxgb3_snd_win);
		err = -ENOBUFS;
		goto out_err;
	}

	while (skb) {
		/* fragment count including the linear part, if any */
		int frags = skb_shinfo(skb)->nr_frags +
				(skb->len != skb->data_len);

		/* each skb must have room for the TX_DATA_WR + SGE header */
		if (unlikely(skb_headroom(skb) < TX_HEADER_LEN)) {
			c3cn_tx_debug("c3cn 0x%p, skb head.\n", c3cn);
			err = -EINVAL;
			goto out_err;
		}

		if (frags >= SKB_WR_LIST_SIZE) {
			cxgb3i_log_error("c3cn 0x%p, tx frags %d, len %u,%u.\n",
					 c3cn, skb_shinfo(skb)->nr_frags,
					 skb->len, skb->data_len);
			err = -EINVAL;
			goto out_err;
		}

		/* detach from the caller's list and queue for transmit */
		next = skb->next;
		skb->next = NULL;
		skb_entail(c3cn, skb, C3CB_FLAG_NO_APPEND | C3CB_FLAG_NEED_HDR);
		copied += skb->len;
		c3cn->write_seq += skb->len + ulp_extra_len(skb);
		skb = next;
	}
done:
	/* push whatever is queued, including on the error path */
	if (likely(skb_queue_len(&c3cn->write_queue)))
		c3cn_push_tx_frames(c3cn, 1);
	spin_unlock_bh(&c3cn->lock);
	return copied;

out_err:
	/* a broken pipe with nothing queued surfaces the connection error;
	 * any other failure returns the error code itself */
	if (copied == 0 && err == -EPIPE)
		copied = c3cn->err ? c3cn->err : -EPIPE;
	else
		copied = err;
	goto done;
}
1826
1827static void sdev_data_cleanup(struct cxgb3i_sdev_data *cdata)
1828{
1829 struct adap_ports *ports = &cdata->ports;
1830 struct s3_conn *c3cn;
1831 int i;
1832
1833 for (i = 0; i < cxgb3_max_connect; i++) {
1834 if (cdata->sport_conn[i]) {
1835 c3cn = cdata->sport_conn[i];
1836 cdata->sport_conn[i] = NULL;
1837
1838 spin_lock_bh(&c3cn->lock);
1839 c3cn->cdev = NULL;
1840 c3cn_set_flag(c3cn, C3CN_OFFLOAD_DOWN);
1841 c3cn_closed(c3cn);
1842 spin_unlock_bh(&c3cn->lock);
1843 }
1844 }
1845
1846 for (i = 0; i < ports->nports; i++)
1847 NDEV2CDATA(ports->lldevs[i]) = NULL;
1848
1849 cxgb3i_free_big_mem(cdata);
1850}
1851
1852void cxgb3i_sdev_cleanup(void)
1853{
1854 struct cxgb3i_sdev_data *cdata;
1855
1856 write_lock(&cdata_rwlock);
1857 list_for_each_entry(cdata, &cdata_list, list) {
1858 list_del(&cdata->list);
1859 sdev_data_cleanup(cdata);
1860 }
1861 write_unlock(&cdata_rwlock);
1862}
1863
1864int cxgb3i_sdev_init(cxgb3_cpl_handler_func *cpl_handlers)
1865{
1866 cpl_handlers[CPL_ACT_ESTABLISH] = do_act_establish;
1867 cpl_handlers[CPL_ACT_OPEN_RPL] = do_act_open_rpl;
1868 cpl_handlers[CPL_PEER_CLOSE] = do_peer_close;
1869 cpl_handlers[CPL_ABORT_REQ_RSS] = do_abort_req;
1870 cpl_handlers[CPL_ABORT_RPL_RSS] = do_abort_rpl;
1871 cpl_handlers[CPL_CLOSE_CON_RPL] = do_close_con_rpl;
1872 cpl_handlers[CPL_TX_DMA_ACK] = do_wr_ack;
1873 cpl_handlers[CPL_ISCSI_HDR] = do_iscsi_hdr;
1874
1875 if (cxgb3_max_connect > CXGB3I_MAX_CONN)
1876 cxgb3_max_connect = CXGB3I_MAX_CONN;
1877 return 0;
1878}
1879
/**
 * cxgb3i_sdev_add - allocate and initialize resources for each adapter found
 * @cdev: t3cdev adapter
 * @client: cxgb3 driver client
 */
void cxgb3i_sdev_add(struct t3cdev *cdev, struct cxgb3_client *client)
{
	struct cxgb3i_sdev_data *cdata;
	struct ofld_page_info rx_page_info;
	unsigned int wr_len;
	/* size of the trailing source-port -> connection map */
	int mapsize = cxgb3_max_connect * sizeof(struct s3_conn *);
	int i;

	cdata = cxgb3i_alloc_big_mem(sizeof(*cdata) + mapsize, GFP_KERNEL);
	if (!cdata) {
		cxgb3i_log_warn("t3dev 0x%p, offload up, OOM %d.\n",
				cdev, mapsize);
		return;
	}

	/* query the adapter parameters needed for the offload path */
	if (cdev->ctl(cdev, GET_WR_LEN, &wr_len) < 0 ||
	    cdev->ctl(cdev, GET_PORTS, &cdata->ports) < 0 ||
	    cdev->ctl(cdev, GET_RX_PAGE_INFO, &rx_page_info) < 0) {
		cxgb3i_log_warn("t3dev 0x%p, offload up, ioctl failed.\n",
				cdev);
		goto free_cdata;
	}

	s3_init_wr_tab(wr_len);

	spin_lock_init(&cdata->lock);
	INIT_LIST_HEAD(&cdata->list);
	cdata->cdev = cdev;
	cdata->client = client;

	/* enable reverse lookup from each port net_device to this data */
	for (i = 0; i < cdata->ports.nports; i++)
		NDEV2CDATA(cdata->ports.lldevs[i]) = cdata;

	write_lock(&cdata_rwlock);
	list_add_tail(&cdata->list, &cdata_list);
	write_unlock(&cdata_rwlock);

	cxgb3i_log_info("t3dev 0x%p, offload up, added.\n", cdev);
	return;

free_cdata:
	cxgb3i_free_big_mem(cdata);
}
1928
/**
 * cxgb3i_sdev_remove - free the allocated resources for the adapter
 * @cdev: t3cdev adapter
 *
 * Unlink the per-adapter data from the global list under the write lock,
 * then tear down its connections and port mappings outside the lock.
 */
void cxgb3i_sdev_remove(struct t3cdev *cdev)
{
	struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(cdev);

	cxgb3i_log_info("t3dev 0x%p, offload down, remove.\n", cdev);

	/* unlink first so no new lookup can find this adapter */
	write_lock(&cdata_rwlock);
	list_del(&cdata->list);
	write_unlock(&cdata_rwlock);

	sdev_data_cleanup(cdata);
}
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.h b/drivers/scsi/cxgb3i/cxgb3i_offload.h
deleted file mode 100644
index 6a1d86b1fafe..000000000000
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.h
+++ /dev/null
@@ -1,243 +0,0 @@
1/*
2 * cxgb3i_offload.h: Chelsio S3xx iscsi offloaded tcp connection management
3 *
4 * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
5 *
6 * This program is distributed in the hope that it will be useful, but WITHOUT
7 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
9 * release for licensing terms and conditions.
10 *
11 * Written by: Dimitris Michailidis (dm@chelsio.com)
12 * Karen Xie (kxie@chelsio.com)
13 */
14
15#ifndef _CXGB3I_OFFLOAD_H
16#define _CXGB3I_OFFLOAD_H
17
18#include <linux/skbuff.h>
19#include <linux/in.h>
20
21#include "common.h"
22#include "adapter.h"
23#include "t3cdev.h"
24#include "cxgb3_offload.h"
25
26#define cxgb3i_log_error(fmt...) printk(KERN_ERR "cxgb3i: ERR! " fmt)
27#define cxgb3i_log_warn(fmt...) printk(KERN_WARNING "cxgb3i: WARN! " fmt)
28#define cxgb3i_log_info(fmt...) printk(KERN_INFO "cxgb3i: " fmt)
29#define cxgb3i_log_debug(fmt, args...) \
30 printk(KERN_INFO "cxgb3i: %s - " fmt, __func__ , ## args)
31
/**
 * struct s3_conn - an iscsi tcp connection structure
 *
 * @dev: net device of the connection
 * @cdev: adapter t3cdev for net device
 * @flags: see c3cn_flags below
 * @tid: connection id assigned by the h/w
 * @qset: queue set used by connection
 * @mss_idx: Maximum Segment Size table index
 * @l2t: ARP resolution entry for offload packets
 * @wr_max: maximum in-flight writes
 * @wr_avail: number of writes available
 * @wr_unacked: writes since last request for completion notification
 * @wr_pending_head: head of pending write queue
 * @wr_pending_tail: tail of pending write queue
 * @cpl_close: skb for cpl_close_req
 * @cpl_abort_req: skb for cpl_abort_req
 * @cpl_abort_rpl: skb for cpl_abort_rpl
 * @lock: connection status lock
 * @refcnt: reference count on connection
 * @state: connection state (enum conn_states); volatile since it is read
 *         without the lock in places such as cxgb3i_c3cn_release()
 * @saddr: source ip/port address
 * @daddr: destination ip/port address
 * @dst_cache: reference to destination route
 * @receive_queue: received PDUs
 * @write_queue: un-pushed pending writes
 * @retry_timer: retry timer for various operations
 * @err: connection error status
 * @callback_lock: lock for opaque user context
 * @user_data: opaque user context
 * @rcv_nxt: next receive seq. #
 * @copied_seq: head of yet unread data
 * @rcv_wup: rcv_nxt on last window update sent
 * @snd_nxt: next sequence we send
 * @snd_una: first byte we want an ack for
 * @write_seq: tail+1 of data held in send buffer
 */
struct s3_conn {
	struct net_device *dev;
	struct t3cdev *cdev;
	unsigned long flags;
	int tid;
	int qset;
	int mss_idx;
	struct l2t_entry *l2t;
	int wr_max;
	int wr_avail;
	int wr_unacked;
	struct sk_buff *wr_pending_head;
	struct sk_buff *wr_pending_tail;
	struct sk_buff *cpl_close;
	struct sk_buff *cpl_abort_req;
	struct sk_buff *cpl_abort_rpl;
	spinlock_t lock;
	atomic_t refcnt;
	volatile unsigned int state;
	struct sockaddr_in saddr;
	struct sockaddr_in daddr;
	struct dst_entry *dst_cache;
	struct sk_buff_head receive_queue;
	struct sk_buff_head write_queue;
	struct timer_list retry_timer;
	int err;
	rwlock_t callback_lock;
	void *user_data;

	u32 rcv_nxt;
	u32 copied_seq;
	u32 rcv_wup;
	u32 snd_nxt;
	u32 snd_una;
	u32 write_seq;
};
105
/*
 * connection state
 *
 * NOTE: the numeric ordering of these states is significant — helpers such
 * as c3cn_is_closing() below and rx-path checks compare against the state
 * with >=, so new states must preserve the ordering.
 */
enum conn_states {
	C3CN_STATE_CONNECTING = 1,
	C3CN_STATE_ESTABLISHED,
	C3CN_STATE_ACTIVE_CLOSE,
	C3CN_STATE_PASSIVE_CLOSE,
	C3CN_STATE_CLOSE_WAIT_1,
	C3CN_STATE_CLOSE_WAIT_2,
	C3CN_STATE_ABORTING,
	C3CN_STATE_CLOSED,
};

/* a connection counts as closing once it has entered any close/abort state */
static inline unsigned int c3cn_is_closing(const struct s3_conn *c3cn)
{
	return c3cn->state >= C3CN_STATE_ACTIVE_CLOSE;
}
/* true while the connection is fully established and usable */
static inline unsigned int c3cn_is_established(const struct s3_conn *c3cn)
{
	return c3cn->state == C3CN_STATE_ESTABLISHED;
}
128
/*
 * Connection flags -- many to track some close related events.
 *
 * These are bit numbers within s3_conn->flags — presumably manipulated via
 * the c3cn_set_flag()/c3cn_clear_flag()/c3cn_flag() helpers; confirm at
 * their definitions.
 */
enum c3cn_flags {
	C3CN_ABORT_RPL_RCVD, /* received one ABORT_RPL_RSS message */
	C3CN_ABORT_REQ_RCVD, /* received one ABORT_REQ_RSS message */
	C3CN_ABORT_RPL_PENDING, /* expecting an abort reply */
	C3CN_TX_DATA_SENT, /* already sent a TX_DATA WR */
	C3CN_ACTIVE_CLOSE_NEEDED, /* need to be closed */
	C3CN_OFFLOAD_DOWN /* offload function off */
};
140
/**
 * cxgb3i_sdev_data - Per adapter data.
 * Linked off of each Ethernet device port on the adapter.
 * Also available via the t3cdev structure since we have pointers to our port
 * net_device's there ...
 *
 * @list: list head to link elements
 * @cdev: t3cdev adapter
 * @client: CPL client pointer
 * @ports: array of adapter ports
 * @lock: lock — presumably protects @sport_next/@sport_conn; confirm at
 *        the source-port allocation sites
 * @sport_next: next port
 * @sport_conn: source port connection (flexible trailing array, sized to
 *              cxgb3_max_connect entries at allocation time)
 */
struct cxgb3i_sdev_data {
	struct list_head list;
	struct t3cdev *cdev;
	struct cxgb3_client *client;
	struct adap_ports ports;
	spinlock_t lock;
	unsigned int sport_next;
	struct s3_conn *sport_conn[0];
};
163#define NDEV2CDATA(ndev) (*(struct cxgb3i_sdev_data **)&(ndev)->ec_ptr)
164#define CXGB3_SDEV_DATA(cdev) NDEV2CDATA((cdev)->lldev)
165
166void cxgb3i_sdev_cleanup(void);
167int cxgb3i_sdev_init(cxgb3_cpl_handler_func *);
168void cxgb3i_sdev_add(struct t3cdev *, struct cxgb3_client *);
169void cxgb3i_sdev_remove(struct t3cdev *);
170
171struct s3_conn *cxgb3i_c3cn_create(void);
172int cxgb3i_c3cn_connect(struct net_device *, struct s3_conn *,
173 struct sockaddr_in *);
174void cxgb3i_c3cn_rx_credits(struct s3_conn *, int);
175int cxgb3i_c3cn_send_pdus(struct s3_conn *, struct sk_buff *);
176void cxgb3i_c3cn_release(struct s3_conn *);
177
/**
 * cxgb3_skb_cb - control block for received pdu state and ULP mode management.
 *
 * @flags: see C3CB_FLAG_* below
 * @ulp_mode: ULP mode/submode of sk_buff
 * @seq: tcp sequence number
 *
 * The rx/tx members are a union: rx fields are only valid on the receive
 * path, tx fields only on the transmit path.
 */
struct cxgb3_skb_rx_cb {
	__u32 ddigest; /* data digest */
	__u32 pdulen; /* recovered pdu length */
};

struct cxgb3_skb_tx_cb {
	struct sk_buff *wr_next; /* next wr */
};

struct cxgb3_skb_cb {
	__u8 flags;
	__u8 ulp_mode;
	__u32 seq;
	union {
		struct cxgb3_skb_rx_cb rx;
		struct cxgb3_skb_tx_cb tx;
	};
};

/* accessors for the control block stashed in skb->cb[] */
#define CXGB3_SKB_CB(skb)	((struct cxgb3_skb_cb *)&((skb)->cb[0]))
#define skb_flags(skb)		(CXGB3_SKB_CB(skb)->flags)
#define skb_ulp_mode(skb)	(CXGB3_SKB_CB(skb)->ulp_mode)
#define skb_tcp_seq(skb)	(CXGB3_SKB_CB(skb)->seq)
#define skb_rx_ddigest(skb)	(CXGB3_SKB_CB(skb)->rx.ddigest)
#define skb_rx_pdulen(skb)	(CXGB3_SKB_CB(skb)->rx.pdulen)
#define skb_tx_wr_next(skb)	(CXGB3_SKB_CB(skb)->tx.wr_next)

enum c3cb_flags {
	C3CB_FLAG_NEED_HDR = 1 << 0, /* packet needs a TX_DATA_WR header */
	C3CB_FLAG_NO_APPEND = 1 << 1, /* don't grow this skb */
	C3CB_FLAG_COMPL = 1 << 2, /* request WR completion */
};
217
/**
 * sge_opaque_hdr -
 * Opaque version of structure the SGE stores at skb->head of TX_DATA packets
 * and for which we must reserve space (see TX_HEADER_LEN below).
 */
struct sge_opaque_hdr {
	void *dev;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
};
227
228/* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
229#define TX_HEADER_LEN \
230 (sizeof(struct tx_data_wr) + sizeof(struct sge_opaque_hdr))
231#define SKB_TX_HEADROOM SKB_MAX_HEAD(TX_HEADER_LEN)
232
233/*
234 * get and set private ip for iscsi traffic
235 */
236#define cxgb3i_get_private_ipv4addr(ndev) \
237 (((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr)
238#define cxgb3i_set_private_ipv4addr(ndev, addr) \
239 (((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr) = addr
240
241/* max. connections per adapter */
242#define CXGB3I_MAX_CONN 16384
243#endif /* _CXGB3_OFFLOAD_H */
diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.c b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
deleted file mode 100644
index dc5e3e77a351..000000000000
--- a/drivers/scsi/cxgb3i/cxgb3i_pdu.c
+++ /dev/null
@@ -1,495 +0,0 @@
1/*
2 * cxgb3i_pdu.c: Chelsio S3xx iSCSI driver.
3 *
4 * Copyright (c) 2008 Chelsio Communications, Inc.
5 * Copyright (c) 2008 Mike Christie
6 * Copyright (c) 2008 Red Hat, Inc. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation.
11 *
12 * Written by: Karen Xie (kxie@chelsio.com)
13 */
14
15#include <linux/slab.h>
16#include <linux/skbuff.h>
17#include <linux/crypto.h>
18#include <scsi/scsi_cmnd.h>
19#include <scsi/scsi_host.h>
20
21#include "cxgb3i.h"
22#include "cxgb3i_pdu.h"
23
24#ifdef __DEBUG_CXGB3I_RX__
25#define cxgb3i_rx_debug cxgb3i_log_debug
26#else
27#define cxgb3i_rx_debug(fmt...)
28#endif
29
30#ifdef __DEBUG_CXGB3I_TX__
31#define cxgb3i_tx_debug cxgb3i_log_debug
32#else
33#define cxgb3i_tx_debug(fmt...)
34#endif
35
36/* always allocate rooms for AHS */
37#define SKB_TX_PDU_HEADER_LEN \
38 (sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE)
39static unsigned int skb_extra_headroom;
40static struct page *pad_page;
41
42/*
43 * pdu receive, interact with libiscsi_tcp
44 */
45static inline int read_pdu_skb(struct iscsi_conn *conn, struct sk_buff *skb,
46 unsigned int offset, int offloaded)
47{
48 int status = 0;
49 int bytes_read;
50
51 bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status);
52 switch (status) {
53 case ISCSI_TCP_CONN_ERR:
54 return -EIO;
55 case ISCSI_TCP_SUSPENDED:
56 /* no transfer - just have caller flush queue */
57 return bytes_read;
58 case ISCSI_TCP_SKB_DONE:
59 /*
60 * pdus should always fit in the skb and we should get
61 * segment done notifcation.
62 */
63 iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb.");
64 return -EFAULT;
65 case ISCSI_TCP_SEGMENT_DONE:
66 return bytes_read;
67 default:
68 iscsi_conn_printk(KERN_ERR, conn, "Invalid iscsi_tcp_recv_skb "
69 "status %d\n", status);
70 return -EINVAL;
71 }
72}
73
74static int cxgb3i_conn_read_pdu_skb(struct iscsi_conn *conn,
75 struct sk_buff *skb)
76{
77 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
78 bool offloaded = 0;
79 unsigned int offset;
80 int rc;
81
82 cxgb3i_rx_debug("conn 0x%p, skb 0x%p, len %u, flag 0x%x.\n",
83 conn, skb, skb->len, skb_ulp_mode(skb));
84
85 if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) {
86 iscsi_conn_failure(conn, ISCSI_ERR_PROTO);
87 return -EIO;
88 }
89
90 if (conn->hdrdgst_en && (skb_ulp_mode(skb) & ULP2_FLAG_HCRC_ERROR)) {
91 iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST);
92 return -EIO;
93 }
94
95 if (conn->datadgst_en && (skb_ulp_mode(skb) & ULP2_FLAG_DCRC_ERROR)) {
96 iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
97 return -EIO;
98 }
99
100 /* iscsi hdr */
101 rc = read_pdu_skb(conn, skb, 0, 0);
102 if (rc <= 0)
103 return rc;
104
105 if (iscsi_tcp_recv_segment_is_hdr(tcp_conn))
106 return 0;
107
108 offset = rc;
109 if (conn->hdrdgst_en)
110 offset += ISCSI_DIGEST_SIZE;
111
112 /* iscsi data */
113 if (skb_ulp_mode(skb) & ULP2_FLAG_DATA_DDPED) {
114 cxgb3i_rx_debug("skb 0x%p, opcode 0x%x, data %u, ddp'ed, "
115 "itt 0x%x.\n",
116 skb,
117 tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK,
118 tcp_conn->in.datalen,
119 ntohl(tcp_conn->in.hdr->itt));
120 offloaded = 1;
121 } else {
122 cxgb3i_rx_debug("skb 0x%p, opcode 0x%x, data %u, NOT ddp'ed, "
123 "itt 0x%x.\n",
124 skb,
125 tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK,
126 tcp_conn->in.datalen,
127 ntohl(tcp_conn->in.hdr->itt));
128 offset += sizeof(struct cpl_iscsi_hdr_norss);
129 }
130
131 rc = read_pdu_skb(conn, skb, offset, offloaded);
132 if (rc < 0)
133 return rc;
134 else
135 return 0;
136}
137
138/*
139 * pdu transmit, interact with libiscsi_tcp
140 */
141static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
142{
143 u8 submode = 0;
144
145 if (hcrc)
146 submode |= 1;
147 if (dcrc)
148 submode |= 2;
149 skb_ulp_mode(skb) = (ULP_MODE_ISCSI << 4) | submode;
150}
151
/*
 * cxgb3i_conn_cleanup_task - release per-task tx resources
 * @task:	iscsi task being torn down
 *
 * Frees an skb that was allocated but never transmitted, releases the
 * HW itt and then lets libiscsi_tcp finish the teardown.
 */
void cxgb3i_conn_cleanup_task(struct iscsi_task *task)
{
	/* tdata lives immediately after the iscsi_tcp_task in dd_data */
	struct cxgb3i_task_data *tdata = task->dd_data +
				sizeof(struct iscsi_tcp_task);

	/* never reached the xmit task callout */
	if (tdata->skb)
		__kfree_skb(tdata->skb);
	memset(tdata, 0, sizeof(struct cxgb3i_task_data));

	/* MNC - Do we need a check in case this is called but
	 * cxgb3i_conn_alloc_pdu has never been called on the task */
	cxgb3i_release_itt(task, task->hdr_itt);
	iscsi_tcp_cleanup_task(task);
}
167
168static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt,
169 unsigned int offset, unsigned int *off,
170 struct scatterlist **sgp)
171{
172 int i;
173 struct scatterlist *sg;
174
175 for_each_sg(sgl, sg, sgcnt, i) {
176 if (offset < sg->length) {
177 *off = offset;
178 *sgp = sg;
179 return 0;
180 }
181 offset -= sg->length;
182 }
183 return -EFAULT;
184}
185
/*
 * sgl_read_to_frags - map @dlen bytes of a scatterlist into page frags
 * @sg:		first scatterlist entry
 * @sgoffset:	starting byte offset within @sg
 * @dlen:	total bytes to map
 * @frags:	output array of page fragments
 * @frag_max:	capacity of @frags
 *
 * Coalesces byte-contiguous ranges on the same page into a single frag.
 * Returns the number of frags used, or -EINVAL if the list runs out
 * early or more than @frag_max frags would be needed.
 */
static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
			     unsigned int dlen, skb_frag_t *frags,
			     int frag_max)
{
	unsigned int datalen = dlen;
	unsigned int sglen = sg->length - sgoffset;
	struct page *page = sg_page(sg);
	int i;

	i = 0;
	do {
		unsigned int copy;

		if (!sglen) {
			/* current entry consumed - move to the next one */
			sg = sg_next(sg);
			if (!sg) {
				cxgb3i_log_error("%s, sg NULL, len %u/%u.\n",
						 __func__, datalen, dlen);
				return -EINVAL;
			}
			sgoffset = 0;
			sglen = sg->length;
			page = sg_page(sg);

		}
		copy = min(datalen, sglen);
		/* extend the previous frag when this range is contiguous */
		if (i && page == frags[i - 1].page &&
		    sgoffset + sg->offset ==
			frags[i - 1].page_offset + frags[i - 1].size) {
			frags[i - 1].size += copy;
		} else {
			if (i >= frag_max) {
				cxgb3i_log_error("%s, too many pages %u, "
						 "dlen %u.\n", __func__,
						 frag_max, dlen);
				return -EINVAL;
			}

			frags[i].page = page;
			frags[i].page_offset = sg->offset + sgoffset;
			frags[i].size = copy;
			i++;
		}
		datalen -= copy;
		sgoffset += copy;
		sglen -= copy;
	} while (datalen);

	return i;
}
236
/*
 * cxgb3i_conn_alloc_pdu - allocate the tx skb and iscsi header for a task
 * @task:	iscsi task
 * @opcode:	iscsi opcode of the pdu being built
 *
 * Reserves TX_HEADER_LEN at the skb head for the HW TX_DATA_WR, leaves
 * headroom for the BHS + max AHS and, for write commands when
 * skb_extra_headroom is enabled, extra room so payload can be copied
 * inline.  Returns 0 or -ENOMEM.
 */
int cxgb3i_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
{
	struct iscsi_conn *conn = task->conn;
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	/* per-task driver data follows the iscsi_tcp_task in dd_data */
	struct cxgb3i_task_data *tdata = task->dd_data + sizeof(*tcp_task);
	struct scsi_cmnd *sc = task->sc;
	int headroom = SKB_TX_PDU_HEADER_LEN;

	tcp_task->dd_data = tdata;
	task->hdr = NULL;

	/* write command, need to send data pdus */
	if (skb_extra_headroom && (opcode == ISCSI_OP_SCSI_DATA_OUT ||
	    (opcode == ISCSI_OP_SCSI_CMD &&
	    (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE))))
		headroom += min(skb_extra_headroom, conn->max_xmit_dlength);

	tdata->skb = alloc_skb(TX_HEADER_LEN + headroom, GFP_ATOMIC);
	if (!tdata->skb)
		return -ENOMEM;
	skb_reserve(tdata->skb, TX_HEADER_LEN);

	cxgb3i_tx_debug("task 0x%p, opcode 0x%x, skb 0x%p.\n",
			task, opcode, tdata->skb);

	/* the iscsi header is built directly in the skb's linear data */
	task->hdr = (struct iscsi_hdr *)tdata->skb->data;
	task->hdr_max = SKB_TX_PDU_HEADER_LEN;

	/* data_out uses scsi_cmd's itt */
	if (opcode != ISCSI_OP_SCSI_DATA_OUT)
		cxgb3i_reserve_itt(task, &task->hdr->itt);

	return 0;
}
271
272int cxgb3i_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
273 unsigned int count)
274{
275 struct iscsi_conn *conn = task->conn;
276 struct iscsi_tcp_task *tcp_task = task->dd_data;
277 struct cxgb3i_task_data *tdata = tcp_task->dd_data;
278 struct sk_buff *skb = tdata->skb;
279 unsigned int datalen = count;
280 int i, padlen = iscsi_padding(count);
281 struct page *pg;
282
283 cxgb3i_tx_debug("task 0x%p,0x%p, offset %u, count %u, skb 0x%p.\n",
284 task, task->sc, offset, count, skb);
285
286 skb_put(skb, task->hdr_len);
287 tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0);
288 if (!count)
289 return 0;
290
291 if (task->sc) {
292 struct scsi_data_buffer *sdb = scsi_out(task->sc);
293 struct scatterlist *sg = NULL;
294 int err;
295
296 tdata->offset = offset;
297 tdata->count = count;
298 err = sgl_seek_offset(sdb->table.sgl, sdb->table.nents,
299 tdata->offset, &tdata->sgoffset, &sg);
300 if (err < 0) {
301 cxgb3i_log_warn("tpdu, sgl %u, bad offset %u/%u.\n",
302 sdb->table.nents, tdata->offset,
303 sdb->length);
304 return err;
305 }
306 err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count,
307 tdata->frags, MAX_PDU_FRAGS);
308 if (err < 0) {
309 cxgb3i_log_warn("tpdu, sgl %u, bad offset %u + %u.\n",
310 sdb->table.nents, tdata->offset,
311 tdata->count);
312 return err;
313 }
314 tdata->nr_frags = err;
315
316 if (tdata->nr_frags > MAX_SKB_FRAGS ||
317 (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) {
318 char *dst = skb->data + task->hdr_len;
319 skb_frag_t *frag = tdata->frags;
320
321 /* data fits in the skb's headroom */
322 for (i = 0; i < tdata->nr_frags; i++, frag++) {
323 char *src = kmap_atomic(frag->page,
324 KM_SOFTIRQ0);
325
326 memcpy(dst, src+frag->page_offset, frag->size);
327 dst += frag->size;
328 kunmap_atomic(src, KM_SOFTIRQ0);
329 }
330 if (padlen) {
331 memset(dst, 0, padlen);
332 padlen = 0;
333 }
334 skb_put(skb, count + padlen);
335 } else {
336 /* data fit into frag_list */
337 for (i = 0; i < tdata->nr_frags; i++)
338 get_page(tdata->frags[i].page);
339
340 memcpy(skb_shinfo(skb)->frags, tdata->frags,
341 sizeof(skb_frag_t) * tdata->nr_frags);
342 skb_shinfo(skb)->nr_frags = tdata->nr_frags;
343 skb->len += count;
344 skb->data_len += count;
345 skb->truesize += count;
346 }
347
348 } else {
349 pg = virt_to_page(task->data);
350
351 get_page(pg);
352 skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data),
353 count);
354 skb->len += count;
355 skb->data_len += count;
356 skb->truesize += count;
357 }
358
359 if (padlen) {
360 i = skb_shinfo(skb)->nr_frags;
361 get_page(pad_page);
362 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, pad_page, 0,
363 padlen);
364
365 skb->data_len += padlen;
366 skb->truesize += padlen;
367 skb->len += padlen;
368 }
369
370 return 0;
371}
372
373int cxgb3i_conn_xmit_pdu(struct iscsi_task *task)
374{
375 struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
376 struct cxgb3i_conn *cconn = tcp_conn->dd_data;
377 struct iscsi_tcp_task *tcp_task = task->dd_data;
378 struct cxgb3i_task_data *tdata = tcp_task->dd_data;
379 struct sk_buff *skb = tdata->skb;
380 unsigned int datalen;
381 int err;
382
383 if (!skb)
384 return 0;
385
386 datalen = skb->data_len;
387 tdata->skb = NULL;
388 err = cxgb3i_c3cn_send_pdus(cconn->cep->c3cn, skb);
389 if (err > 0) {
390 int pdulen = err;
391
392 cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n",
393 task, skb, skb->len, skb->data_len, err);
394
395 if (task->conn->hdrdgst_en)
396 pdulen += ISCSI_DIGEST_SIZE;
397 if (datalen && task->conn->datadgst_en)
398 pdulen += ISCSI_DIGEST_SIZE;
399
400 task->conn->txdata_octets += pdulen;
401 return 0;
402 }
403
404 if (err == -EAGAIN || err == -ENOBUFS) {
405 /* reset skb to send when we are called again */
406 tdata->skb = skb;
407 return err;
408 }
409
410 kfree_skb(skb);
411 cxgb3i_tx_debug("itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
412 task->itt, skb, skb->len, skb->data_len, err);
413 iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
414 iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
415 return err;
416}
417
418int cxgb3i_pdu_init(void)
419{
420 if (SKB_TX_HEADROOM > (512 * MAX_SKB_FRAGS))
421 skb_extra_headroom = SKB_TX_HEADROOM;
422 pad_page = alloc_page(GFP_KERNEL);
423 if (!pad_page)
424 return -ENOMEM;
425 memset(page_address(pad_page), 0, PAGE_SIZE);
426 return 0;
427}
428
429void cxgb3i_pdu_cleanup(void)
430{
431 if (pad_page) {
432 __free_page(pad_page);
433 pad_page = NULL;
434 }
435}
436
/*
 * cxgb3i_conn_pdu_ready - rx callback from the offload connection
 * @c3cn:	offloaded connection with pdus queued on receive_queue
 *
 * Drains the connection's receive queue, feeding each pdu skb to
 * libiscsi_tcp, then returns the consumed byte count to the HW as rx
 * credits.  callback_lock guards c3cn->user_data against teardown.
 */
void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn)
{
	struct sk_buff *skb;
	unsigned int read = 0;
	struct iscsi_conn *conn = c3cn->user_data;
	int err = 0;

	cxgb3i_rx_debug("cn 0x%p.\n", c3cn);

	read_lock(&c3cn->callback_lock);
	if (unlikely(!conn || conn->suspend_rx)) {
		cxgb3i_rx_debug("conn 0x%p, id %d, suspend_rx %lu!\n",
				conn, conn ? conn->id : 0xFF,
				conn ? conn->suspend_rx : 0xFF);
		read_unlock(&c3cn->callback_lock);
		return;
	}
	skb = skb_peek(&c3cn->receive_queue);
	/* stop at the first error; remaining skbs stay queued */
	while (!err && skb) {
		__skb_unlink(skb, &c3cn->receive_queue);
		read += skb_rx_pdulen(skb);
		cxgb3i_rx_debug("conn 0x%p, cn 0x%p, rx skb 0x%p, pdulen %u.\n",
				conn, c3cn, skb, skb_rx_pdulen(skb));
		err = cxgb3i_conn_read_pdu_skb(conn, skb);
		__kfree_skb(skb);
		skb = skb_peek(&c3cn->receive_queue);
	}
	read_unlock(&c3cn->callback_lock);
	/* NOTE(review): conn is dereferenced after dropping callback_lock -
	 * presumably safe because rx was not suspended; confirm vs teardown */
	c3cn->copied_seq += read;
	cxgb3i_c3cn_rx_credits(c3cn, read);
	conn->rxdata_octets += read;

	if (err) {
		cxgb3i_log_info("conn 0x%p rx failed err %d.\n", conn, err);
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
	}
}
474
475void cxgb3i_conn_tx_open(struct s3_conn *c3cn)
476{
477 struct iscsi_conn *conn = c3cn->user_data;
478
479 cxgb3i_tx_debug("cn 0x%p.\n", c3cn);
480 if (conn) {
481 cxgb3i_tx_debug("cn 0x%p, cid %d.\n", c3cn, conn->id);
482 iscsi_conn_queue_work(conn);
483 }
484}
485
486void cxgb3i_conn_closing(struct s3_conn *c3cn)
487{
488 struct iscsi_conn *conn;
489
490 read_lock(&c3cn->callback_lock);
491 conn = c3cn->user_data;
492 if (conn && c3cn->state != C3CN_STATE_ESTABLISHED)
493 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
494 read_unlock(&c3cn->callback_lock);
495}
diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.h b/drivers/scsi/cxgb3i/cxgb3i_pdu.h
deleted file mode 100644
index 0770b23d90da..000000000000
--- a/drivers/scsi/cxgb3i/cxgb3i_pdu.h
+++ /dev/null
@@ -1,59 +0,0 @@
1/*
2 * cxgb3i_ulp2.h: Chelsio S3xx iSCSI driver.
3 *
4 * Copyright (c) 2008 Chelsio Communications, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Written by: Karen Xie (kxie@chelsio.com)
11 */
12
13#ifndef __CXGB3I_ULP2_PDU_H__
14#define __CXGB3I_ULP2_PDU_H__
15
/* CPL_ISCSI_HDR message layout without the leading RSS header */
struct cpl_iscsi_hdr_norss {
	union opcode_tid ot;
	u16 pdu_len_ddp;
	u16 len;
	u32 seq;
	u16 urg;
	u8 rsvd;
	u8 status;
};
25
/* CPL_RX_DATA_DDP message layout without the leading RSS header */
struct cpl_rx_data_ddp_norss {
	union opcode_tid ot;
	u16 urg;
	u16 len;
	u32 seq;
	u32 nxt_seq;
	u32 ulp_crc;
	u32 ddp_status;		/* RX_DDP_STATUS_* bit fields */
};
35
36#define RX_DDP_STATUS_IPP_SHIFT 27 /* invalid pagepod */
37#define RX_DDP_STATUS_TID_SHIFT 26 /* tid mismatch */
38#define RX_DDP_STATUS_COLOR_SHIFT 25 /* color mismatch */
39#define RX_DDP_STATUS_OFFSET_SHIFT 24 /* offset mismatch */
40#define RX_DDP_STATUS_ULIMIT_SHIFT 23 /* ulimit error */
41#define RX_DDP_STATUS_TAG_SHIFT 22 /* tag mismatch */
42#define RX_DDP_STATUS_DCRC_SHIFT 21 /* dcrc error */
43#define RX_DDP_STATUS_HCRC_SHIFT 20 /* hcrc error */
44#define RX_DDP_STATUS_PAD_SHIFT 19 /* pad error */
45#define RX_DDP_STATUS_PPP_SHIFT 18 /* pagepod parity error */
46#define RX_DDP_STATUS_LLIMIT_SHIFT 17 /* llimit error */
47#define RX_DDP_STATUS_DDP_SHIFT 16 /* ddp'able */
48#define RX_DDP_STATUS_PMM_SHIFT 15 /* pagepod mismatch */
49
50#define ULP2_FLAG_DATA_READY 0x1
51#define ULP2_FLAG_DATA_DDPED 0x2
52#define ULP2_FLAG_HCRC_ERROR 0x10
53#define ULP2_FLAG_DCRC_ERROR 0x20
54#define ULP2_FLAG_PAD_ERROR 0x40
55
56void cxgb3i_conn_closing(struct s3_conn *c3cn);
57void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn);
58void cxgb3i_conn_tx_open(struct s3_conn *c3cn);
59#endif
diff --git a/drivers/scsi/cxgbi/Kconfig b/drivers/scsi/cxgbi/Kconfig
new file mode 100644
index 000000000000..17eb5d522f42
--- /dev/null
+++ b/drivers/scsi/cxgbi/Kconfig
@@ -0,0 +1,2 @@
1source "drivers/scsi/cxgbi/cxgb3i/Kconfig"
2source "drivers/scsi/cxgbi/cxgb4i/Kconfig"
diff --git a/drivers/scsi/cxgbi/Makefile b/drivers/scsi/cxgbi/Makefile
new file mode 100644
index 000000000000..86007e344955
--- /dev/null
+++ b/drivers/scsi/cxgbi/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libcxgbi.o cxgb3i/
2obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libcxgbi.o cxgb4i/
diff --git a/drivers/scsi/cxgb3i/Kbuild b/drivers/scsi/cxgbi/cxgb3i/Kbuild
index 70d060b7ff4f..09dbf9efc8ea 100644
--- a/drivers/scsi/cxgb3i/Kbuild
+++ b/drivers/scsi/cxgbi/cxgb3i/Kbuild
@@ -1,4 +1,3 @@
1EXTRA_CFLAGS += -I$(srctree)/drivers/net/cxgb3 1EXTRA_CFLAGS += -I$(srctree)/drivers/net/cxgb3
2 2
3cxgb3i-y := cxgb3i_init.o cxgb3i_iscsi.o cxgb3i_pdu.o cxgb3i_offload.o cxgb3i_ddp.o
4obj-$(CONFIG_SCSI_CXGB3_ISCSI) += cxgb3i.o 3obj-$(CONFIG_SCSI_CXGB3_ISCSI) += cxgb3i.o
diff --git a/drivers/scsi/cxgb3i/Kconfig b/drivers/scsi/cxgbi/cxgb3i/Kconfig
index bfdcaf5c9c57..5cf4e9831f1b 100644
--- a/drivers/scsi/cxgb3i/Kconfig
+++ b/drivers/scsi/cxgbi/cxgb3i/Kconfig
@@ -1,7 +1,7 @@
1config SCSI_CXGB3_ISCSI 1config SCSI_CXGB3_ISCSI
2 tristate "Chelsio S3xx iSCSI support" 2 tristate "Chelsio T3 iSCSI support"
3 depends on CHELSIO_T3_DEPENDS 3 depends on CHELSIO_T3_DEPENDS
4 select CHELSIO_T3 4 select CHELSIO_T3
5 select SCSI_ISCSI_ATTRS 5 select SCSI_ISCSI_ATTRS
6 ---help--- 6 ---help---
7 This driver supports iSCSI offload for the Chelsio S3 series devices. 7 This driver supports iSCSI offload for the Chelsio T3 devices.
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
new file mode 100644
index 000000000000..a129a170b47b
--- /dev/null
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -0,0 +1,1465 @@
1/*
2 * cxgb3i_offload.c: Chelsio S3xx iscsi offloaded tcp connection management
3 *
4 * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
5 *
6 * This program is distributed in the hope that it will be useful, but WITHOUT
7 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
9 * release for licensing terms and conditions.
10 *
11 * Written by: Dimitris Michailidis (dm@chelsio.com)
12 * Karen Xie (kxie@chelsio.com)
13 */
14
15#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
16
17#include <linux/version.h>
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <scsi/scsi_host.h>
21
22#include "common.h"
23#include "t3_cpl.h"
24#include "t3cdev.h"
25#include "cxgb3_defs.h"
26#include "cxgb3_ctl_defs.h"
27#include "cxgb3_offload.h"
28#include "firmware_exports.h"
29#include "cxgb3i.h"
30
31static unsigned int dbg_level;
32#include "../libcxgbi.h"
33
34#define DRV_MODULE_NAME "cxgb3i"
35#define DRV_MODULE_DESC "Chelsio T3 iSCSI Driver"
36#define DRV_MODULE_VERSION "2.0.0"
37#define DRV_MODULE_RELDATE "Jun. 2010"
38
39static char version[] =
40 DRV_MODULE_DESC " " DRV_MODULE_NAME
41 " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
42
43MODULE_AUTHOR("Chelsio Communications, Inc.");
44MODULE_DESCRIPTION(DRV_MODULE_DESC);
45MODULE_VERSION(DRV_MODULE_VERSION);
46MODULE_LICENSE("GPL");
47
48module_param(dbg_level, uint, 0644);
49MODULE_PARM_DESC(dbg_level, "debug flag (default=0)");
50
51static int cxgb3i_rcv_win = 256 * 1024;
52module_param(cxgb3i_rcv_win, int, 0644);
53MODULE_PARM_DESC(cxgb3i_rcv_win, "TCP receive window in bytes (default=256KB)");
54
55static int cxgb3i_snd_win = 128 * 1024;
56module_param(cxgb3i_snd_win, int, 0644);
57MODULE_PARM_DESC(cxgb3i_snd_win, "TCP send window in bytes (default=128KB)");
58
59static int cxgb3i_rx_credit_thres = 10 * 1024;
60module_param(cxgb3i_rx_credit_thres, int, 0644);
61MODULE_PARM_DESC(rx_credit_thres,
62 "RX credits return threshold in bytes (default=10KB)");
63
64static unsigned int cxgb3i_max_connect = 8 * 1024;
65module_param(cxgb3i_max_connect, uint, 0644);
66MODULE_PARM_DESC(cxgb3i_max_connect, "Max. # of connections (default=8092)");
67
68static unsigned int cxgb3i_sport_base = 20000;
69module_param(cxgb3i_sport_base, uint, 0644);
70MODULE_PARM_DESC(cxgb3i_sport_base, "starting port number (default=20000)");
71
72static void cxgb3i_dev_open(struct t3cdev *);
73static void cxgb3i_dev_close(struct t3cdev *);
74static void cxgb3i_dev_event_handler(struct t3cdev *, u32, u32);
75
/* registration with the cxgb3 core: device add/remove and CPL dispatch */
static struct cxgb3_client t3_client = {
	.name = DRV_MODULE_NAME,
	.handlers = cxgb3i_cpl_handlers,
	.add = cxgb3i_dev_open,
	.remove = cxgb3i_dev_close,
	.event_handler = cxgb3i_dev_event_handler,
};
83
/* scsi host template for the virtual iscsi host exposed per port */
static struct scsi_host_template cxgb3i_host_template = {
	.module		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	.proc_name	= DRV_MODULE_NAME,
	.can_queue	= CXGB3I_SCSI_HOST_QDEPTH,
	.queuecommand	= iscsi_queuecommand,
	.change_queue_depth = iscsi_change_queue_depth,
	.sg_tablesize	= SG_ALL,
	.max_sectors	= 0xFFFF,
	.cmd_per_lun	= ISCSI_DEF_CMD_PER_LUN,
	.eh_abort_handler = iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.target_alloc	= iscsi_target_alloc,
	.use_clustering	= DISABLE_CLUSTERING,
	.this_id	= -1,
};
101
/* iscsi transport ops: mix of generic libiscsi and shared libcxgbi hooks */
static struct iscsi_transport cxgb3i_iscsi_transport = {
	.owner		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	/* owner and name should be set already */
	.caps		= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
				| CAP_DATADGST | CAP_DIGEST_OFFLOAD |
				CAP_PADDING_OFFLOAD,
	.param_mask	= ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH |
				ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN |
				ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T |
				ISCSI_IMM_DATA_EN | ISCSI_FIRST_BURST |
				ISCSI_MAX_BURST | ISCSI_PDU_INORDER_EN |
				ISCSI_DATASEQ_INORDER_EN | ISCSI_ERL |
				ISCSI_CONN_PORT | ISCSI_CONN_ADDRESS |
				ISCSI_EXP_STATSN | ISCSI_PERSISTENT_PORT |
				ISCSI_PERSISTENT_ADDRESS |
				ISCSI_TARGET_NAME | ISCSI_TPGT |
				ISCSI_USERNAME | ISCSI_PASSWORD |
				ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
				ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
				ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
				ISCSI_PING_TMO | ISCSI_RECV_TMO |
				ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
	.host_param_mask	= ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
				ISCSI_HOST_INITIATOR_NAME |
				ISCSI_HOST_NETDEV_NAME,
	.get_host_param	= cxgbi_get_host_param,
	.set_host_param	= cxgbi_set_host_param,
	/* session management */
	.create_session	= cxgbi_create_session,
	.destroy_session	= cxgbi_destroy_session,
	.get_session_param = iscsi_session_get_param,
	/* connection management */
	.create_conn	= cxgbi_create_conn,
	.bind_conn	= cxgbi_bind_conn,
	.destroy_conn	= iscsi_tcp_conn_teardown,
	.start_conn	= iscsi_conn_start,
	.stop_conn	= iscsi_conn_stop,
	.get_conn_param	= cxgbi_get_conn_param,
	.set_param	= cxgbi_set_conn_param,
	.get_stats	= cxgbi_get_conn_stats,
	/* pdu xmit req from user space */
	.send_pdu	= iscsi_conn_send_pdu,
	/* task */
	.init_task	= iscsi_tcp_task_init,
	.xmit_task	= iscsi_tcp_task_xmit,
	.cleanup_task	= cxgbi_cleanup_task,
	/* pdu */
	.alloc_pdu	= cxgbi_conn_alloc_pdu,
	.init_pdu	= cxgbi_conn_init_pdu,
	.xmit_pdu	= cxgbi_conn_xmit_pdu,
	.parse_pdu_itt	= cxgbi_parse_pdu_itt,
	/* TCP connect/disconnect */
	.ep_connect	= cxgbi_ep_connect,
	.ep_poll	= cxgbi_ep_poll,
	.ep_disconnect	= cxgbi_ep_disconnect,
	/* Error recovery timeout call */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};
161
162static struct scsi_transport_template *cxgb3i_stt;
163
164/*
165 * CPL (Chelsio Protocol Language) defines a message passing interface between
166 * the host driver and Chelsio asic.
 * The section below implements CPLs related to iscsi tcp connection
168 * open/close/abort and data send/receive.
169 */
170
171static int push_tx_frames(struct cxgbi_sock *csk, int req_completion);
172
/*
 * send_act_open_req - build and send a CPL_ACT_OPEN_REQ
 * @csk:	offload socket (atid and l2t already set up)
 * @skb:	pre-allocated skb carrying the request
 * @e:		L2T entry for the next hop
 */
static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
			      const struct l2t_entry *e)
{
	unsigned int wscale = cxgbi_sock_compute_wscale(cxgb3i_rcv_win);
	struct cpl_act_open_req *req = (struct cpl_act_open_req *)skb->head;

	skb->priority = CPL_PRIORITY_SETUP;

	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, csk->atid));
	req->local_port = csk->saddr.sin_port;
	req->peer_port = csk->daddr.sin_port;
	req->local_ip = csk->saddr.sin_addr.s_addr;
	req->peer_ip = csk->daddr.sin_addr.s_addr;

	/* connection options: keepalive, window scale, mss and l2t routing */
	req->opt0h = htonl(V_KEEP_ALIVE(1) | F_TCAM_BYPASS |
			V_WND_SCALE(wscale) | V_MSS_IDX(csk->mss_idx) |
			V_L2T_IDX(e->idx) | V_TX_CHANNEL(e->smt_idx));
	req->opt0l = htonl(V_ULP_MODE(ULP2_MODE_ISCSI) |
			V_RCV_BUFSIZ(cxgb3i_rcv_win>>10));

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, %pI4:%u-%pI4:%u, %u,%u,%u.\n",
		csk, csk->state, csk->flags, csk->atid,
		&req->local_ip, ntohs(req->local_port),
		&req->peer_ip, ntohs(req->peer_port),
		csk->mss_idx, e->idx, e->smt_idx);

	l2t_send(csk->cdev->lldev, skb, csk->l2t);
}
203
/* ARP resolution failed for the active open: hand off to common cleanup */
static inline void act_open_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	cxgbi_sock_act_open_req_arp_failure(NULL, skb);
}
208
209/*
210 * CPL connection close request: host ->
211 *
212 * Close a connection by sending a CPL_CLOSE_CON_REQ message and queue it to
213 * the write queue (i.e., after any unsent txt data).
214 */
static void send_close_req(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->cpl_close;
	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
	unsigned int tid = csk->tid;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	csk->cpl_close = NULL;	/* one-shot: the pre-allocated skb is consumed */
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
	req->wr.wr_lo = htonl(V_WR_TID(tid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = htonl(csk->write_seq);

	/* queue behind any unsent tx data, then kick the transmit path */
	cxgbi_sock_skb_entail(csk, skb);
	if (csk->state >= CTP_ESTABLISHED)
		push_tx_frames(csk, 1);
}
235
236/*
237 * CPL connection abort request: host ->
238 *
239 * Send an ABORT_REQ message. Makes sure we do not send multiple ABORT_REQs
240 * for the same connection and also that we do not try to send a message
241 * after the connection has closed.
242 */
/*
 * ARP failed for the ABORT_REQ: still deliver the abort to the HW, but
 * ask it not to send a RST (there is no L2 path to the peer anyway).
 */
static void abort_arp_failure(struct t3cdev *tdev, struct sk_buff *skb)
{
	struct cpl_abort_req *req = cplhdr(skb);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"t3dev 0x%p, tid %u, skb 0x%p.\n",
		tdev, GET_TID(req), skb);
	req->cmd = CPL_ABORT_NO_RST;
	cxgb3_ofld_send(tdev, skb);
}
253
static void send_abort_req(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->cpl_abort_req;
	struct cpl_abort_req *req;

	/* send at most one abort, and only while a tid/skb exists */
	if (unlikely(csk->state == CTP_ABORTING || !skb))
		return;
	cxgbi_sock_set_state(csk, CTP_ABORTING);
	cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
	/* Purge the send queue so we don't send anything after an abort. */
	cxgbi_sock_purge_write_queue(csk);

	csk->cpl_abort_req = NULL;	/* one-shot pre-allocated skb */
	req = (struct cpl_abort_req *)skb->head;
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, abort_arp_failure);
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
	req->wr.wr_lo = htonl(V_WR_TID(csk->tid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
	req->rsvd0 = htonl(csk->snd_nxt);
	/* tell the HW whether a TX_DATA_WR was ever sent on this tid */
	req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);
	req->cmd = CPL_ABORT_SEND_RST;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
		csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
		req->rsvd1);

	l2t_send(csk->cdev->lldev, skb, csk->l2t);
}
284
285/*
286 * CPL connection abort reply: host ->
287 *
288 * Send an ABORT_RPL message in response of the ABORT_REQ received.
289 */
static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
{
	struct sk_buff *skb = csk->cpl_abort_rpl;
	struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, status %d.\n",
		csk, csk->state, csk->flags, csk->tid, rst_status);

	csk->cpl_abort_rpl = NULL;	/* one-shot pre-allocated skb */
	skb->priority = CPL_PRIORITY_DATA;
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
	rpl->wr.wr_lo = htonl(V_WR_TID(csk->tid));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
	rpl->cmd = rst_status;	/* echo the status from the peer's ABORT_REQ */
	cxgb3_ofld_send(csk->cdev->lldev, skb);
}
307
308/*
309 * CPL connection rx data ack: host ->
310 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
311 * credits sent.
312 */
313static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
314{
315 struct sk_buff *skb;
316 struct cpl_rx_data_ack *req;
317 u32 dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1);
318
319 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
320 "csk 0x%p,%u,0x%lx,%u, credit %u, dack %u.\n",
321 csk, csk->state, csk->flags, csk->tid, credits, dack);
322
323 skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
324 if (!skb) {
325 pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
326 return 0;
327 }
328 req = (struct cpl_rx_data_ack *)skb->head;
329 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
330 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, csk->tid));
331 req->credit_dack = htonl(F_RX_DACK_CHANGE | V_RX_DACK_MODE(1) |
332 V_RX_CREDITS(credits));
333 skb->priority = CPL_PRIORITY_ACK;
334 cxgb3_ofld_send(csk->cdev->lldev, skb);
335 return credits;
336}
337
338/*
339 * CPL connection tx data: host ->
340 *
341 * Send iscsi PDU via TX_DATA CPL message. Returns the number of
342 * credits sent.
343 * Each TX_DATA consumes work request credit (wrs), so we need to keep track of
344 * how many we've used so far and how many are pending (i.e., yet ack'ed by T3).
345 */
346
347static unsigned int wrlen __read_mostly;
348static unsigned int skb_wrs[SKB_WR_LIST_SIZE] __read_mostly;
349
/*
 * init_wr_tab - precompute, per fragment count, the number of WR credits
 * one tx skb consumes given the per-WR capacity @wr_len.
 * Safe to call more than once; only the first call fills the table.
 */
static void init_wr_tab(unsigned int wr_len)
{
	int i;

	if (skb_wrs[1])	/* already initialized */
		return;
	for (i = 1; i < SKB_WR_LIST_SIZE; i++) {
		/* presumably sgl_len counts flits for i sg entries plus
		 * the WR header (+3) - TODO confirm vs. T3 WR format */
		int sgl_len = (3 * i) / 2 + (i & 1);

		sgl_len += 3;
		skb_wrs[i] = (sgl_len <= wr_len
			      ? 1 : 1 + (sgl_len - 2) / (wr_len - 1));
	}
	wrlen = wr_len * 8;
}
365
/*
 * make_tx_data_wr - prepend a TX_DATA_WR header to a tx skb
 * @csk:	offload socket the skb belongs to
 * @skb:	skb to prepend the work request to
 * @len:	payload length including any HW ULP additions
 * @req_completion: non-zero to request a WR_ACK completion from the HW
 */
static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
				   int len, int req_completion)
{
	struct tx_data_wr *req;
	struct l2t_entry *l2t = csk->l2t;

	skb_reset_transport_header(skb);
	req = (struct tx_data_wr *)__skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA) |
			(req_completion ? F_WR_COMPL : 0));
	req->wr_lo = htonl(V_WR_TID(csk->tid));
	/* len includes the length of any HW ULP additions */
	req->len = htonl(len);
	/* V_TX_ULP_SUBMODE sets both the mode and submode */
	req->flags = htonl(V_TX_ULP_SUBMODE(cxgbi_skcb_ulp_mode(skb)) |
			V_TX_SHOVE((skb_peek(&csk->write_queue) ? 0 : 1)));
	req->sndseq = htonl(csk->snd_nxt);
	req->param = htonl(V_TX_PORT(l2t->smt_idx));

	/* the very first TX_DATA_WR on a tid carries extra init flags */
	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
		req->flags |= htonl(V_TX_ACK_PAGES(2) | F_TX_INIT |
				    V_TX_CPU_IDX(csk->rss_qid));
		/* sendbuffer is in units of 32KB. */
		req->param |= htonl(V_TX_SNDBUF(cxgb3i_snd_win >> 15));
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
	}
}
393
/**
 * push_tx_frames -- start transmit
 * @csk: the offloaded connection
 * @req_completion: request wr_ack or not
 *
 * Prepends TX_DATA_WR or CPL_CLOSE_CON_REQ headers to buffers waiting in a
 * connection's send queue and sends them on to T3. Must be called with the
 * connection's lock held. Returns the amount of send buffer space that was
 * freed as a result of sending queued data to T3.
 */
404
/* ARP failed for a data skb: drop it; TCP-level recovery retransmits */
static void arp_failure_skb_discard(struct t3cdev *dev, struct sk_buff *skb)
{
	kfree_skb(skb);
}
409
static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
{
	int total_size = 0;
	struct sk_buff *skb;

	/* no transmission once the connection has started closing/aborting */
	if (unlikely(csk->state < CTP_ESTABLISHED ||
		csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, in closing state.\n",
			csk, csk->state, csk->flags, csk->tid);
		return 0;
	}

	while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
		int len = skb->len; /* length before skb_push */
		int frags = skb_shinfo(skb)->nr_frags + (len != skb->data_len);
		/* WR count estimate from the precomputed table (init_wr_tab) */
		int wrs_needed = skb_wrs[frags];

		/* a payload that fits in one WR only needs one */
		if (wrs_needed > 1 && len + sizeof(struct tx_data_wr) <= wrlen)
			wrs_needed = 1;

		WARN_ON(frags >= SKB_WR_LIST_SIZE || wrs_needed < 1);

		if (csk->wr_cred < wrs_needed) {
			/* out of WR credits; resume on the next WR_ACK */
			log_debug(1 << CXGBI_DBG_PDU_TX,
				"csk 0x%p, skb len %u/%u, frag %u, wr %d<%u.\n",
				csk, skb->len, skb->data_len, frags,
				wrs_needed, csk->wr_cred);
			break;
		}

		__skb_unlink(skb, &csk->write_queue);
		skb->priority = CPL_PRIORITY_DATA;
		skb->csum = wrs_needed;	/* remember this until the WR_ACK */
		csk->wr_cred -= wrs_needed;
		csk->wr_una_cred += wrs_needed;
		/* track the skb until the hardware acknowledges the WR */
		cxgbi_sock_enqueue_wr(csk, skb);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p, enqueue, skb len %u/%u, frag %u, wr %d, "
			"left %u, unack %u.\n",
			csk, skb->len, skb->data_len, frags, skb->csum,
			csk->wr_cred, csk->wr_una_cred);

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
			/* request a completion once half the credits are
			 * outstanding, so credits are returned promptly */
			if ((req_completion &&
				csk->wr_una_cred == wrs_needed) ||
			     csk->wr_una_cred >= csk->wr_max_cred / 2) {
				req_completion = 1;
				csk->wr_una_cred = 0;
			}
			len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
			make_tx_data_wr(csk, skb, len, req_completion);
			csk->snd_nxt += len;
			cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
		}
		total_size += skb->truesize;
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p, tid 0x%x, send skb 0x%p.\n",
			csk, csk->tid, skb);
		set_arp_failure_handler(skb, arp_failure_skb_discard);
		l2t_send(csk->cdev->lldev, skb, csk->l2t);
	}
	return total_size;
}
475
476/*
477 * Process a CPL_ACT_ESTABLISH message: -> host
478 * Updates connection state from an active establish CPL message. Runs with
479 * the connection lock held.
480 */
481
482static inline void free_atid(struct cxgbi_sock *csk)
483{
484 if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
485 cxgb3_free_atid(csk->cdev->lldev, csk->atid);
486 cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
487 cxgbi_sock_put(csk);
488 }
489}
490
/*
 * CPL_ACT_ESTABLISH handler: the active open completed.  Swap the atid
 * for the hardware tid, initialize sequence numbers from the CPL, and
 * kick off any queued transmit (or abort if close was already requested).
 */
static int do_act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cxgbi_sock *csk = ctx;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	u32 rcv_isn = ntohl(req->rcv_isn);	/* real RCV_ISN + 1 */

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"atid 0x%x,tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
		atid, atid, csk, csk->state, csk->flags, rcv_isn);

	/* take a ref for the tid before releasing the atid's ref */
	cxgbi_sock_get(csk);
	cxgbi_sock_set_flag(csk, CTPF_HAS_TID);
	csk->tid = tid;
	cxgb3_insert_tid(csk->cdev->lldev, &t3_client, csk, tid);

	free_atid(csk);

	/* rss queue id is delivered in the csum field of this CPL */
	csk->rss_qid = G_QNUM(ntohs(skb->csum));

	spin_lock_bh(&csk->lock);
	/* connection is up; cancel any pending active-open retry */
	if (csk->retry_timer.function) {
		del_timer(&csk->retry_timer);
		csk->retry_timer.function = NULL;
	}

	if (unlikely(csk->state != CTP_ACTIVE_OPEN))
		pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
			csk, csk->state, csk->flags, csk->tid);

	csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
	/* the HW window field caps at M_RCV_BUFSIZ (in 1KB units); account
	 * for any excess so credit returns stay consistent */
	if (cxgb3i_rcv_win > (M_RCV_BUFSIZ << 10))
		csk->rcv_wup -= cxgb3i_rcv_win - (M_RCV_BUFSIZ << 10);

	cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));

	if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
		/* upper layer has requested closing */
		send_abort_req(csk);
	else {
		if (skb_queue_len(&csk->write_queue))
			push_tx_frames(csk, 1);
		cxgbi_conn_tx_open(csk);
	}

	spin_unlock_bh(&csk->lock);
	__kfree_skb(skb);
	return 0;
}
541
542/*
543 * Process a CPL_ACT_OPEN_RPL message: -> host
544 * Handle active open failures.
545 */
/* Map a CPL_ACT_OPEN_RPL hardware status code to a host errno value. */
static int act_open_rpl_status_to_errno(int status)
{
	if (status == CPL_ERR_CONN_RESET)
		return -ECONNREFUSED;
	if (status == CPL_ERR_ARP_MISS)
		return -EHOSTUNREACH;
	if (status == CPL_ERR_CONN_TIMEDOUT)
		return -ETIMEDOUT;
	if (status == CPL_ERR_TCAM_FULL)
		return -ENOMEM;
	if (status == CPL_ERR_CONN_EXIST)
		return -EADDRINUSE;
	return -EIO;
}
563
/*
 * Timer callback armed by do_act_open_rpl() on CPL_ERR_CONN_EXIST:
 * re-issue the active-open request, or fail the open if no skb can be
 * allocated for it.
 */
static void act_open_retry_timer(unsigned long data)
{
	struct sk_buff *skb;
	struct cxgbi_sock *csk = (struct cxgbi_sock *)data;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	/* hold a ref across the locked section */
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_ATOMIC);
	if (!skb)
		cxgbi_sock_fail_act_open(csk, -ENOMEM);
	else {
		skb->sk = (struct sock *)csk;
		set_arp_failure_handler(skb, act_open_arp_failure);
		send_act_open_req(csk, skb, csk->l2t);
	}
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
586
/*
 * CPL_ACT_OPEN_RPL handler: the active open failed.  Schedule a retry for
 * CPL_ERR_CONN_EXIST (address briefly in use), otherwise fail the open
 * with the mapped errno.
 */
static int do_act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cxgbi_sock *csk = ctx;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);

	pr_info("csk 0x%p,%u,0x%lx,%u, status %u, %pI4:%u-%pI4:%u.\n",
		csk, csk->state, csk->flags, csk->atid, rpl->status,
		&csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
		&csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port));

	/* for these three statuses no tid was consumed, nothing to release */
	if (rpl->status != CPL_ERR_TCAM_FULL &&
	    rpl->status != CPL_ERR_CONN_EXIST &&
	    rpl->status != CPL_ERR_ARP_MISS)
		cxgb3_queue_tid_release(tdev, GET_TID(rpl));

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	if (rpl->status == CPL_ERR_CONN_EXIST &&
	    csk->retry_timer.function != act_open_retry_timer) {
		/* retry the open in half a second */
		csk->retry_timer.function = act_open_retry_timer;
		mod_timer(&csk->retry_timer, jiffies + HZ / 2);
	} else
		cxgbi_sock_fail_act_open(csk,
				act_open_rpl_status_to_errno(rpl->status));

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
	__kfree_skb(skb);
	return 0;
}
617
618/*
619 * Process PEER_CLOSE CPL messages: -> host
620 * Handle peer FIN.
621 */
622static int do_peer_close(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
623{
624 struct cxgbi_sock *csk = ctx;
625
626 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
627 "csk 0x%p,%u,0x%lx,%u.\n",
628 csk, csk->state, csk->flags, csk->tid);
629
630 cxgbi_sock_rcv_peer_close(csk);
631 __kfree_skb(skb);
632 return 0;
633}
634
635/*
636 * Process CLOSE_CONN_RPL CPL message: -> host
637 * Process a peer ACK to our FIN.
638 */
639static int do_close_con_rpl(struct t3cdev *cdev, struct sk_buff *skb,
640 void *ctx)
641{
642 struct cxgbi_sock *csk = ctx;
643 struct cpl_close_con_rpl *rpl = cplhdr(skb);
644
645 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
646 "csk 0x%p,%u,0x%lx,%u, snxt %u.\n",
647 csk, csk->state, csk->flags, csk->tid, ntohl(rpl->snd_nxt));
648
649 cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
650 __kfree_skb(skb);
651 return 0;
652}
653
654/*
655 * Process ABORT_REQ_RSS CPL message: -> host
656 * Process abort requests. If we are waiting for an ABORT_RPL we ignore this
657 * request except that we need to reply to it.
658 */
659
/*
 * Translate an ABORT_REQ_RSS reason code into a host errno.
 * @need_rst is accepted for interface symmetry but is not written here.
 */
static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
				 int *need_rst)
{
	switch (abort_reason) {
	case CPL_ERR_BAD_SYN: /* fall through */
	case CPL_ERR_CONN_RESET:
		/* reset after ESTABLISHED means the pipe is gone */
		return csk->state > CTP_ESTABLISHED ? -EPIPE : -ECONNRESET;
	case CPL_ERR_XMIT_TIMEDOUT:
	case CPL_ERR_PERSIST_TIMEDOUT:
	case CPL_ERR_FINWAIT2_TIMEDOUT:
	case CPL_ERR_KEEPALIVE_TIMEDOUT:
		return -ETIMEDOUT;
	default:
		return -EIO;
	}
}
676
/*
 * ABORT_REQ_RSS handler.  Negative-advice statuses are transient and are
 * dropped outright.  On the first receipt only the ABORTING state is
 * recorded; the abort reply is sent when the request is seen again
 * (CTPF_ABORT_REQ_RCVD already set).
 */
static int do_abort_req(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	const struct cpl_abort_req_rss *req = cplhdr(skb);
	struct cxgbi_sock *csk = ctx;
	int rst_status = CPL_ABORT_NO_RST;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	/* transient congestion advice, not a real abort: ignore */
	if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
	    req->status == CPL_ERR_PERSIST_NEG_ADVICE) {
		goto done;
	}

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	/* first copy: just mark the connection aborting and wait */
	if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) {
		cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
		cxgbi_sock_set_state(csk, CTP_ABORTING);
		goto out;
	}

	cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
	send_abort_rpl(csk, rst_status);

	/* if we did not originate an abort ourselves, close out now */
	if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		csk->err = abort_status_to_errno(csk, req->status, &rst_status);
		cxgbi_sock_closed(csk);
	}

out:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
done:
	__kfree_skb(skb);
	return 0;
}
716
717/*
718 * Process ABORT_RPL_RSS CPL message: -> host
719 * Process abort replies. We only process these messages if we anticipate
720 * them as the coordination between SW and HW in this area is somewhat lacking
721 * and sometimes we get ABORT_RPLs after we are done with the connection that
722 * originated the ABORT_REQ.
723 */
724static int do_abort_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
725{
726 struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
727 struct cxgbi_sock *csk = ctx;
728
729 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
730 "status 0x%x, csk 0x%p, s %u, 0x%lx.\n",
731 rpl->status, csk, csk ? csk->state : 0,
732 csk ? csk->flags : 0UL);
733 /*
734 * Ignore replies to post-close aborts indicating that the abort was
735 * requested too late. These connections are terminated when we get
736 * PEER_CLOSE or CLOSE_CON_RPL and by the time the abort_rpl_rss
737 * arrives the TID is either no longer used or it has been recycled.
738 */
739 if (rpl->status == CPL_ERR_ABORT_FAILED)
740 goto rel_skb;
741 /*
742 * Sometimes we've already closed the connection, e.g., a post-close
743 * abort races with ABORT_REQ_RSS, the latter frees the connection
744 * expecting the ABORT_REQ will fail with CPL_ERR_ABORT_FAILED,
745 * but FW turns the ABORT_REQ into a regular one and so we get
746 * ABORT_RPL_RSS with status 0 and no connection.
747 */
748 if (csk)
749 cxgbi_sock_rcv_abort_rpl(csk);
750rel_skb:
751 __kfree_skb(skb);
752 return 0;
753}
754
755/*
756 * Process RX_ISCSI_HDR CPL message: -> host
757 * Handle received PDUs, the payload could be DDP'ed. If not, the payload
758 * follow after the bhs.
759 */
760static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx)
761{
762 struct cxgbi_sock *csk = ctx;
763 struct cpl_iscsi_hdr *hdr_cpl = cplhdr(skb);
764 struct cpl_iscsi_hdr_norss data_cpl;
765 struct cpl_rx_data_ddp_norss ddp_cpl;
766 unsigned int hdr_len, data_len, status;
767 unsigned int len;
768 int err;
769
770 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
771 "csk 0x%p,%u,0x%lx,%u, skb 0x%p,%u.\n",
772 csk, csk->state, csk->flags, csk->tid, skb, skb->len);
773
774 spin_lock_bh(&csk->lock);
775
776 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
777 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
778 "csk 0x%p,%u,0x%lx,%u, bad state.\n",
779 csk, csk->state, csk->flags, csk->tid);
780 if (csk->state != CTP_ABORTING)
781 goto abort_conn;
782 else
783 goto discard;
784 }
785
786 cxgbi_skcb_tcp_seq(skb) = ntohl(hdr_cpl->seq);
787 cxgbi_skcb_flags(skb) = 0;
788
789 skb_reset_transport_header(skb);
790 __skb_pull(skb, sizeof(struct cpl_iscsi_hdr));
791
792 len = hdr_len = ntohs(hdr_cpl->len);
793 /* msg coalesce is off or not enough data received */
794 if (skb->len <= hdr_len) {
795 pr_err("%s: tid %u, CPL_ISCSI_HDR, skb len %u < %u.\n",
796 csk->cdev->ports[csk->port_id]->name, csk->tid,
797 skb->len, hdr_len);
798 goto abort_conn;
799 }
800 cxgbi_skcb_set_flag(skb, SKCBF_RX_COALESCED);
801
802 err = skb_copy_bits(skb, skb->len - sizeof(ddp_cpl), &ddp_cpl,
803 sizeof(ddp_cpl));
804 if (err < 0) {
805 pr_err("%s: tid %u, copy cpl_ddp %u-%zu failed %d.\n",
806 csk->cdev->ports[csk->port_id]->name, csk->tid,
807 skb->len, sizeof(ddp_cpl), err);
808 goto abort_conn;
809 }
810
811 cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
812 cxgbi_skcb_rx_pdulen(skb) = ntohs(ddp_cpl.len);
813 cxgbi_skcb_rx_ddigest(skb) = ntohl(ddp_cpl.ulp_crc);
814 status = ntohl(ddp_cpl.ddp_status);
815
816 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
817 "csk 0x%p, skb 0x%p,%u, pdulen %u, status 0x%x.\n",
818 csk, skb, skb->len, cxgbi_skcb_rx_pdulen(skb), status);
819
820 if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT))
821 cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
822 if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT))
823 cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
824 if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT))
825 cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);
826
827 if (skb->len > (hdr_len + sizeof(ddp_cpl))) {
828 err = skb_copy_bits(skb, hdr_len, &data_cpl, sizeof(data_cpl));
829 if (err < 0) {
830 pr_err("%s: tid %u, cp %zu/%u failed %d.\n",
831 csk->cdev->ports[csk->port_id]->name,
832 csk->tid, sizeof(data_cpl), skb->len, err);
833 goto abort_conn;
834 }
835 data_len = ntohs(data_cpl.len);
836 log_debug(1 << CXGBI_DBG_DDP | 1 << CXGBI_DBG_PDU_RX,
837 "skb 0x%p, pdu not ddp'ed %u/%u, status 0x%x.\n",
838 skb, data_len, cxgbi_skcb_rx_pdulen(skb), status);
839 len += sizeof(data_cpl) + data_len;
840 } else if (status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT))
841 cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);
842
843 csk->rcv_nxt = ntohl(ddp_cpl.seq) + cxgbi_skcb_rx_pdulen(skb);
844 __pskb_trim(skb, len);
845 __skb_queue_tail(&csk->receive_queue, skb);
846 cxgbi_conn_pdu_ready(csk);
847
848 spin_unlock_bh(&csk->lock);
849 return 0;
850
851abort_conn:
852 send_abort_req(csk);
853discard:
854 spin_unlock_bh(&csk->lock);
855 __kfree_skb(skb);
856 return 0;
857}
858
859/*
860 * Process TX_DATA_ACK CPL messages: -> host
861 * Process an acknowledgment of WR completion. Advance snd_una and send the
862 * next batch of work requests from the write queue.
863 */
864static int do_wr_ack(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
865{
866 struct cxgbi_sock *csk = ctx;
867 struct cpl_wr_ack *hdr = cplhdr(skb);
868
869 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
870 "csk 0x%p,%u,0x%lx,%u, cr %u.\n",
871 csk, csk->state, csk->flags, csk->tid, ntohs(hdr->credits));
872
873 cxgbi_sock_rcv_wr_ack(csk, ntohs(hdr->credits), ntohl(hdr->snd_una), 1);
874 __kfree_skb(skb);
875 return 0;
876}
877
878/*
879 * for each connection, pre-allocate skbs needed for close/abort requests. So
880 * that we can service the request right away.
881 */
882static int alloc_cpls(struct cxgbi_sock *csk)
883{
884 csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req), 0,
885 GFP_KERNEL);
886 if (!csk->cpl_close)
887 return -ENOMEM;
888 csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req), 0,
889 GFP_KERNEL);
890 if (!csk->cpl_abort_req)
891 goto free_cpl_skbs;
892
893 csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl), 0,
894 GFP_KERNEL);
895 if (!csk->cpl_abort_rpl)
896 goto free_cpl_skbs;
897
898 return 0;
899
900free_cpl_skbs:
901 cxgbi_sock_free_cpl_skbs(csk);
902 return -ENOMEM;
903}
904
905/**
906 * release_offload_resources - release offload resource
907 * @c3cn: the offloaded iscsi tcp connection.
908 * Release resources held by an offload connection (TID, L2T entry, etc.)
909 */
910static void l2t_put(struct cxgbi_sock *csk)
911{
912 struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;
913
914 if (csk->l2t) {
915 l2t_release(L2DATA(t3dev), csk->l2t);
916 csk->l2t = NULL;
917 cxgbi_sock_put(csk);
918 }
919}
920
/* Tear down everything the offload layer holds for this connection:
 * pre-allocated CPL skbs, outstanding WR list, L2T entry and atid/tid.
 */
static void release_offload_resources(struct cxgbi_sock *csk)
{
	struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	csk->rss_qid = 0;
	cxgbi_sock_free_cpl_skbs(csk);

	/* any credits still missing mean WRs are outstanding: purge them */
	if (csk->wr_cred != csk->wr_max_cred) {
		cxgbi_sock_purge_wr_queue(csk);
		cxgbi_sock_reset_wr_list(csk);
	}
	l2t_put(csk);
	/* the connection holds either an atid (open pending) or a tid */
	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
		free_atid(csk);
	else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
		cxgb3_remove_tid(t3dev, (void *)csk, csk->tid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
		cxgbi_sock_put(csk);
	}
	csk->dst = NULL;
	csk->cdev = NULL;
}
947
/*
 * Push the hba's configured iSCSI IPv4 address down to the private
 * per-netdev slot the firmware uses; prefer the vlan device (vdev) when
 * present.  A zero chba->ipv4addr clears both slots.
 */
static void update_address(struct cxgbi_hba *chba)
{
	if (chba->ipv4addr) {
		if (chba->vdev &&
		    chba->ipv4addr != cxgb3i_get_private_ipv4addr(chba->vdev)) {
			/* address lives on the vlan dev; clear the real dev */
			cxgb3i_set_private_ipv4addr(chba->vdev, chba->ipv4addr);
			cxgb3i_set_private_ipv4addr(chba->ndev, 0);
			pr_info("%s set %pI4.\n",
				chba->vdev->name, &chba->ipv4addr);
		} else if (chba->ipv4addr !=
				cxgb3i_get_private_ipv4addr(chba->ndev)) {
			cxgb3i_set_private_ipv4addr(chba->ndev, chba->ipv4addr);
			pr_info("%s set %pI4.\n",
				chba->ndev->name, &chba->ipv4addr);
		}
	} else if (cxgb3i_get_private_ipv4addr(chba->ndev)) {
		if (chba->vdev)
			cxgb3i_set_private_ipv4addr(chba->vdev, 0);
		cxgb3i_set_private_ipv4addr(chba->ndev, 0);
	}
}
969
/*
 * Start an active open: resolve the L2T entry, allocate an atid, build
 * the CPL_ACT_OPEN_REQ and hand it to the hardware.  Returns 0 on success
 * or -EINVAL on any failure.
 */
static int init_act_open(struct cxgbi_sock *csk)
{
	struct dst_entry *dst = csk->dst;
	struct cxgbi_device *cdev = csk->cdev;
	struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev;
	struct net_device *ndev = cdev->ports[csk->port_id];
	struct cxgbi_hba *chba = cdev->hbas[csk->port_id];
	struct sk_buff *skb = NULL;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx.\n", csk, csk->state, csk->flags);

	/* sync the firmware's private ipv4 address before connecting */
	update_address(chba);
	if (chba->ipv4addr)
		csk->saddr.sin_addr.s_addr = chba->ipv4addr;

	csk->rss_qid = 0;
	csk->l2t = t3_l2t_get(t3dev, dst->neighbour, ndev);
	if (!csk->l2t) {
		pr_err("NO l2t available.\n");
		return -EINVAL;
	}
	/* ref for the l2t entry, dropped by l2t_put() */
	cxgbi_sock_get(csk);

	csk->atid = cxgb3_alloc_atid(t3dev, &t3_client, csk);
	/*
	 * NOTE(review): if csk->atid is declared unsigned in cxgbi_sock,
	 * this < 0 check can never fire -- confirm the field's type.
	 */
	if (csk->atid < 0) {
		pr_err("NO atid available.\n");
		goto rel_resource;
	}
	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
	/* ref for the atid, dropped by free_atid() */
	cxgbi_sock_get(csk);

	skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL);
	if (!skb)
		goto rel_resource;
	skb->sk = (struct sock *)csk;
	set_arp_failure_handler(skb, act_open_arp_failure);

	/* keep one WR credit in reserve */
	csk->wr_max_cred = csk->wr_cred = T3C_DATA(t3dev)->max_wrs - 1;
	csk->wr_una_cred = 0;
	csk->mss_idx = cxgbi_sock_select_mss(csk, dst_mtu(dst));
	cxgbi_sock_reset_wr_list(csk);
	csk->err = 0;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, %pI4:%u-%pI4:%u.\n",
		csk, csk->state, csk->flags,
		&csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
		&csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port));

	cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
	send_act_open_req(csk, skb, csk->l2t);
	return 0;

rel_resource:
	/*
	 * NOTE(review): only the skb is released here; the l2t entry, atid
	 * and the socket references taken above appear to be left for the
	 * caller's release path -- confirm against the error handling in
	 * the common cxgbi layer.
	 */
	if (skb)
		__kfree_skb(skb);
	return -EINVAL;
}
1029
/*
 * Dispatch table mapping CPL message opcodes to their handlers; registered
 * with the cxgb3 offload layer for connections owned by this driver.
 */
cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = do_act_establish,
	[CPL_ACT_OPEN_RPL] = do_act_open_rpl,
	[CPL_PEER_CLOSE] = do_peer_close,
	[CPL_ABORT_REQ_RSS] = do_abort_req,
	[CPL_ABORT_RPL_RSS] = do_abort_rpl,
	[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
	[CPL_TX_DMA_ACK] = do_wr_ack,
	[CPL_ISCSI_HDR] = do_iscsi_hdr,
};
1040
1041/**
1042 * cxgb3i_ofld_init - allocate and initialize resources for each adapter found
1043 * @cdev: cxgbi adapter
1044 */
1045int cxgb3i_ofld_init(struct cxgbi_device *cdev)
1046{
1047 struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev;
1048 struct adap_ports port;
1049 struct ofld_page_info rx_page_info;
1050 unsigned int wr_len;
1051 int rc;
1052
1053 if (t3dev->ctl(t3dev, GET_WR_LEN, &wr_len) < 0 ||
1054 t3dev->ctl(t3dev, GET_PORTS, &port) < 0 ||
1055 t3dev->ctl(t3dev, GET_RX_PAGE_INFO, &rx_page_info) < 0) {
1056 pr_warn("t3 0x%p, offload up, ioctl failed.\n", t3dev);
1057 return -EINVAL;
1058 }
1059
1060 if (cxgb3i_max_connect > CXGBI_MAX_CONN)
1061 cxgb3i_max_connect = CXGBI_MAX_CONN;
1062
1063 rc = cxgbi_device_portmap_create(cdev, cxgb3i_sport_base,
1064 cxgb3i_max_connect);
1065 if (rc < 0)
1066 return rc;
1067
1068 init_wr_tab(wr_len);
1069 cdev->csk_release_offload_resources = release_offload_resources;
1070 cdev->csk_push_tx_frames = push_tx_frames;
1071 cdev->csk_send_abort_req = send_abort_req;
1072 cdev->csk_send_close_req = send_close_req;
1073 cdev->csk_send_rx_credits = send_rx_credits;
1074 cdev->csk_alloc_cpls = alloc_cpls;
1075 cdev->csk_init_act_open = init_act_open;
1076
1077 pr_info("cdev 0x%p, offload up, added.\n", cdev);
1078 return 0;
1079}
1080
1081/*
1082 * functions to program the pagepod in h/w
1083 */
1084static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr)
1085{
1086 struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head;
1087
1088 memset(req, 0, sizeof(*req));
1089
1090 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS));
1091 req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) |
1092 V_ULPTX_CMD(ULP_MEM_WRITE));
1093 req->len = htonl(V_ULP_MEMIO_DATA_LEN(PPOD_SIZE >> 5) |
1094 V_ULPTX_NFLITS((PPOD_SIZE >> 3) + 1));
1095}
1096
/*
 * Program @npods pagepods starting at index @idx into adapter memory,
 * describing the gather list @gl, using the skbs pre-allocated by
 * ddp_alloc_gl_skb().
 */
static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr,
			unsigned int idx, unsigned int npods,
			struct cxgbi_gather_list *gl)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
	int i;

	log_debug(1 << CXGBI_DBG_DDP,
		"csk 0x%p, idx %u, npods %u, gl 0x%p.\n",
		csk, idx, npods, gl);

	for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
		struct sk_buff *skb = ddp->gl_skb[idx];

		/* hold on to the skb until we clear the ddp mapping */
		skb_get(skb);

		ulp_mem_io_set_hdr(skb, pm_addr);
		/* pagepod payload follows the ulp_mem_io header */
		cxgbi_ddp_ppod_set((struct cxgbi_pagepod *)(skb->head +
					sizeof(struct ulp_mem_io)),
				   hdr, gl, i * PPOD_PAGES_MAX);
		skb->priority = CPL_PRIORITY_CONTROL;
		cxgb3_ofld_send(cdev->lldev, skb);
	}
	return 0;
}
1125
/*
 * Clear @npods pagepods starting at index @idx by writing zeroed pods back
 * to adapter memory, reusing (and giving up) the held pre-allocated skbs.
 */
static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag,
			  unsigned int idx, unsigned int npods)
{
	struct cxgbi_device *cdev = chba->cdev;
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
	int i;

	log_debug(1 << CXGBI_DBG_DDP,
		"cdev 0x%p, idx %u, npods %u, tag 0x%x.\n",
		cdev, idx, npods, tag);

	for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
		struct sk_buff *skb = ddp->gl_skb[idx];

		if (!skb) {
			pr_err("tag 0x%x, 0x%x, %d/%u, skb NULL.\n",
				tag, idx, i, npods);
			continue;
		}
		/* ownership passes to the send path; drop our slot */
		ddp->gl_skb[idx] = NULL;
		memset(skb->head + sizeof(struct ulp_mem_io), 0, PPOD_SIZE);
		ulp_mem_io_set_hdr(skb, pm_addr);
		skb->priority = CPL_PRIORITY_CONTROL;
		cxgb3_ofld_send(cdev->lldev, skb);
	}
}
1153
1154static void ddp_free_gl_skb(struct cxgbi_ddp_info *ddp, int idx, int cnt)
1155{
1156 int i;
1157
1158 log_debug(1 << CXGBI_DBG_DDP,
1159 "ddp 0x%p, idx %d, cnt %d.\n", ddp, idx, cnt);
1160
1161 for (i = 0; i < cnt; i++, idx++)
1162 if (ddp->gl_skb[idx]) {
1163 kfree_skb(ddp->gl_skb[idx]);
1164 ddp->gl_skb[idx] = NULL;
1165 }
1166}
1167
1168static int ddp_alloc_gl_skb(struct cxgbi_ddp_info *ddp, int idx,
1169 int cnt, gfp_t gfp)
1170{
1171 int i;
1172
1173 log_debug(1 << CXGBI_DBG_DDP,
1174 "ddp 0x%p, idx %d, cnt %d.\n", ddp, idx, cnt);
1175
1176 for (i = 0; i < cnt; i++) {
1177 struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
1178 PPOD_SIZE, 0, gfp);
1179 if (skb)
1180 ddp->gl_skb[idx + i] = skb;
1181 else {
1182 ddp_free_gl_skb(ddp, idx, i);
1183 return -ENOMEM;
1184 }
1185 }
1186 return 0;
1187}
1188
/*
 * Program the connection's DDP page-size index via CPL_SET_TCB_FIELD
 * (TCB word 31, top nibble).  @reply requests a hardware completion.
 */
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
				unsigned int tid, int pg_idx, bool reply)
{
	struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
					GFP_KERNEL);
	struct cpl_set_tcb_field *req;
	/* out-of-range index falls back to 0 */
	u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0;

	log_debug(1 << CXGBI_DBG_DDP,
		"csk 0x%p, tid %u, pg_idx %d.\n", csk, tid, pg_idx);
	if (!skb)
		return -ENOMEM;

	/* set up ulp submode and page size */
	req = (struct cpl_set_tcb_field *)skb->head;
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = V_NO_REPLY(reply ? 0 : 1);
	req->cpu_idx = 0;
	req->word = htons(31);
	req->mask = cpu_to_be64(0xF0000000);
	req->val = cpu_to_be64(val << 28);
	skb->priority = CPL_PRIORITY_CONTROL;

	cxgb3_ofld_send(csk->cdev->lldev, skb);
	return 0;
}
1216
1217/**
1218 * cxgb3i_setup_conn_digest - setup conn. digest setting
1219 * @csk: cxgb tcp socket
1220 * @tid: connection id
1221 * @hcrc: header digest enabled
1222 * @dcrc: data digest enabled
1223 * @reply: request reply from h/w
1224 * set up the iscsi digest settings for a connection identified by tid
1225 */
1226static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
1227 int hcrc, int dcrc, int reply)
1228{
1229 struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
1230 GFP_KERNEL);
1231 struct cpl_set_tcb_field *req;
1232 u64 val = (hcrc ? 1 : 0) | (dcrc ? 2 : 0);
1233
1234 log_debug(1 << CXGBI_DBG_DDP,
1235 "csk 0x%p, tid %u, crc %d,%d.\n", csk, tid, hcrc, dcrc);
1236 if (!skb)
1237 return -ENOMEM;
1238
1239 /* set up ulp submode and page size */
1240 req = (struct cpl_set_tcb_field *)skb->head;
1241 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1242 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
1243 req->reply = V_NO_REPLY(reply ? 0 : 1);
1244 req->cpu_idx = 0;
1245 req->word = htons(31);
1246 req->mask = cpu_to_be64(0x0F000000);
1247 req->val = cpu_to_be64(val << 24);
1248 skb->priority = CPL_PRIORITY_CONTROL;
1249
1250 cxgb3_ofld_send(csk->cdev->lldev, skb);
1251 return 0;
1252}
1253
1254/**
1255 * t3_ddp_cleanup - release the cxgb3 adapter's ddp resource
1256 * @cdev: cxgb3i adapter
1257 * release all the resource held by the ddp pagepod manager for a given
1258 * adapter if needed
1259 */
1260
1261static void t3_ddp_cleanup(struct cxgbi_device *cdev)
1262{
1263 struct t3cdev *tdev = (struct t3cdev *)cdev->lldev;
1264
1265 if (cxgbi_ddp_cleanup(cdev)) {
1266 pr_info("t3dev 0x%p, ulp_iscsi no more user.\n", tdev);
1267 tdev->ulp_iscsi = NULL;
1268 }
1269}
1270
1271/**
1272 * ddp_init - initialize the cxgb3 adapter's ddp resource
1273 * @cdev: cxgb3i adapter
1274 * initialize the ddp pagepod manager for a given adapter
1275 */
1276static int cxgb3i_ddp_init(struct cxgbi_device *cdev)
1277{
1278 struct t3cdev *tdev = (struct t3cdev *)cdev->lldev;
1279 struct cxgbi_ddp_info *ddp = tdev->ulp_iscsi;
1280 struct ulp_iscsi_info uinfo;
1281 unsigned int pgsz_factor[4];
1282 int err;
1283
1284 if (ddp) {
1285 kref_get(&ddp->refcnt);
1286 pr_warn("t3dev 0x%p, ddp 0x%p already set up.\n",
1287 tdev, tdev->ulp_iscsi);
1288 cdev->ddp = ddp;
1289 return -EALREADY;
1290 }
1291
1292 err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo);
1293 if (err < 0) {
1294 pr_err("%s, failed to get iscsi param err=%d.\n",
1295 tdev->name, err);
1296 return err;
1297 }
1298
1299 err = cxgbi_ddp_init(cdev, uinfo.llimit, uinfo.ulimit,
1300 uinfo.max_txsz, uinfo.max_rxsz);
1301 if (err < 0)
1302 return err;
1303
1304 ddp = cdev->ddp;
1305
1306 uinfo.tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
1307 cxgbi_ddp_page_size_factor(pgsz_factor);
1308 uinfo.ulimit = uinfo.llimit + (ddp->nppods << PPOD_SIZE_SHIFT);
1309
1310 err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo);
1311 if (err < 0) {
1312 pr_warn("%s unable to set iscsi param err=%d, ddp disabled.\n",
1313 tdev->name, err);
1314 cxgbi_ddp_cleanup(cdev);
1315 return err;
1316 }
1317 tdev->ulp_iscsi = ddp;
1318
1319 cdev->csk_ddp_free_gl_skb = ddp_free_gl_skb;
1320 cdev->csk_ddp_alloc_gl_skb = ddp_alloc_gl_skb;
1321 cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
1322 cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
1323 cdev->csk_ddp_set = ddp_set_map;
1324 cdev->csk_ddp_clear = ddp_clear_map;
1325
1326 pr_info("tdev 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u, "
1327 "%u/%u.\n",
1328 tdev, ddp->nppods, ddp->idx_bits, ddp->idx_mask,
1329 ddp->rsvd_tag_mask, ddp->max_txsz, uinfo.max_txsz,
1330 ddp->max_rxsz, uinfo.max_rxsz);
1331 return 0;
1332}
1333
/* cxgb3 'down' callback: unregister the cxgbi device unless the adapter
 * is only resetting (CXGBI_FLAG_ADAPTER_RESET), in which case it will
 * come back up.
 */
static void cxgb3i_dev_close(struct t3cdev *t3dev)
{
	struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev);

	if (!cdev || cdev->flags & CXGBI_FLAG_ADAPTER_RESET) {
		pr_info("0x%p close, f 0x%x.\n", cdev, cdev ? cdev->flags : 0);
		return;
	}

	cxgbi_device_unregister(cdev);
}
1345
1346/**
1347 * cxgb3i_dev_open - init a t3 adapter structure and any h/w settings
1348 * @t3dev: t3cdev adapter
1349 */
1350static void cxgb3i_dev_open(struct t3cdev *t3dev)
1351{
1352 struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev);
1353 struct adapter *adapter = tdev2adap(t3dev);
1354 int i, err;
1355
1356 if (cdev) {
1357 pr_info("0x%p, updating.\n", cdev);
1358 return;
1359 }
1360
1361 cdev = cxgbi_device_register(0, adapter->params.nports);
1362 if (!cdev) {
1363 pr_warn("device 0x%p register failed.\n", t3dev);
1364 return;
1365 }
1366
1367 cdev->flags = CXGBI_FLAG_DEV_T3 | CXGBI_FLAG_IPV4_SET;
1368 cdev->lldev = t3dev;
1369 cdev->pdev = adapter->pdev;
1370 cdev->ports = adapter->port;
1371 cdev->nports = adapter->params.nports;
1372 cdev->mtus = adapter->params.mtus;
1373 cdev->nmtus = NMTUS;
1374 cdev->snd_win = cxgb3i_snd_win;
1375 cdev->rcv_win = cxgb3i_rcv_win;
1376 cdev->rx_credit_thres = cxgb3i_rx_credit_thres;
1377 cdev->skb_tx_rsvd = CXGB3I_TX_HEADER_LEN;
1378 cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr_norss);
1379 cdev->dev_ddp_cleanup = t3_ddp_cleanup;
1380 cdev->itp = &cxgb3i_iscsi_transport;
1381
1382 err = cxgb3i_ddp_init(cdev);
1383 if (err) {
1384 pr_info("0x%p ddp init failed\n", cdev);
1385 goto err_out;
1386 }
1387
1388 err = cxgb3i_ofld_init(cdev);
1389 if (err) {
1390 pr_info("0x%p offload init failed\n", cdev);
1391 goto err_out;
1392 }
1393
1394 err = cxgbi_hbas_add(cdev, CXGB3I_MAX_LUN, CXGBI_MAX_CONN,
1395 &cxgb3i_host_template, cxgb3i_stt);
1396 if (err)
1397 goto err_out;
1398
1399 for (i = 0; i < cdev->nports; i++)
1400 cdev->hbas[i]->ipv4addr =
1401 cxgb3i_get_private_ipv4addr(cdev->ports[i]);
1402
1403 pr_info("cdev 0x%p, f 0x%x, t3dev 0x%p open, err %d.\n",
1404 cdev, cdev ? cdev->flags : 0, t3dev, err);
1405 return;
1406
1407err_out:
1408 cxgbi_device_unregister(cdev);
1409}
1410
1411static void cxgb3i_dev_event_handler(struct t3cdev *t3dev, u32 event, u32 port)
1412{
1413 struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev);
1414
1415 log_debug(1 << CXGBI_DBG_TOE,
1416 "0x%p, cdev 0x%p, event 0x%x, port 0x%x.\n",
1417 t3dev, cdev, event, port);
1418 if (!cdev)
1419 return;
1420
1421 switch (event) {
1422 case OFFLOAD_STATUS_DOWN:
1423 cdev->flags |= CXGBI_FLAG_ADAPTER_RESET;
1424 break;
1425 case OFFLOAD_STATUS_UP:
1426 cdev->flags &= ~CXGBI_FLAG_ADAPTER_RESET;
1427 break;
1428 }
1429}
1430
1431/**
1432 * cxgb3i_init_module - module init entry point
1433 *
1434 * initialize any driver wide global data structures and register itself
1435 * with the cxgb3 module
1436 */
1437static int __init cxgb3i_init_module(void)
1438{
1439 int rc;
1440
1441 printk(KERN_INFO "%s", version);
1442
1443 rc = cxgbi_iscsi_init(&cxgb3i_iscsi_transport, &cxgb3i_stt);
1444 if (rc < 0)
1445 return rc;
1446
1447 cxgb3_register_client(&t3_client);
1448 return 0;
1449}
1450
1451/**
1452 * cxgb3i_exit_module - module cleanup/exit entry point
1453 *
1454 * go through the driver hba list and for each hba, release any resource held.
1455 * and unregisters iscsi transport and the cxgb3 module
1456 */
1457static void __exit cxgb3i_exit_module(void)
1458{
1459 cxgb3_unregister_client(&t3_client);
1460 cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T3);
1461 cxgbi_iscsi_cleanup(&cxgb3i_iscsi_transport, &cxgb3i_stt);
1462}
1463
1464module_init(cxgb3i_init_module);
1465module_exit(cxgb3i_exit_module);
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h
new file mode 100644
index 000000000000..5f5e3394b594
--- /dev/null
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h
@@ -0,0 +1,51 @@
1/*
2 * cxgb3i.h: Chelsio S3xx iSCSI driver.
3 *
4 * Copyright (c) 2008 Chelsio Communications, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Written by: Karen Xie (kxie@chelsio.com)
11 */
12
13#ifndef __CXGB3I_H__
14#define __CXGB3I_H__
15
/* SCSI host queue depth and maximum LUN count advertised by cxgb3i */
#define CXGB3I_SCSI_HOST_QDEPTH 1024
#define CXGB3I_MAX_LUN 512
/* worst-case non-payload bytes in a PDU: BHS + AHS + header & data digests */
#define ISCSI_PDU_NONPAYLOAD_MAX \
	(sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE + 2*ISCSI_DIGEST_SIZE)

/*for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
#define CXGB3I_TX_HEADER_LEN \
	(sizeof(struct tx_data_wr) + sizeof(struct sge_opaque_hdr))

extern cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS];

/* access the per-port iSCSI IPv4 address stored in cxgb3's port_info */
#define cxgb3i_get_private_ipv4addr(ndev) \
	(((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr)
#define cxgb3i_set_private_ipv4addr(ndev, addr) \
	(((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr) = addr
31
/*
 * CPL_ISCSI_HDR message layout without the leading RSS header.
 * NOTE(review): fields are declared u16/u32 rather than __be16/__be32;
 * presumably they arrive in network byte order from the HW — confirm
 * against users before adding conversions.
 */
struct cpl_iscsi_hdr_norss {
	union opcode_tid ot;
	u16 pdu_len_ddp;
	u16 len;
	u32 seq;
	u16 urg;
	u8 rsvd;
	u8 status;
};
41
/*
 * CPL_RX_DATA_DDP message layout without the leading RSS header.
 * NOTE(review): as with cpl_iscsi_hdr_norss, byte order of these
 * fields is not annotated (__be*) — verify before use.
 */
struct cpl_rx_data_ddp_norss {
	union opcode_tid ot;
	u16 urg;
	u16 len;
	u32 seq;
	u32 nxt_seq;
	u32 ulp_crc;
	u32 ddp_status;
};
51#endif
diff --git a/drivers/scsi/cxgbi/cxgb4i/Kbuild b/drivers/scsi/cxgbi/cxgb4i/Kbuild
new file mode 100644
index 000000000000..b9f4af7454b7
--- /dev/null
+++ b/drivers/scsi/cxgbi/cxgb4i/Kbuild
@@ -0,0 +1,3 @@
1EXTRA_CFLAGS += -I$(srctree)/drivers/net/cxgb4
2
3obj-$(CONFIG_SCSI_CXGB4_ISCSI) += cxgb4i.o
diff --git a/drivers/scsi/cxgbi/cxgb4i/Kconfig b/drivers/scsi/cxgbi/cxgb4i/Kconfig
new file mode 100644
index 000000000000..bb94b39b17b3
--- /dev/null
+++ b/drivers/scsi/cxgbi/cxgb4i/Kconfig
@@ -0,0 +1,7 @@
1config SCSI_CXGB4_ISCSI
2 tristate "Chelsio T4 iSCSI support"
3 depends on CHELSIO_T4_DEPENDS
4 select CHELSIO_T4
5 select SCSI_ISCSI_ATTRS
6 ---help---
7 This driver supports iSCSI offload for the Chelsio T4 devices.
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
new file mode 100644
index 000000000000..99f2b8c5dd63
--- /dev/null
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -0,0 +1,1604 @@
1/*
2 * cxgb4i.c: Chelsio T4 iSCSI driver.
3 *
4 * Copyright (c) 2010 Chelsio Communications, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Written by: Karen Xie (kxie@chelsio.com)
11 * Rakesh Ranjan (rranjan@chelsio.com)
12 */
13
14#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
15
16#include <linux/version.h>
17#include <linux/module.h>
18#include <linux/moduleparam.h>
19#include <scsi/scsi_host.h>
20#include <net/tcp.h>
21#include <net/dst.h>
22#include <linux/netdevice.h>
23
24#include "t4_msg.h"
25#include "cxgb4.h"
26#include "cxgb4_uld.h"
27#include "t4fw_api.h"
28#include "l2t.h"
29#include "cxgb4i.h"
30
31static unsigned int dbg_level;
32
33#include "../libcxgbi.h"
34
35#define DRV_MODULE_NAME "cxgb4i"
36#define DRV_MODULE_DESC "Chelsio T4 iSCSI Driver"
37#define DRV_MODULE_VERSION "0.9.1"
38#define DRV_MODULE_RELDATE "Aug. 2010"
39
40static char version[] =
41 DRV_MODULE_DESC " " DRV_MODULE_NAME
42 " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
43
44MODULE_AUTHOR("Chelsio Communications, Inc.");
45MODULE_DESCRIPTION(DRV_MODULE_DESC);
46MODULE_VERSION(DRV_MODULE_VERSION);
47MODULE_LICENSE("GPL");
48
49module_param(dbg_level, uint, 0644);
50MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)");
51
52static int cxgb4i_rcv_win = 256 * 1024;
53module_param(cxgb4i_rcv_win, int, 0644);
54MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP reveive window in bytes");
55
56static int cxgb4i_snd_win = 128 * 1024;
57module_param(cxgb4i_snd_win, int, 0644);
58MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes");
59
60static int cxgb4i_rx_credit_thres = 10 * 1024;
61module_param(cxgb4i_rx_credit_thres, int, 0644);
62MODULE_PARM_DESC(cxgb4i_rx_credit_thres,
63 "RX credits return threshold in bytes (default=10KB)");
64
65static unsigned int cxgb4i_max_connect = (8 * 1024);
66module_param(cxgb4i_max_connect, uint, 0644);
67MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections");
68
69static unsigned short cxgb4i_sport_base = 20000;
70module_param(cxgb4i_sport_base, ushort, 0644);
71MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)");
72
/* per-CPL-opcode dispatch function for messages delivered by cxgb4 */
typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *);

static void *t4_uld_add(const struct cxgb4_lld_info *);
static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *);
static int t4_uld_state_change(void *, enum cxgb4_state state);

/* upper-layer-driver hooks registered with the cxgb4 LLD */
static const struct cxgb4_uld_info cxgb4i_uld_info = {
	.name = DRV_MODULE_NAME,
	.add = t4_uld_add,
	.rx_handler = t4_uld_rx_handler,
	.state_change = t4_uld_state_change,
};
85
/*
 * SCSI midlayer host template: queueing and error-handling entry points
 * are the generic libiscsi ones; clustering is disabled since the HW
 * handles the scatter/gather list itself.
 */
static struct scsi_host_template cxgb4i_host_template = {
	.module = THIS_MODULE,
	.name = DRV_MODULE_NAME,
	.proc_name = DRV_MODULE_NAME,
	.can_queue = CXGB4I_SCSI_HOST_QDEPTH,
	.queuecommand = iscsi_queuecommand,
	.change_queue_depth = iscsi_change_queue_depth,
	.sg_tablesize = SG_ALL,
	.max_sectors = 0xFFFF,
	.cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
	.eh_abort_handler = iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.target_alloc = iscsi_target_alloc,
	.use_clustering = DISABLE_CLUSTERING,
	.this_id = -1,
};
103
/*
 * iSCSI transport registered with scsi_transport_iscsi. Connection and
 * PDU handling goes through the shared libcxgbi helpers (cxgbi_*);
 * session-level operations fall through to generic libiscsi.
 */
static struct iscsi_transport cxgb4i_iscsi_transport = {
	.owner = THIS_MODULE,
	.name = DRV_MODULE_NAME,
	.caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
				CAP_DATADGST | CAP_DIGEST_OFFLOAD |
				CAP_PADDING_OFFLOAD,
	.param_mask = ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH |
				ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN |
				ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T |
				ISCSI_IMM_DATA_EN | ISCSI_FIRST_BURST |
				ISCSI_MAX_BURST | ISCSI_PDU_INORDER_EN |
				ISCSI_DATASEQ_INORDER_EN | ISCSI_ERL |
				ISCSI_CONN_PORT | ISCSI_CONN_ADDRESS |
				ISCSI_EXP_STATSN | ISCSI_PERSISTENT_PORT |
				ISCSI_PERSISTENT_ADDRESS |
				ISCSI_TARGET_NAME | ISCSI_TPGT |
				ISCSI_USERNAME | ISCSI_PASSWORD |
				ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
				ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
				ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
				ISCSI_PING_TMO | ISCSI_RECV_TMO |
				ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
	.host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
				ISCSI_HOST_INITIATOR_NAME |
				ISCSI_HOST_NETDEV_NAME,
	.get_host_param = cxgbi_get_host_param,
	.set_host_param = cxgbi_set_host_param,
	/* session management */
	.create_session = cxgbi_create_session,
	.destroy_session = cxgbi_destroy_session,
	.get_session_param = iscsi_session_get_param,
	/* connection management */
	.create_conn = cxgbi_create_conn,
	.bind_conn = cxgbi_bind_conn,
	.destroy_conn = iscsi_tcp_conn_teardown,
	.start_conn = iscsi_conn_start,
	.stop_conn = iscsi_conn_stop,
	.get_conn_param = cxgbi_get_conn_param,
	.set_param = cxgbi_set_conn_param,
	.get_stats = cxgbi_get_conn_stats,
	/* pdu xmit req from user space */
	.send_pdu = iscsi_conn_send_pdu,
	/* task */
	.init_task = iscsi_tcp_task_init,
	.xmit_task = iscsi_tcp_task_xmit,
	.cleanup_task = cxgbi_cleanup_task,
	/* pdu */
	.alloc_pdu = cxgbi_conn_alloc_pdu,
	.init_pdu = cxgbi_conn_init_pdu,
	.xmit_pdu = cxgbi_conn_xmit_pdu,
	.parse_pdu_itt = cxgbi_parse_pdu_itt,
	/* TCP connect/disconnect */
	.ep_connect = cxgbi_ep_connect,
	.ep_poll = cxgbi_ep_poll,
	.ep_disconnect = cxgbi_ep_disconnect,
	/* Error recovery timeout call */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};

static struct scsi_transport_template *cxgb4i_stt;
164
165/*
166 * CPL (Chelsio Protocol Language) defines a message passing interface between
167 * the host driver and Chelsio asic.
168 * The section below implments CPLs that related to iscsi tcp connection
169 * open/close/abort and data send/receive.
170 */
/* NOTE(review): DIV_ROUND_UP duplicates the linux/kernel.h macro — confirm
 * this local definition is intentional and drop it if not needed. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define RCV_BUFSIZ_MASK 0x3FFU
#define MAX_IMM_TX_PKT_LEN 128

/* select the TX queue for an skb; @csk is currently unused here */
static inline void set_queue(struct sk_buff *skb, unsigned int queue,
				const struct cxgbi_sock *csk)
{
	skb->queue_mapping = queue;
}
180
181static int push_tx_frames(struct cxgbi_sock *, int);
182
183/*
184 * is_ofld_imm - check whether a packet can be sent as immediate data
185 * @skb: the packet
186 *
187 * Returns true if a packet can be sent as an offload WR with immediate
188 * data. We currently use the same limit as for Ethernet packets.
189 */
190static inline int is_ofld_imm(const struct sk_buff *skb)
191{
192 return skb->len <= (MAX_IMM_TX_PKT_LEN -
193 sizeof(struct fw_ofld_tx_data_wr));
194}
195
/*
 * Build and send a CPL_ACT_OPEN_REQ (active TCP connection open) for
 * @csk through L2T entry @e. The request carries the connection 4-tuple
 * plus HW options (opt0/opt2): keepalive, window scale, MSS index, L2T
 * index, TX channel, SMAC index, iSCSI ULP mode and the initial receive
 * buffer size (capped at 10-bit units of 1KB via opt0).
 */
static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
				struct l2t_entry *e)
{
	struct cpl_act_open_req *req;
	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
	unsigned long long opt0;
	unsigned int opt2;
	/* qid_atid encodes both the atid and the RSS queue (<< 14) */
	unsigned int qid_atid = ((unsigned int)csk->atid) |
				 (((unsigned int)csk->rss_qid) << 14);

	opt0 = KEEP_ALIVE(1) |
		WND_SCALE(wscale) |
		MSS_IDX(csk->mss_idx) |
		L2T_IDX(((struct l2t_entry *)csk->l2t)->idx) |
		TX_CHAN(csk->tx_chan) |
		SMAC_SEL(csk->smac_idx) |
		ULP_MODE(ULP_MODE_ISCSI) |
		RCV_BUFSIZ(cxgb4i_rcv_win >> 10);
	/* NOTE(review): bits (1 << 20) and (1 << 22) are magic — presumably
	 * congestion-control / ack-related opt2 flags; confirm vs t4_msg.h */
	opt2 = RX_CHANNEL(0) |
		RSS_QUEUE_VALID |
		(1 << 20) | (1 << 22) |
		RSS_QUEUE(csk->rss_qid);

	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
	req = (struct cpl_act_open_req *)skb->head;

	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
							qid_atid));
	/* 4-tuple is already in network byte order in csk->saddr/daddr */
	req->local_port = csk->saddr.sin_port;
	req->peer_port = csk->daddr.sin_port;
	req->local_ip = csk->saddr.sin_addr.s_addr;
	req->peer_ip = csk->daddr.sin_addr.s_addr;
	req->opt0 = cpu_to_be64(opt0);
	req->params = 0;
	req->opt2 = cpu_to_be32(opt2);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
		csk, &req->local_ip, ntohs(req->local_port),
		&req->peer_ip, ntohs(req->peer_port),
		csk->atid, csk->rss_qid);

	/* send via the L2T entry so ARP resolution is handled */
	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
241
/*
 * Queue a CPL_CLOSE_CON_REQ on the socket's write queue using the
 * pre-allocated csk->cpl_close skb (consumed here, set to NULL), and
 * push it out immediately if the connection is already established.
 */
static void send_close_req(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->cpl_close;
	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
	unsigned int tid = csk->tid;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, tid %u.\n",
		csk, csk->state, csk->flags, csk->tid);
	csk->cpl_close = NULL;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = 0;

	cxgbi_sock_skb_entail(csk, skb);
	if (csk->state >= CTP_ESTABLISHED)
		push_tx_frames(csk, 1);
}
261
/*
 * ARP-failure fallback for an abort request: resend the CPL_ABORT_REQ
 * directly (not via L2T), downgraded to CPL_ABORT_NO_RST since no RST
 * can be delivered without a resolved L2 address.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbi_sock *csk = (struct cxgbi_sock *)handle;
	struct cpl_abort_req *req;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, tid %u, abort.\n",
		csk, csk->state, csk->flags, csk->tid);
	req = (struct cpl_abort_req *)skb->data;
	req->cmd = CPL_ABORT_NO_RST;
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}
274
/*
 * Abort the connection: move the socket to ABORTING, mark an abort
 * reply pending, drop any queued TX and send a CPL_ABORT_REQ asking
 * the HW to emit a RST. Uses the pre-allocated csk->cpl_abort_req skb
 * (consumed here); a no-op if already aborting or resources are gone.
 */
static void send_abort_req(struct cxgbi_sock *csk)
{
	struct cpl_abort_req *req;
	struct sk_buff *skb = csk->cpl_abort_req;

	if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
		return;
	cxgbi_sock_set_state(csk, CTP_ABORTING);
	cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
	/* unsent data is discarded — the connection is being torn down */
	cxgbi_sock_purge_write_queue(csk);

	csk->cpl_abort_req = NULL;
	req = (struct cpl_abort_req *)skb->head;
	set_queue(skb, CPL_PRIORITY_DATA, csk);
	req->cmd = CPL_ABORT_SEND_RST;
	t4_set_arp_err_handler(skb, csk, abort_arp_failure);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
	req->rsvd0 = htonl(csk->snd_nxt);
	/* rsvd1 != 0 tells FW no TX data WR was ever sent on this tid */
	req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
		csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
		req->rsvd1);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
303
/*
 * Acknowledge a peer-initiated abort with a CPL_ABORT_RPL carrying
 * @rst_status (CPL_ABORT_SEND_RST or CPL_ABORT_NO_RST). Consumes the
 * pre-allocated csk->cpl_abort_rpl skb.
 */
static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
{
	struct sk_buff *skb = csk->cpl_abort_rpl;
	struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, status %d.\n",
		csk, csk->state, csk->flags, csk->tid, rst_status);

	csk->cpl_abort_rpl = NULL;
	set_queue(skb, CPL_PRIORITY_DATA, csk);
	INIT_TP_WR(rpl, csk->tid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
	rpl->cmd = rst_status;
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}
320
321/*
322 * CPL connection rx data ack: host ->
323 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
324 * credits sent.
325 */
326static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
327{
328 struct sk_buff *skb;
329 struct cpl_rx_data_ack *req;
330
331 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
332 "csk 0x%p,%u,0x%lx,%u, credit %u.\n",
333 csk, csk->state, csk->flags, csk->tid, credits);
334
335 skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
336 if (!skb) {
337 pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
338 return 0;
339 }
340 req = (struct cpl_rx_data_ack *)skb->head;
341
342 set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
343 INIT_TP_WR(req, csk->tid);
344 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
345 csk->tid));
346 req->credit_dack = cpu_to_be32(RX_CREDITS(credits) | RX_FORCE_ACK(1));
347 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
348 return credits;
349}
350
351/*
352 * sgl_len - calculates the size of an SGL of the given capacity
353 * @n: the number of SGL entries
354 * Calculates the number of flits needed for a scatter/gather list that
355 * can hold the given number of entries.
356 */
357static inline unsigned int sgl_len(unsigned int n)
358{
359 n--;
360 return (3 * n) / 2 + (n & 1) + 2;
361}
362
363/*
364 * calc_tx_flits_ofld - calculate # of flits for an offload packet
365 * @skb: the packet
366 *
367 * Returns the number of flits needed for the given offload packet.
368 * These packets are already fully constructed and no additional headers
369 * will be added.
370 */
371static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
372{
373 unsigned int flits, cnt;
374
375 if (is_ofld_imm(skb))
376 return DIV_ROUND_UP(skb->len, 8);
377 flits = skb_transport_offset(skb) / 8;
378 cnt = skb_shinfo(skb)->nr_frags;
379 if (skb->tail != skb->transport_header)
380 cnt++;
381 return flits + sgl_len(cnt);
382}
383
/*
 * Send a FW_FLOWC_WR describing this connection's flow parameters
 * (PF/VF, channel, port, ingress queue, send/receive sequence numbers,
 * send buffer and MSS) to the firmware. Must precede the first TX data
 * WR on a tid (see push_tx_frames).
 *
 * NOTE(review): the alloc_wr() return value is not checked before
 * dereferencing skb->head — a GFP_ATOMIC failure here would oops.
 * Confirm whether a NULL check (and recovery path) should be added.
 */
static inline void send_tx_flowc_wr(struct cxgbi_sock *csk)
{
	struct sk_buff *skb;
	struct fw_flowc_wr *flowc;
	int flowclen, i;

	flowclen = 80;
	skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
	flowc = (struct fw_flowc_wr *)skb->head;
	flowc->op_to_nparams =
		htonl(FW_WR_OP(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS(8));
	flowc->flowid_len16 =
		htonl(FW_WR_LEN16(DIV_ROUND_UP(72, 16)) |
				FW_WR_FLOWID(csk->tid));
	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = htonl(csk->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = htonl(csk->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = htonl(csk->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = htonl(csk->snd_nxt);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = htonl(csk->rcv_nxt);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = htonl(cxgb4i_snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = htonl(csk->advmss);
	/* 9th slot is a zeroed terminator entry */
	flowc->mnemval[8].mnemonic = 0;
	flowc->mnemval[8].val = 0;
	for (i = 0; i < 9; i++) {
		flowc->mnemval[i].r4[0] = 0;
		flowc->mnemval[i].r4[1] = 0;
		flowc->mnemval[i].r4[2] = 0;
	}
	set_queue(skb, CPL_PRIORITY_DATA, csk);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
		csk, csk->tid, 0, csk->tx_chan, csk->rss_qid,
		csk->snd_nxt, csk->rcv_nxt, cxgb4i_snd_win,
		csk->advmss);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}
431
432static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
433 int dlen, int len, u32 credits, int compl)
434{
435 struct fw_ofld_tx_data_wr *req;
436 unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3;
437 unsigned int wr_ulp_mode = 0;
438
439 req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req));
440
441 if (is_ofld_imm(skb)) {
442 req->op_to_immdlen = htonl(FW_WR_OP(FW_OFLD_TX_DATA_WR) |
443 FW_WR_COMPL(1) |
444 FW_WR_IMMDLEN(dlen));
445 req->flowid_len16 = htonl(FW_WR_FLOWID(csk->tid) |
446 FW_WR_LEN16(credits));
447 } else {
448 req->op_to_immdlen =
449 cpu_to_be32(FW_WR_OP(FW_OFLD_TX_DATA_WR) |
450 FW_WR_COMPL(1) |
451 FW_WR_IMMDLEN(0));
452 req->flowid_len16 =
453 cpu_to_be32(FW_WR_FLOWID(csk->tid) |
454 FW_WR_LEN16(credits));
455 }
456 if (submode)
457 wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE(ULP2_MODE_ISCSI) |
458 FW_OFLD_TX_DATA_WR_ULPSUBMODE(submode);
459 req->tunnel_to_proxy = htonl(wr_ulp_mode) |
460 FW_OFLD_TX_DATA_WR_SHOVE(skb_peek(&csk->write_queue) ? 0 : 1);
461 req->plen = htonl(len);
462 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
463 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
464}
465
/* ARP-failure handler for data skbs: simply drop the packet. */
static void arp_failure_skb_discard(void *handle, struct sk_buff *skb)
{
	kfree_skb(skb);
}
470
/*
 * Drain the socket's write queue to the HW: for each queued skb that
 * fits in the remaining WR credits, account the credits (stashed in
 * skb->csum), move it to the in-flight WR list, prepend a TX data WR
 * header if needed (sending the one-time FLOWC WR first, which itself
 * costs 5 credits) and hand it to the L2T send path.
 *
 * Returns the total truesize of the skbs pushed; 0 if the connection
 * is not in a state that allows TX.
 */
static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
{
	int total_size = 0;
	struct sk_buff *skb;

	if (unlikely(csk->state < CTP_ESTABLISHED ||
		csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK |
			1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, in closing state.\n",
			csk, csk->state, csk->flags, csk->tid);
		return 0;
	}

	while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
		int dlen = skb->len;
		int len = skb->len;
		unsigned int credits_needed;

		skb_reset_transport_header(skb);
		if (is_ofld_imm(skb))
			credits_needed = DIV_ROUND_UP(dlen +
					sizeof(struct fw_ofld_tx_data_wr), 16);
		else
			credits_needed = DIV_ROUND_UP(8*calc_tx_flits_ofld(skb)
					+ sizeof(struct fw_ofld_tx_data_wr),
					16);

		/* stop (don't skip) on the first skb that doesn't fit */
		if (csk->wr_cred < credits_needed) {
			log_debug(1 << CXGBI_DBG_PDU_TX,
				"csk 0x%p, skb %u/%u, wr %d < %u.\n",
				csk, skb->len, skb->data_len,
				credits_needed, csk->wr_cred);
			break;
		}
		__skb_unlink(skb, &csk->write_queue);
		set_queue(skb, CPL_PRIORITY_DATA, csk);
		/* skb->csum is reused to remember this WR's credit cost */
		skb->csum = credits_needed;
		csk->wr_cred -= credits_needed;
		csk->wr_una_cred += credits_needed;
		cxgbi_sock_enqueue_wr(csk, skb);

		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
			csk, skb->len, skb->data_len, credits_needed,
			csk->wr_cred, csk->wr_una_cred);

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
			/* first TX WR on this tid: FLOWC must go out first */
			if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
				send_tx_flowc_wr(csk);
				skb->csum += 5;
				csk->wr_cred -= 5;
				csk->wr_una_cred += 5;
			}
			len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
			make_tx_data_wr(csk, skb, dlen, len, credits_needed,
					req_completion);
			csk->snd_nxt += len;
			cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
		}
		total_size += skb->truesize;
		t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n",
			csk, csk->state, csk->flags, csk->tid, skb, len);

		cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
	}
	return total_size;
}
542
543static inline void free_atid(struct cxgbi_sock *csk)
544{
545 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
546
547 if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
548 cxgb4_free_atid(lldi->tids, csk->atid);
549 cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
550 cxgbi_sock_put(csk);
551 }
552}
553
/*
 * CPL_ACT_ESTABLISH handler: the HW reports an active open completed.
 * Look up the socket by atid, switch it over to the assigned tid,
 * initialize the receive sequence/window state and negotiated MSS,
 * then either start TX or, if a close was requested while connecting,
 * abort. Consumes @skb.
 */
static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
	unsigned short tcp_opt = ntohs(req->tcp_opt);
	unsigned int tid = GET_TID(req);
	unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	u32 rcv_isn = be32_to_cpu(req->rcv_isn);

	csk = lookup_atid(t, atid);
	if (unlikely(!csk)) {
		pr_err("NO conn. for atid %u, cdev 0x%p.\n", atid, cdev);
		goto rel_skb;
	}

	if (csk->atid != atid) {
		pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n",
			atid, csk, csk->state, csk->flags, csk->tid, csk->atid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, tid %u, atid %u, rseq %u.\n",
		csk, csk->state, csk->flags, tid, atid, rcv_isn);

	/* ref for the tid table entry; the atid's ref is dropped below */
	cxgbi_sock_get(csk);
	csk->tid = tid;
	cxgb4_insert_tid(lldi->tids, csk, tid);
	cxgbi_sock_set_flag(csk, CTPF_HAS_TID);

	free_atid(csk);

	spin_lock_bh(&csk->lock);
	if (unlikely(csk->state != CTP_ACTIVE_OPEN))
		pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
			csk, csk->state, csk->flags, csk->tid);

	/* a pending CONN_EXIST retry is no longer needed */
	if (csk->retry_timer.function) {
		del_timer(&csk->retry_timer);
		csk->retry_timer.function = NULL;
	}

	csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
	/*
	 * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
	 * pass through opt0.
	 */
	if (cxgb4i_rcv_win > (RCV_BUFSIZ_MASK << 10))
		csk->rcv_wup -= cxgb4i_rcv_win - (RCV_BUFSIZ_MASK << 10);

	/* MSS from the negotiated index, minus 40 (TCP/IP headers),
	 * minus 12 more when TCP timestamps are in use, floor 128 */
	csk->advmss = lldi->mtus[GET_TCPOPT_MSS(tcp_opt)] - 40;
	if (GET_TCPOPT_TSTAMP(tcp_opt))
		csk->advmss -= 12;
	if (csk->advmss < 128)
		csk->advmss = 128;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p, mss_idx %u, advmss %u.\n",
		csk, GET_TCPOPT_MSS(tcp_opt), csk->advmss);

	cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));

	if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
		send_abort_req(csk);
	else {
		if (skb_queue_len(&csk->write_queue))
			push_tx_frames(csk, 0);
		cxgbi_conn_tx_open(csk);
	}
	spin_unlock_bh(&csk->lock);

rel_skb:
	__kfree_skb(skb);
}
630
/*
 * Map a CPL_ACT_OPEN_RPL hardware status to a negative errno for the
 * connect caller; any unrecognized status becomes -EIO.
 */
static int act_open_rpl_status_to_errno(int status)
{
	if (status == CPL_ERR_CONN_RESET)
		return -ECONNREFUSED;
	if (status == CPL_ERR_ARP_MISS)
		return -EHOSTUNREACH;
	if (status == CPL_ERR_CONN_TIMEDOUT)
		return -ETIMEDOUT;
	if (status == CPL_ERR_TCAM_FULL)
		return -ENOMEM;
	if (status == CPL_ERR_CONN_EXIST)
		return -EADDRINUSE;
	return -EIO;
}
648
/*
 * Timer callback armed on CPL_ERR_CONN_EXIST: re-issue the active open
 * request for the socket. Allocation failure fails the open with
 * -ENOMEM instead of retrying again.
 */
static void csk_act_open_retry_timer(unsigned long data)
{
	struct sk_buff *skb;
	struct cxgbi_sock *csk = (struct cxgbi_sock *)data;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_ATOMIC);
	if (!skb)
		cxgbi_sock_fail_act_open(csk, -ENOMEM);
	else {
		skb->sk = (struct sock *)csk;
		t4_set_arp_err_handler(skb, csk,
				cxgbi_sock_act_open_req_arp_failure);
		send_act_open_req(csk, skb, csk->l2t);
	}
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
672
/*
 * CPL_ACT_OPEN_RPL handler: the HW reports the outcome of an active
 * open. On CPL_ERR_CONN_EXIST, arm a half-second retry; otherwise fail
 * the open with the mapped errno. For hard failures the tid the HW
 * tentatively assigned is released first. Consumes @skb.
 */
static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	unsigned int atid =
		GET_TID_TID(GET_AOPEN_ATID(be32_to_cpu(rpl->atid_status)));
	unsigned int status = GET_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_atid(t, atid);
	if (unlikely(!csk)) {
		pr_err("NO matching conn. atid %u, tid %u.\n", atid, tid);
		goto rel_skb;
	}

	pr_info("%pI4:%u-%pI4:%u, atid %u,%u, status %u, csk 0x%p,%u,0x%lx.\n",
		&csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
		&csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port),
		atid, tid, status, csk, csk->state, csk->flags);

	/* retriable/soft statuses never consumed a tid; all others did */
	if (status && status != CPL_ERR_TCAM_FULL &&
	    status != CPL_ERR_CONN_EXIST &&
	    status != CPL_ERR_ARP_MISS)
		cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl));

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (status == CPL_ERR_CONN_EXIST &&
	    csk->retry_timer.function != csk_act_open_retry_timer) {
		csk->retry_timer.function = csk_act_open_retry_timer;
		mod_timer(&csk->retry_timer, jiffies + HZ / 2);
	} else
		cxgbi_sock_fail_act_open(csk,
					act_open_rpl_status_to_errno(status));

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
rel_skb:
	__kfree_skb(skb);
}
716
/*
 * CPL_PEER_CLOSE handler: the peer sent a FIN. Forward to the shared
 * libcxgbi state machine. Consumes @skb.
 */
static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data;
	unsigned int tid = GET_TID(req);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}
	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);
	cxgbi_sock_rcv_peer_close(csk);
rel_skb:
	__kfree_skb(skb);
}
737
/*
 * CPL_CLOSE_CON_RPL handler: the HW acknowledged our close request;
 * pass the reported snd_nxt to the shared state machine. Consumes @skb.
 */
static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}
	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);
	cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
rel_skb:
	__kfree_skb(skb);
}
758
759static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
760 int *need_rst)
761{
762 switch (abort_reason) {
763 case CPL_ERR_BAD_SYN: /* fall through */
764 case CPL_ERR_CONN_RESET:
765 return csk->state > CTP_ESTABLISHED ?
766 -EPIPE : -ECONNRESET;
767 case CPL_ERR_XMIT_TIMEDOUT:
768 case CPL_ERR_PERSIST_TIMEDOUT:
769 case CPL_ERR_FINWAIT2_TIMEDOUT:
770 case CPL_ERR_KEEPALIVE_TIMEDOUT:
771 return -ETIMEDOUT;
772 default:
773 return -EIO;
774 }
775}
776
/*
 * CPL_ABORT_REQ_RSS handler: the peer (or HW) aborted the connection.
 * Negative-advice statuses are ignored. The HW sends the abort twice;
 * the first pass only marks ABORT_REQ_RCVD and moves to ABORTING, the
 * second pass sends the CPL_ABORT_RPL and, unless our own abort reply
 * is still pending, records the errno and closes the socket.
 * Consumes @skb.
 */
static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data;
	unsigned int tid = GET_TID(req);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	int rst_status = CPL_ABORT_NO_RST;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, tid %u, status 0x%x.\n",
		csk, csk->state, csk->flags, csk->tid, req->status);

	if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
	    req->status == CPL_ERR_PERSIST_NEG_ADVICE)
		goto rel_skb;

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) {
		cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
		cxgbi_sock_set_state(csk, CTP_ABORTING);
		goto done;
	}

	cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
	send_abort_rpl(csk, rst_status);

	if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		csk->err = abort_status_to_errno(csk, req->status, &rst_status);
		cxgbi_sock_closed(csk);
	}
done:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
rel_skb:
	__kfree_skb(skb);
}
822
/*
 * CPL_ABORT_RPL_RSS handler: the HW acknowledged our abort request.
 * ABORT_FAILED replies are dropped; otherwise hand off to the shared
 * state machine. Consumes @skb. (The csk ? : guards in the log are
 * redundant after the NULL check above but harmless.)
 */
static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (!csk)
		goto rel_skb;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"status 0x%x, csk 0x%p, s %u, 0x%lx.\n",
		rpl->status, csk, csk ? csk->state : 0,
		csk ? csk->flags : 0UL);

	if (rpl->status == CPL_ERR_ABORT_FAILED)
		goto rel_skb;

	cxgbi_sock_rcv_abort_rpl(csk);
rel_skb:
	__kfree_skb(skb);
}
847
/*
 * CPL_ISCSI_HDR handler: the HW delivers an iSCSI PDU header (and any
 * non-DDP'ed payload) for a tid. The first skb of a PDU becomes
 * csk->skb_ulp_lhdr and is validated against the expected TCP sequence
 * and the PDU length reported by the HW; subsequent skbs are marked as
 * data belonging to that header. Validated skbs are queued on the
 * socket's receive queue; validation failures abort the connection.
 * Consumes @skb on error paths.
 */
static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
	unsigned short pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
	unsigned int tid = GET_TID(cpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
		csk, csk->state, csk->flags, csk->tid, skb, skb->len,
		pdu_len_ddp);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			"csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq);
	cxgbi_skcb_flags(skb) = 0;

	/* strip the CPL header; trim to the HW-reported length */
	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*cpl));
	__pskb_trim(skb, ntohs(cpl->len));

	if (!csk->skb_ulp_lhdr) {
		unsigned char *bhs;
		unsigned int hlen, dlen;

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n",
			csk, csk->state, csk->flags, csk->tid, skb);
		csk->skb_ulp_lhdr = skb;
		cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);

		if (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt) {
			pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n",
				csk->tid, cxgbi_skcb_tcp_seq(skb),
				csk->rcv_nxt);
			goto abort_conn;
		}

		bhs = skb->data;
		hlen = ntohs(cpl->len);
		/* DataSegmentLength: 24-bit field at BHS offset 4 */
		dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;

		/* HW's pdu length includes 40 bytes of TCP/IP headers */
		if ((hlen + dlen) != ISCSI_PDU_LEN(pdu_len_ddp) - 40) {
			pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len "
				"mismatch %u != %u + %u, seq 0x%x.\n",
				csk->tid, ISCSI_PDU_LEN(pdu_len_ddp) - 40,
				hlen, dlen, cxgbi_skcb_tcp_seq(skb));
			goto abort_conn;
		}

		/* payload is padded to a 4-byte boundary on the wire */
		cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3);
		if (dlen)
			cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len;
		csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n",
			csk, skb, *bhs, hlen, dlen,
			ntohl(*((unsigned int *)(bhs + 16))),
			ntohl(*((unsigned int *)(bhs + 24))));

	} else {
		struct sk_buff *lskb = csk->skb_ulp_lhdr;

		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
			csk, csk->state, csk->flags, skb, lskb);
	}

	__skb_queue_tail(&csk->receive_queue, skb);
	spin_unlock_bh(&csk->lock);
	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}
947
/*
 * CPL_RX_DATA_DDP ingress handler: completion message for a PDU whose
 * header was previously delivered via CPL_ISCSI_HDR.  Detaches the pending
 * header skb (csk->skb_ulp_lhdr), records the data digest and any
 * HCRC/DCRC/padding/DDP status bits on it, then hands the completed PDU to
 * libcxgbi for processing.
 */
static void do_rx_data_ddp(struct cxgbi_device *cdev,
			   struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct sk_buff *lskb;
	struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	unsigned int status = ntohl(rpl->ddpvld);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
		csk, csk->state, csk->flags, skb, status, csk->skb_ulp_lhdr);

	spin_lock_bh(&csk->lock);

	/* Closing connection: abort unless an abort is already in flight. */
	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			"csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	/* A DDP completion without a pending header is a protocol error. */
	if (!csk->skb_ulp_lhdr) {
		pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid);
		goto abort_conn;
	}

	lskb = csk->skb_ulp_lhdr;
	csk->skb_ulp_lhdr = NULL;

	/* Hardware-computed data digest for this PDU. */
	cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc);

	if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb))
		pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
			csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));

	/* Translate the ddpvld status bits into skb cb flags. */
	if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
			csk, lskb, status, cxgbi_skcb_flags(lskb));
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_HCRC_ERR);
	}
	if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
			csk, lskb, status, cxgbi_skcb_flags(lskb));
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DCRC_ERR);
	}
	if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
			csk, lskb, status);
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_PAD_ERR);
	}
	/*
	 * Payload was placed directly by DDP only if no non-DDP data
	 * fragment was queued for this PDU (SKCBF_RX_DATA unset).
	 */
	if ((status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
	    !cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
			csk, lskb, status);
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA_DDPD);
	}
	log_debug(1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p, lskb 0x%p, f 0x%lx.\n",
		csk, lskb, cxgbi_skcb_flags(lskb));

	/* PDU complete: let libcxgbi pick it up off the receive queue. */
	cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS);
	cxgbi_conn_pdu_ready(csk);
	spin_unlock_bh(&csk->lock);
	goto rel_skb;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}
1034
1035static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
1036{
1037 struct cxgbi_sock *csk;
1038 struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data;
1039 unsigned int tid = GET_TID(rpl);
1040 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1041 struct tid_info *t = lldi->tids;
1042
1043 csk = lookup_tid(t, tid);
1044 if (unlikely(!csk))
1045 pr_err("can't find connection for tid %u.\n", tid);
1046 else {
1047 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1048 "csk 0x%p,%u,0x%lx,%u.\n",
1049 csk, csk->state, csk->flags, csk->tid);
1050 cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una),
1051 rpl->seq_vld);
1052 }
1053 __kfree_skb(skb);
1054}
1055
1056static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
1057{
1058 struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
1059 unsigned int tid = GET_TID(rpl);
1060 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1061 struct tid_info *t = lldi->tids;
1062 struct cxgbi_sock *csk;
1063
1064 csk = lookup_tid(t, tid);
1065 if (!csk)
1066 pr_err("can't find conn. for tid %u.\n", tid);
1067
1068 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1069 "csk 0x%p,%u,%lx,%u, status 0x%x.\n",
1070 csk, csk->state, csk->flags, csk->tid, rpl->status);
1071
1072 if (rpl->status != CPL_ERR_NONE)
1073 pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
1074 csk, tid, rpl->status);
1075
1076 __kfree_skb(skb);
1077}
1078
1079static int alloc_cpls(struct cxgbi_sock *csk)
1080{
1081 csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req),
1082 0, GFP_KERNEL);
1083 if (!csk->cpl_close)
1084 return -ENOMEM;
1085
1086 csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req),
1087 0, GFP_KERNEL);
1088 if (!csk->cpl_abort_req)
1089 goto free_cpls;
1090
1091 csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl),
1092 0, GFP_KERNEL);
1093 if (!csk->cpl_abort_rpl)
1094 goto free_cpls;
1095 return 0;
1096
1097free_cpls:
1098 cxgbi_sock_free_cpl_skbs(csk);
1099 return -ENOMEM;
1100}
1101
1102static inline void l2t_put(struct cxgbi_sock *csk)
1103{
1104 if (csk->l2t) {
1105 cxgb4_l2t_release(csk->l2t);
1106 csk->l2t = NULL;
1107 cxgbi_sock_put(csk);
1108 }
1109}
1110
/*
 * Release everything the offload path holds for @csk: the pre-allocated
 * control CPL skbs, any work requests still awaiting firmware ACK, the
 * L2T entry, and the atid/tid (each tid-class resource also carries a
 * connection reference that is dropped here).
 */
static void release_offload_resources(struct cxgbi_sock *csk)
{
	struct cxgb4_lld_info *lldi;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_free_cpl_skbs(csk);
	/* Credits outstanding => un-ACKed WRs are still queued; purge them. */
	if (csk->wr_cred != csk->wr_max_cred) {
		cxgbi_sock_purge_wr_queue(csk);
		cxgbi_sock_reset_wr_list(csk);
	}

	l2t_put(csk);
	/* A connection holds either an atid (open pending) or a tid. */
	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
		free_atid(csk);
	else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
		lldi = cxgbi_cdev_priv(csk->cdev);
		cxgb4_remove_tid(lldi->tids, 0, csk->tid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
		cxgbi_sock_put(csk);
	}
	csk->dst = NULL;
	csk->cdev = NULL;
}
1137
/*
 * Start an active-open (connect) on @csk: allocate an atid and an L2T
 * entry (each taking a connection reference), derive the per-connection
 * transmit parameters from the LLD info, then send the CPL_ACT_OPEN_REQ.
 *
 * Returns 0 on success or -EINVAL on failure.
 * NOTE(review): the failure path only frees the WR skb; the atid/L2T
 * references taken above appear to be left for the caller's teardown via
 * release_offload_resources() -- confirm against the libcxgbi call chain.
 */
static int init_act_open(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct net_device *ndev = cdev->ports[csk->port_id];
	struct port_info *pi = netdev_priv(ndev);
	struct sk_buff *skb = NULL;
	unsigned int step;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
	if (csk->atid < 0) {
		pr_err("%s, NO atid available.\n", ndev->name);
		return -EINVAL;
	}
	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
	cxgbi_sock_get(csk);	/* ref held by the atid */

	csk->l2t = cxgb4_l2t_get(lldi->l2t, csk->dst->neighbour, ndev, 0);
	if (!csk->l2t) {
		pr_err("%s, cannot alloc l2t.\n", ndev->name);
		goto rel_resource;
	}
	cxgbi_sock_get(csk);	/* ref held by the l2t entry */

	skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL);
	if (!skb)
		goto rel_resource;
	skb->sk = (struct sock *)csk;
	t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure);

	if (!csk->mtu)
		csk->mtu = dst_mtu(csk->dst);
	cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
	csk->tx_chan = cxgb4_port_chan(ndev);
	/* SMT two entries per row */
	csk->smac_idx = ((cxgb4_port_viid(ndev) & 0x7F)) << 1;
	/* Spread connections over the port's tx/rx queue ranges. */
	step = lldi->ntxq / lldi->nchan;
	csk->txq_idx = cxgb4_port_idx(ndev) * step;
	step = lldi->nrxq / lldi->nchan;
	csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step];
	csk->wr_max_cred = csk->wr_cred = lldi->wr_cred;
	csk->wr_una_cred = 0;
	cxgbi_sock_reset_wr_list(csk);
	csk->err = 0;
	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,p%d,%s, %u,%u,%u, mss %u,%u, smac %u.\n",
		csk, pi->port_id, ndev->name, csk->tx_chan,
		csk->txq_idx, csk->rss_qid, csk->mtu, csk->mss_idx,
		csk->smac_idx);

	cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
	send_act_open_req(csk, skb, csk->l2t);
	return 0;

rel_resource:
	if (skb)
		__kfree_skb(skb);
	return -EINVAL;
}
1201
/*
 * Dispatch table mapping ingress CPL opcodes to their handlers, consulted
 * by t4_uld_rx_handler().  Opcodes not listed stay NULL and the message is
 * dropped with an error.
 */
cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = do_act_establish,
	[CPL_ACT_OPEN_RPL] = do_act_open_rpl,
	[CPL_PEER_CLOSE] = do_peer_close,
	[CPL_ABORT_REQ_RSS] = do_abort_req_rss,
	[CPL_ABORT_RPL_RSS] = do_abort_rpl_rss,
	[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
	[CPL_FW4_ACK] = do_fw4_ack,
	[CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
	[CPL_SET_TCB_RPL] = do_set_tcb_rpl,
	[CPL_RX_DATA_DDP] = do_rx_data_ddp,
};
1214
1215int cxgb4i_ofld_init(struct cxgbi_device *cdev)
1216{
1217 int rc;
1218
1219 if (cxgb4i_max_connect > CXGB4I_MAX_CONN)
1220 cxgb4i_max_connect = CXGB4I_MAX_CONN;
1221
1222 rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base,
1223 cxgb4i_max_connect);
1224 if (rc < 0)
1225 return rc;
1226
1227 cdev->csk_release_offload_resources = release_offload_resources;
1228 cdev->csk_push_tx_frames = push_tx_frames;
1229 cdev->csk_send_abort_req = send_abort_req;
1230 cdev->csk_send_close_req = send_close_req;
1231 cdev->csk_send_rx_credits = send_rx_credits;
1232 cdev->csk_alloc_cpls = alloc_cpls;
1233 cdev->csk_init_act_open = init_act_open;
1234
1235 pr_info("cdev 0x%p, offload up, added.\n", cdev);
1236 return 0;
1237}
1238
1239/*
1240 * functions to program the pagepod in h/w
1241 */
1242#define ULPMEM_IDATA_MAX_NPPODS 4 /* 256/PPOD_SIZE */
/*
 * Fill in the ULP_TX memory-write work-request header for a pagepod
 * write of @dlen bytes at adapter memory address @pm_addr, followed by
 * the immediate-data sub-command header.  Lengths/addresses are in
 * 32-byte units as required by the hardware.
 */
static inline void ulp_mem_io_set_hdr(struct ulp_mem_io *req,
				unsigned int wr_len, unsigned int dlen,
				unsigned int pm_addr)
{
	/* Immediate-data sub-header sits right after the WR header. */
	struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);

	INIT_ULPTX_WR(req, wr_len, 0, 0);
	/* NOTE(review): bit 23 qualifies the memory-write command --
	 * confirm its meaning against the T4 WR documentation. */
	req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) | (1 << 23));
	req->dlen = htonl(ULP_MEMIO_DATA_LEN(dlen >> 5));
	req->lock_addr = htonl(ULP_MEMIO_ADDR(pm_addr >> 5));
	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));

	idata->cmd_more = htonl(ULPTX_CMD(ULP_TX_SC_IMM));
	idata->len = htonl(dlen);
}
1258
/*
 * Build and send one ULP_TX immediate-data work request that programs
 * @npods consecutive pagepods starting at pagepod index @idx.  With
 * hdr/gl NULL the pagepods are cleared instead of set.  @gl_pidx is the
 * index of the first gather-list page covered by pagepod @idx.
 *
 * Returns 0 on success or -ENOMEM if the WR skb cannot be allocated.
 */
static int ddp_ppod_write_idata(struct cxgbi_device *cdev, unsigned int port_id,
				struct cxgbi_pagepod_hdr *hdr, unsigned int idx,
				unsigned int npods,
				struct cxgbi_gather_list *gl,
				unsigned int gl_pidx)
{
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	struct sk_buff *skb;
	struct ulp_mem_io *req;
	struct ulptx_idata *idata;
	struct cxgbi_pagepod *ppod;
	/* Target adapter-memory address of pagepod @idx. */
	unsigned int pm_addr = idx * PPOD_SIZE + ddp->llimit;
	unsigned int dlen = PPOD_SIZE * npods;
	/* WR header + immediate sub-header + pagepod payload, 16B aligned. */
	unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
				sizeof(struct ulptx_idata) + dlen, 16);
	unsigned int i;

	skb = alloc_wr(wr_len, 0, GFP_ATOMIC);
	if (!skb) {
		pr_err("cdev 0x%p, idx %u, npods %u, OOM.\n",
			cdev, idx, npods);
		return -ENOMEM;
	}
	req = (struct ulp_mem_io *)skb->head;
	set_queue(skb, CPL_PRIORITY_CONTROL, NULL);

	ulp_mem_io_set_hdr(req, wr_len, dlen, pm_addr);
	idata = (struct ulptx_idata *)(req + 1);
	ppod = (struct cxgbi_pagepod *)(idata + 1);

	/* Each pagepod covers PPOD_PAGES_MAX gather-list pages. */
	for (i = 0; i < npods; i++, ppod++, gl_pidx += PPOD_PAGES_MAX) {
		if (!hdr && !gl)
			cxgbi_ddp_ppod_clear(ppod);
		else
			cxgbi_ddp_ppod_set(ppod, hdr, gl, gl_pidx);
	}

	cxgb4_ofld_send(cdev->ports[port_id], skb);
	return 0;
}
1299
1300static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr,
1301 unsigned int idx, unsigned int npods,
1302 struct cxgbi_gather_list *gl)
1303{
1304 unsigned int i, cnt;
1305 int err = 0;
1306
1307 for (i = 0; i < npods; i += cnt, idx += cnt) {
1308 cnt = npods - i;
1309 if (cnt > ULPMEM_IDATA_MAX_NPPODS)
1310 cnt = ULPMEM_IDATA_MAX_NPPODS;
1311 err = ddp_ppod_write_idata(csk->cdev, csk->port_id, hdr,
1312 idx, cnt, gl, 4 * i);
1313 if (err < 0)
1314 break;
1315 }
1316 return err;
1317}
1318
1319static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag,
1320 unsigned int idx, unsigned int npods)
1321{
1322 unsigned int i, cnt;
1323 int err;
1324
1325 for (i = 0; i < npods; i += cnt, idx += cnt) {
1326 cnt = npods - i;
1327 if (cnt > ULPMEM_IDATA_MAX_NPPODS)
1328 cnt = ULPMEM_IDATA_MAX_NPPODS;
1329 err = ddp_ppod_write_idata(chba->cdev, chba->port_id, NULL,
1330 idx, cnt, NULL, 0);
1331 if (err < 0)
1332 break;
1333 }
1334}
1335
/*
 * Program the connection's DDP page-size index into its TCB via
 * CPL_SET_TCB_FIELD.  A zero or out-of-range @pg_idx is treated as
 * "nothing to do".  Returns 0 on success (or no-op) or -ENOMEM.
 */
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
				int pg_idx, bool reply)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!pg_idx || pg_idx >= DDP_PGIDX_MAX)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* set up ulp page size */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
	/* Route the (optional) reply CPL to this connection's rx queue. */
	req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
	req->word_cookie = htons(0);
	/* 2-bit page-size field at bit 8 of the targeted TCB word. */
	req->mask = cpu_to_be64(0x3 << 8);
	req->val = cpu_to_be64(pg_idx << 8);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return 0;
}
1365
/*
 * Enable/disable header and data digest offload for the connection by
 * programming the ULP submode bits of its TCB, and record the resulting
 * digest lengths in the socket.  A request with both digests off is a
 * no-op.  Returns 0 on success (or no-op) or -ENOMEM.
 */
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
				 int hcrc, int dcrc, int reply)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!hcrc && !dcrc)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* CRC32C digests are 4 bytes each when enabled. */
	csk->hcrc_len = (hcrc ? 4 : 0);
	csk->dcrc_len = (dcrc ? 4 : 0);
	/* set up ulp submode */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
	req->word_cookie = htons(0);
	/* 2-bit ULP submode field at bit 4 of the targeted TCB word. */
	req->mask = cpu_to_be64(0x3 << 4);
	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
				(dcrc ? ULP_CRC_DATA : 0)) << 4);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return 0;
}
1398
/*
 * Set up direct-data-placement for @cdev: initialize the shared libcxgbi
 * DDP state over the adapter's iSCSI memory region, program the tag mask
 * and page-size table into the hardware, and install the DDP callbacks.
 *
 * If DDP is already initialized, an extra reference is taken on the
 * existing state and -EALREADY is returned.
 */
static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	unsigned int tagmask, pgsz_factor[4];
	int err;

	if (ddp) {
		/* Share the existing DDP state (hold a reference to it). */
		kref_get(&ddp->refcnt);
		pr_warn("cdev 0x%p, ddp 0x%p already set up.\n",
			cdev, cdev->ddp);
		return -EALREADY;
	}

	err = cxgbi_ddp_init(cdev, lldi->vr->iscsi.start,
			lldi->vr->iscsi.start + lldi->vr->iscsi.size - 1,
			lldi->iscsi_iolen, lldi->iscsi_iolen);
	if (err < 0)
		return err;

	ddp = cdev->ddp;

	tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
	cxgbi_ddp_page_size_factor(pgsz_factor);
	cxgb4_iscsi_init(lldi->ports[0], tagmask, pgsz_factor);

	/* T4 programs pagepods via WRs; no per-gl skb pre-allocation. */
	cdev->csk_ddp_free_gl_skb = NULL;
	cdev->csk_ddp_alloc_gl_skb = NULL;
	cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
	cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
	cdev->csk_ddp_set = ddp_set_map;
	cdev->csk_ddp_clear = ddp_clear_map;

	pr_info("cxgb4i 0x%p tag: sw %u, rsvd %u,%u, mask 0x%x.\n",
		cdev, cdev->tag_format.sw_bits, cdev->tag_format.rsvd_bits,
		cdev->tag_format.rsvd_shift, cdev->tag_format.rsvd_mask);
	pr_info("cxgb4i 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u, "
		" %u/%u.\n",
		cdev, ddp->nppods, ddp->idx_bits, ddp->idx_mask,
		ddp->rsvd_tag_mask, ddp->max_txsz, lldi->iscsi_iolen,
		ddp->max_rxsz, lldi->iscsi_iolen);
	pr_info("cxgb4i 0x%p max payload size: %u/%u, %u/%u.\n",
		cdev, cdev->tx_max_size, ddp->max_txsz, cdev->rx_max_size,
		ddp->max_rxsz);
	return 0;
}
1445
/*
 * cxgb4 ULD "add" callback: called once per T4 adapter.  Registers a
 * cxgbi_device, copies the LLD info into its private area, initializes
 * DDP and offload state, and creates one iSCSI host per port.
 *
 * Returns the new cxgbi_device, NULL if registration fails, or an
 * ERR_PTR on later failure.
 * NOTE(review): every err_out path returns ERR_PTR(-ENOMEM) even when rc
 * carries a different error -- consider propagating rc.
 */
static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
{
	struct cxgbi_device *cdev;
	struct port_info *pi;
	int i, rc;

	cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports);
	if (!cdev) {
		pr_info("t4 device 0x%p, register failed.\n", lldi);
		return NULL;
	}
	pr_info("0x%p,0x%x, ports %u,%s, chan %u, q %u,%u, wr %u.\n",
		cdev, lldi->adapter_type, lldi->nports,
		lldi->ports[0]->name, lldi->nchan, lldi->ntxq,
		lldi->nrxq, lldi->wr_cred);
	for (i = 0; i < lldi->nrxq; i++)
		log_debug(1 << CXGBI_DBG_DEV,
			"t4 0x%p, rxq id #%d: %u.\n",
			cdev, i, lldi->rxq_ids[i]);

	/* Keep a private copy of the LLD info in cdev's extra area. */
	memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi));
	cdev->flags = CXGBI_FLAG_DEV_T4;
	cdev->pdev = lldi->pdev;
	cdev->ports = lldi->ports;
	cdev->nports = lldi->nports;
	cdev->mtus = lldi->mtus;
	cdev->nmtus = NMTUS;
	cdev->snd_win = cxgb4i_snd_win;
	cdev->rcv_win = cxgb4i_rcv_win;
	cdev->rx_credit_thres = cxgb4i_rx_credit_thres;
	cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
	cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
	cdev->itp = &cxgb4i_iscsi_transport;

	cdev->pfvf = FW_VIID_PFN_GET(cxgb4_port_viid(lldi->ports[0])) << 8;
	pr_info("cdev 0x%p,%s, pfvf %u.\n",
		cdev, lldi->ports[0]->name, cdev->pfvf);

	rc = cxgb4i_ddp_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ddp init failed.\n", cdev);
		goto err_out;
	}
	rc = cxgb4i_ofld_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ofld init failed.\n", cdev);
		goto err_out;
	}

	rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, CXGBI_MAX_CONN,
			&cxgb4i_host_template, cxgb4i_stt);
	if (rc)
		goto err_out;

	for (i = 0; i < cdev->nports; i++) {
		pi = netdev_priv(lldi->ports[i]);
		cdev->hbas[i]->port_id = pi->port_id;
	}
	return cdev;

err_out:
	cxgbi_device_unregister(cdev);
	return ERR_PTR(-ENOMEM);
}
1510
#define RX_PULL_LEN 128
/*
 * cxgb4 ULD receive callback: convert the response descriptor (and
 * optional free-list gather list @pgl) into an skb, then dispatch on the
 * CPL opcode via cxgb4i_cplhandlers[].  Each handler owns/frees the skb.
 *
 * Returns 0 on success, 1 on allocation failure (so the LLD can retry).
 */
static int t4_uld_rx_handler(void *handle, const __be64 *rsp,
				const struct pkt_gl *pgl)
{
	const struct cpl_act_establish *rpl;
	struct sk_buff *skb;
	unsigned int opc;
	struct cxgbi_device *cdev = handle;

	if (pgl == NULL) {
		/* CPL delivered inline in the response descriptor. */
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_wr(len, 0, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else {
		/* Sanity check: descriptor and gather list must agree on
		 * the leading opcode byte. */
		if (unlikely(*(u8 *)rsp != *(u8 *)pgl->va)) {
			pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
				pgl->va, be64_to_cpu(*rsp),
				be64_to_cpu(*(u64 *)pgl->va),
				pgl->tot_len);
			return 0;
		}
		skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN);
		if (unlikely(!skb))
			goto nomem;
	}

	/* Any CPL header works for peeking at the opcode field. */
	rpl = (struct cpl_act_establish *)skb->data;
	opc = rpl->ot.opcode;
	log_debug(1 << CXGBI_DBG_TOE,
		"cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
		cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb);
	if (cxgb4i_cplhandlers[opc])
		cxgb4i_cplhandlers[opc](cdev, skb);
	else {
		pr_err("No handler for opcode 0x%x.\n", opc);
		__kfree_skb(skb);
	}
	return 0;
nomem:
	log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n");
	return 1;
}
1556
1557static int t4_uld_state_change(void *handle, enum cxgb4_state state)
1558{
1559 struct cxgbi_device *cdev = handle;
1560
1561 switch (state) {
1562 case CXGB4_STATE_UP:
1563 pr_info("cdev 0x%p, UP.\n", cdev);
1564 /* re-initialize */
1565 break;
1566 case CXGB4_STATE_START_RECOVERY:
1567 pr_info("cdev 0x%p, RECOVERY.\n", cdev);
1568 /* close all connections */
1569 break;
1570 case CXGB4_STATE_DOWN:
1571 pr_info("cdev 0x%p, DOWN.\n", cdev);
1572 break;
1573 case CXGB4_STATE_DETACH:
1574 pr_info("cdev 0x%p, DETACH.\n", cdev);
1575 break;
1576 default:
1577 pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
1578 break;
1579 }
1580 return 0;
1581}
1582
1583static int __init cxgb4i_init_module(void)
1584{
1585 int rc;
1586
1587 printk(KERN_INFO "%s", version);
1588
1589 rc = cxgbi_iscsi_init(&cxgb4i_iscsi_transport, &cxgb4i_stt);
1590 if (rc < 0)
1591 return rc;
1592 cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);
1593 return 0;
1594}
1595
/*
 * Module exit.  Order matters: stop new ULD callbacks first, then tear
 * down all registered T4 devices, and finally unregister the transport.
 */
static void __exit cxgb4i_exit_module(void)
{
	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
	cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
	cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
}
1602
1603module_init(cxgb4i_init_module);
1604module_exit(cxgb4i_exit_module);
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h
new file mode 100644
index 000000000000..1096026ba241
--- /dev/null
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h
@@ -0,0 +1,43 @@
1/*
2 * cxgb4i.h: Chelsio T4 iSCSI driver.
3 *
4 * Copyright (c) 2010 Chelsio Communications, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Written by: Karen Xie (kxie@chelsio.com)
11 * Written by: Rakesh Ranjan (rranjan@chelsio.com)
12 */
13
14#ifndef __CXGB4I_H__
15#define __CXGB4I_H__
16
/* Scsi_Host queue depth for the cxgb4i hosts. */
#define CXGB4I_SCSI_HOST_QDEPTH 1024
/* Upper bound on offloaded iSCSI connections per device (portmap size). */
#define CXGB4I_MAX_CONN 16384
#define CXGB4I_MAX_TARGET CXGB4I_MAX_CONN
#define CXGB4I_MAX_LUN 0x1000

/* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
#define CXGB4I_TX_HEADER_LEN \
	(sizeof(struct fw_ofld_tx_data_wr) + sizeof(struct sge_opaque_hdr))
25
/*
 * ULP_TX immediate-data sub-command header (big-endian wire format):
 * command/flags word followed by the immediate-data length.
 */
struct ulptx_idata {
	__be32 cmd_more;
	__be32 len;
};
30
/*
 * CPL_RX_DATA_DDP message (big-endian wire format) as handled by
 * do_rx_data_ddp(): ulp_crc carries the hardware-computed data digest,
 * ddpvld the DDP validity/error status bits.
 */
struct cpl_rx_data_ddp {
	union opcode_tid ot;
	__be16 urg;
	__be16 len;		/* PDU length as seen by the hardware */
	__be32 seq;
	union {
		__be32 nxt_seq;
		__be32 ddp_report;
	};
	__be32 ulp_crc;		/* data digest */
	__be32 ddpvld;		/* CPL_RX_DDP_STATUS_* bits */
};
43#endif /* __CXGB4I_H__ */
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
new file mode 100644
index 000000000000..be5661707dfa
--- /dev/null
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -0,0 +1,2612 @@
1/*
2 * libcxgbi.c: Chelsio common library for T3/T4 iSCSI driver.
3 *
4 * Copyright (c) 2010 Chelsio Communications, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Written by: Karen Xie (kxie@chelsio.com)
11 * Written by: Rakesh Ranjan (rranjan@chelsio.com)
12 */
13
14#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
15
16#include <linux/skbuff.h>
17#include <linux/crypto.h>
18#include <linux/scatterlist.h>
19#include <linux/pci.h>
20#include <scsi/scsi.h>
21#include <scsi/scsi_cmnd.h>
22#include <scsi/scsi_host.h>
23#include <linux/if_vlan.h>
24#include <linux/inet.h>
25#include <net/dst.h>
26#include <net/route.h>
27#include <linux/inetdevice.h> /* ip_dev_find */
28#include <net/tcp.h>
29
30static unsigned int dbg_level;
31
32#include "libcxgbi.h"
33
34#define DRV_MODULE_NAME "libcxgbi"
35#define DRV_MODULE_DESC "Chelsio iSCSI driver library"
36#define DRV_MODULE_VERSION "0.9.0"
37#define DRV_MODULE_RELDATE "Jun. 2010"
38
39MODULE_AUTHOR("Chelsio Communications, Inc.");
40MODULE_DESCRIPTION(DRV_MODULE_DESC);
41MODULE_VERSION(DRV_MODULE_VERSION);
42MODULE_LICENSE("GPL");
43
44module_param(dbg_level, uint, 0644);
45MODULE_PARM_DESC(dbg_level, "libiscsi debug level (default=0)");
46
47
48/*
49 * cxgbi device management
50 * maintains a list of the cxgbi devices
51 */
52static LIST_HEAD(cdev_list);
53static DEFINE_MUTEX(cdev_mutex);
54
55int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base,
56 unsigned int max_conn)
57{
58 struct cxgbi_ports_map *pmap = &cdev->pmap;
59
60 pmap->port_csk = cxgbi_alloc_big_mem(max_conn *
61 sizeof(struct cxgbi_sock *),
62 GFP_KERNEL);
63 if (!pmap->port_csk) {
64 pr_warn("cdev 0x%p, portmap OOM %u.\n", cdev, max_conn);
65 return -ENOMEM;
66 }
67
68 pmap->max_connect = max_conn;
69 pmap->sport_base = base;
70 spin_lock_init(&pmap->lock);
71 return 0;
72}
73EXPORT_SYMBOL_GPL(cxgbi_device_portmap_create);
74
/*
 * Force-close every connection still holding a slot in @cdev's port map
 * (offload going away).  Each socket is marked OFFLOAD_DOWN and closed
 * under its own lock, then the port-map reference on it is dropped.
 */
void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev)
{
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	struct cxgbi_sock *csk;
	int i;

	for (i = 0; i < pmap->max_connect; i++) {
		if (pmap->port_csk[i]) {
			csk = pmap->port_csk[i];
			pmap->port_csk[i] = NULL;
			log_debug(1 << CXGBI_DBG_SOCK,
				"csk 0x%p, cdev 0x%p, offload down.\n",
				csk, cdev);
			spin_lock_bh(&csk->lock);
			cxgbi_sock_set_flag(csk, CTPF_OFFLOAD_DOWN);
			cxgbi_sock_closed(csk);
			spin_unlock_bh(&csk->lock);
			cxgbi_sock_put(csk);
		}
	}
}
EXPORT_SYMBOL_GPL(cxgbi_device_portmap_cleanup);
97
/*
 * Free a cxgbi_device: remove its hosts, close remaining connections,
 * tear down DDP state and the port map, then free the structure.
 * Caller must have already removed @cdev from cdev_list.
 */
static inline void cxgbi_device_destroy(struct cxgbi_device *cdev)
{
	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p# %u.\n", cdev, cdev->nports);
	cxgbi_hbas_remove(cdev);
	cxgbi_device_portmap_cleanup(cdev);
	if (cdev->dev_ddp_cleanup)
		cdev->dev_ddp_cleanup(cdev);
	else
		cxgbi_ddp_cleanup(cdev);
	/*
	 * NOTE(review): when dev_ddp_cleanup is unset and cdev->ddp is
	 * still non-NULL, cxgbi_ddp_cleanup() runs twice -- verify this
	 * does not double-drop the ddp reference.
	 */
	if (cdev->ddp)
		cxgbi_ddp_cleanup(cdev);
	if (cdev->pmap.max_connect)
		cxgbi_free_big_mem(cdev->pmap.port_csk);
	kfree(cdev);
}
114
/*
 * Allocate and register a cxgbi_device for @nports ports with @extra
 * bytes of driver-private data.  A single allocation is carved up as:
 * [struct cxgbi_device][ports array][hbas array][dd_data].  The new
 * device is appended to the global cdev_list.  Returns NULL on OOM.
 */
struct cxgbi_device *cxgbi_device_register(unsigned int extra,
					   unsigned int nports)
{
	struct cxgbi_device *cdev;

	cdev = kzalloc(sizeof(*cdev) + extra + nports *
			(sizeof(struct cxgbi_hba *) +
			 sizeof(struct net_device *)),
			GFP_KERNEL);
	if (!cdev) {
		pr_warn("nport %d, OOM.\n", nports);
		return NULL;
	}
	/* Carve the trailing storage into the ports/hbas/dd_data areas. */
	cdev->ports = (struct net_device **)(cdev + 1);
	cdev->hbas = (struct cxgbi_hba **)(((char*)cdev->ports) + nports *
			sizeof(struct net_device *));
	if (extra)
		cdev->dd_data = ((char *)cdev->hbas) +
				nports * sizeof(struct cxgbi_hba *);
	spin_lock_init(&cdev->pmap.lock);

	mutex_lock(&cdev_mutex);
	list_add_tail(&cdev->list_head, &cdev_list);
	mutex_unlock(&cdev_mutex);

	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p# %u.\n", cdev, nports);
	return cdev;
}
EXPORT_SYMBOL_GPL(cxgbi_device_register);
145
/*
 * Remove @cdev from the global list (under cdev_mutex) and destroy it.
 * The list removal must precede destruction so no other path can find
 * the device while it is being torn down.
 */
void cxgbi_device_unregister(struct cxgbi_device *cdev)
{
	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p# %u,%s.\n",
		cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : "");
	mutex_lock(&cdev_mutex);
	list_del(&cdev->list_head);
	mutex_unlock(&cdev_mutex);
	cxgbi_device_destroy(cdev);
}
EXPORT_SYMBOL_GPL(cxgbi_device_unregister);
157
/*
 * Unregister and destroy every device whose flags contain all bits of
 * @flag (e.g. CXGBI_FLAG_DEV_T4).  Uses the _safe iterator because each
 * matching entry is deleted and freed while walking the list.
 */
void cxgbi_device_unregister_all(unsigned int flag)
{
	struct cxgbi_device *cdev, *tmp;

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		if ((cdev->flags & flag) == flag) {
			log_debug(1 << CXGBI_DBG_DEV,
				"cdev 0x%p, p# %u,%s.\n",
				cdev, cdev->nports, cdev->nports ?
				 cdev->ports[0]->name : "");
			list_del(&cdev->list_head);
			cxgbi_device_destroy(cdev);
		}
	}
	mutex_unlock(&cdev_mutex);
}
EXPORT_SYMBOL_GPL(cxgbi_device_unregister_all);
176
177struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev)
178{
179 struct cxgbi_device *cdev, *tmp;
180
181 mutex_lock(&cdev_mutex);
182 list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
183 if (cdev->lldev == lldev) {
184 mutex_unlock(&cdev_mutex);
185 return cdev;
186 }
187 }
188 mutex_unlock(&cdev_mutex);
189 log_debug(1 << CXGBI_DBG_DEV,
190 "lldev 0x%p, NO match found.\n", lldev);
191 return NULL;
192}
193EXPORT_SYMBOL_GPL(cxgbi_device_find_by_lldev);
194
/*
 * Find the cxgbi_device owning @ndev.  VLAN devices are resolved to
 * their real device first (the VLAN device is remembered on the matching
 * hba for later use).  On a match, *@port (if non-NULL) receives the
 * port index.  Returns NULL if no registered device owns the netdev.
 */
static struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev,
							int *port)
{
	struct net_device *vdev = NULL;
	struct cxgbi_device *cdev, *tmp;
	int i;

	if (ndev->priv_flags & IFF_802_1Q_VLAN) {
		vdev = ndev;
		ndev = vlan_dev_real_dev(ndev);
		log_debug(1 << CXGBI_DBG_DEV,
			"vlan dev %s -> %s.\n", vdev->name, ndev->name);
	}

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		for (i = 0; i < cdev->nports; i++) {
			if (ndev == cdev->ports[i]) {
				cdev->hbas[i]->vdev = vdev;
				mutex_unlock(&cdev_mutex);
				if (port)
					*port = i;
				return cdev;
			}
		}
	}
	mutex_unlock(&cdev_mutex);
	log_debug(1 << CXGBI_DBG_DEV,
		"ndev 0x%p, %s, NO match found.\n", ndev, ndev->name);
	return NULL;
}
226
227void cxgbi_hbas_remove(struct cxgbi_device *cdev)
228{
229 int i;
230 struct cxgbi_hba *chba;
231
232 log_debug(1 << CXGBI_DBG_DEV,
233 "cdev 0x%p, p#%u.\n", cdev, cdev->nports);
234
235 for (i = 0; i < cdev->nports; i++) {
236 chba = cdev->hbas[i];
237 if (chba) {
238 cdev->hbas[i] = NULL;
239 iscsi_host_remove(chba->shost);
240 pci_dev_put(cdev->pdev);
241 iscsi_host_free(chba->shost);
242 }
243 }
244}
245EXPORT_SYMBOL_GPL(cxgbi_hbas_remove);
246
/*
 * Create one iSCSI SCSI host per port of @cdev using template @sht and
 * transport @stt.  Each host takes a reference on the PCI device.  On any
 * failure, all hosts created so far are removed via cxgbi_hbas_remove()
 * and a negative errno is returned.
 */
int cxgbi_hbas_add(struct cxgbi_device *cdev, unsigned int max_lun,
		unsigned int max_id, struct scsi_host_template *sht,
		struct scsi_transport_template *stt)
{
	struct cxgbi_hba *chba;
	struct Scsi_Host *shost;
	int i, err;

	log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p#%u.\n", cdev, cdev->nports);

	for (i = 0; i < cdev->nports; i++) {
		shost = iscsi_host_alloc(sht, sizeof(*chba), 1);
		if (!shost) {
			pr_info("0x%p, p%d, %s, host alloc failed.\n",
				cdev, i, cdev->ports[i]->name);
			err = -ENOMEM;
			goto err_out;
		}

		shost->transportt = stt;
		shost->max_lun = max_lun;
		shost->max_id = max_id;
		shost->max_channel = 0;
		shost->max_cmd_len = 16;

		/* hba state lives in the host's private area */
		chba = iscsi_host_priv(shost);
		chba->cdev = cdev;
		chba->ndev = cdev->ports[i];
		chba->shost = shost;

		log_debug(1 << CXGBI_DBG_DEV,
			"cdev 0x%p, p#%d %s: chba 0x%p.\n",
			cdev, i, cdev->ports[i]->name, chba);

		/* Reference dropped by cxgbi_hbas_remove() (or below). */
		pci_dev_get(cdev->pdev);
		err = iscsi_host_add(shost, &cdev->pdev->dev);
		if (err) {
			pr_info("cdev 0x%p, p#%d %s, host add failed.\n",
				cdev, i, cdev->ports[i]->name);
			/* This host was never published; undo its refs
			 * directly (cdev->hbas[i] is still unset). */
			pci_dev_put(cdev->pdev);
			scsi_host_put(shost);
			goto err_out;
		}

		cdev->hbas[i] = chba;
	}

	return 0;

err_out:
	cxgbi_hbas_remove(cdev);
	return err;
}
EXPORT_SYMBOL_GPL(cxgbi_hbas_add);
301
302/*
303 * iSCSI offload
304 *
305 * - source port management
306 * To find a free source port in the port allocation map we use a very simple
307 * rotor scheme to look for the next free port.
308 *
309 * If a source port has been specified make sure that it doesn't collide with
310 * our normal source port allocation map. If it's outside the range of our
311 * allocation/deallocation scheme just let them use it.
312 *
313 * If the source port is outside our allocation range, the caller is
314 * responsible for keeping track of their port usage.
315 */
/*
 * Allocate a free source port for @csk from the device's port map using
 * a simple rotor (scan forward from pmap->next, wrapping once).  On
 * success the port is stored in csk->saddr.sin_port (network order), the
 * slot holds a reference on the socket, and 0 is returned; otherwise a
 * negative errno.
 */
static int sock_get_port(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	unsigned int start;
	int idx;

	if (!pmap->max_connect) {
		pr_err("cdev 0x%p, p#%u %s, NO port map.\n",
			cdev, csk->port_id, cdev->ports[csk->port_id]->name);
		return -EADDRNOTAVAIL;
	}

	/* A port is already assigned; callers must not double-allocate. */
	if (csk->saddr.sin_port) {
		pr_err("source port NON-ZERO %u.\n",
			ntohs(csk->saddr.sin_port));
		return -EADDRINUSE;
	}

	spin_lock_bh(&pmap->lock);
	if (pmap->used >= pmap->max_connect) {
		spin_unlock_bh(&pmap->lock);
		pr_info("cdev 0x%p, p#%u %s, ALL ports used.\n",
			cdev, csk->port_id, cdev->ports[csk->port_id]->name);
		return -EADDRNOTAVAIL;
	}

	start = idx = pmap->next;
	do {
		if (++idx >= pmap->max_connect)
			idx = 0;
		if (!pmap->port_csk[idx]) {
			pmap->used++;
			csk->saddr.sin_port =
				htons(pmap->sport_base + idx);
			pmap->next = idx;
			pmap->port_csk[idx] = csk;
			spin_unlock_bh(&pmap->lock);
			/* Slot holds a ref; dropped in sock_put_port(). */
			cxgbi_sock_get(csk);
			log_debug(1 << CXGBI_DBG_SOCK,
				"cdev 0x%p, p#%u %s, p %u, %u.\n",
				cdev, csk->port_id,
				cdev->ports[csk->port_id]->name,
				pmap->sport_base + idx, pmap->next);
			return 0;
		}
	} while (idx != start);
	spin_unlock_bh(&pmap->lock);

	/* should not happen */
	pr_warn("cdev 0x%p, p#%u %s, next %u?\n",
		cdev, csk->port_id, cdev->ports[csk->port_id]->name,
		pmap->next);
	return -EADDRNOTAVAIL;
}
371
372static void sock_put_port(struct cxgbi_sock *csk)
373{
374 struct cxgbi_device *cdev = csk->cdev;
375 struct cxgbi_ports_map *pmap = &cdev->pmap;
376
377 if (csk->saddr.sin_port) {
378 int idx = ntohs(csk->saddr.sin_port) - pmap->sport_base;
379
380 csk->saddr.sin_port = 0;
381 if (idx < 0 || idx >= pmap->max_connect) {
382 pr_err("cdev 0x%p, p#%u %s, port %u OOR.\n",
383 cdev, csk->port_id,
384 cdev->ports[csk->port_id]->name,
385 ntohs(csk->saddr.sin_port));
386 return;
387 }
388
389 spin_lock_bh(&pmap->lock);
390 pmap->port_csk[idx] = NULL;
391 pmap->used--;
392 spin_unlock_bh(&pmap->lock);
393
394 log_debug(1 << CXGBI_DBG_SOCK,
395 "cdev 0x%p, p#%u %s, release %u.\n",
396 cdev, csk->port_id, cdev->ports[csk->port_id]->name,
397 pmap->sport_base + idx);
398
399 cxgbi_sock_put(csk);
400 }
401}
402
403/*
404 * iscsi tcp connection
405 */
406void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *csk)
407{
408 if (csk->cpl_close) {
409 kfree_skb(csk->cpl_close);
410 csk->cpl_close = NULL;
411 }
412 if (csk->cpl_abort_req) {
413 kfree_skb(csk->cpl_abort_req);
414 csk->cpl_abort_req = NULL;
415 }
416 if (csk->cpl_abort_rpl) {
417 kfree_skb(csk->cpl_abort_rpl);
418 csk->cpl_abort_rpl = NULL;
419 }
420}
421EXPORT_SYMBOL_GPL(cxgbi_sock_free_cpl_skbs);
422
423static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
424{
425 struct cxgbi_sock *csk = kzalloc(sizeof(*csk), GFP_NOIO);
426
427 if (!csk) {
428 pr_info("alloc csk %zu failed.\n", sizeof(*csk));
429 return NULL;
430 }
431
432 if (cdev->csk_alloc_cpls(csk) < 0) {
433 pr_info("csk 0x%p, alloc cpls failed.\n", csk);
434 kfree(csk);
435 return NULL;
436 }
437
438 spin_lock_init(&csk->lock);
439 kref_init(&csk->refcnt);
440 skb_queue_head_init(&csk->receive_queue);
441 skb_queue_head_init(&csk->write_queue);
442 setup_timer(&csk->retry_timer, NULL, (unsigned long)csk);
443 rwlock_init(&csk->callback_lock);
444 csk->cdev = cdev;
445 csk->flags = 0;
446 cxgbi_sock_set_state(csk, CTP_CLOSED);
447
448 log_debug(1 << CXGBI_DBG_SOCK, "cdev 0x%p, new csk 0x%p.\n", cdev, csk);
449
450 return csk;
451}
452
/*
 * find_route_ipv4 - look up the IPv4 route to @daddr:@dport
 *
 * Builds a flow key from the addresses/ports/tos and queries the
 * routing core in init_net.  Returns the route (held; release with
 * ip_rt_put()) or NULL if no route exists.
 */
static struct rtable *find_route_ipv4(__be32 saddr, __be32 daddr,
					__be16 sport, __be16 dport, u8 tos)
{
	struct rtable *rt;
	/* pre-2.6.39 flowi layout: address and port selectors live in
	 * the nl_u / uli_u unions
	 */
	struct flowi fl = {
		.oif = 0,
		.nl_u = {
			.ip4_u = {
				.daddr = daddr,
				.saddr = saddr,
				.tos = tos }
		},
		.proto = IPPROTO_TCP,
		.uli_u = {
			.ports = {
				.sport = sport,
				.dport = dport }
		}
	};

	if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
		return NULL;

	return rt;
}
478
479static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
480{
481 struct sockaddr_in *daddr = (struct sockaddr_in *)dst_addr;
482 struct dst_entry *dst;
483 struct net_device *ndev;
484 struct cxgbi_device *cdev;
485 struct rtable *rt = NULL;
486 struct cxgbi_sock *csk = NULL;
487 unsigned int mtu = 0;
488 int port = 0xFFFF;
489 int err = 0;
490
491 if (daddr->sin_family != AF_INET) {
492 pr_info("address family 0x%x NOT supported.\n",
493 daddr->sin_family);
494 err = -EAFNOSUPPORT;
495 goto err_out;
496 }
497
498 rt = find_route_ipv4(0, daddr->sin_addr.s_addr, 0, daddr->sin_port, 0);
499 if (!rt) {
500 pr_info("no route to ipv4 0x%x, port %u.\n",
501 daddr->sin_addr.s_addr, daddr->sin_port);
502 err = -ENETUNREACH;
503 goto err_out;
504 }
505 dst = &rt->dst;
506 ndev = dst->neighbour->dev;
507
508 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
509 pr_info("multi-cast route %pI4, port %u, dev %s.\n",
510 &daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
511 ndev->name);
512 err = -ENETUNREACH;
513 goto rel_rt;
514 }
515
516 if (ndev->flags & IFF_LOOPBACK) {
517 ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
518 mtu = ndev->mtu;
519 pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
520 dst->neighbour->dev->name, ndev->name, mtu);
521 }
522
523 cdev = cxgbi_device_find_by_netdev(ndev, &port);
524 if (!cdev) {
525 pr_info("dst %pI4, %s, NOT cxgbi device.\n",
526 &daddr->sin_addr.s_addr, ndev->name);
527 err = -ENETUNREACH;
528 goto rel_rt;
529 }
530 log_debug(1 << CXGBI_DBG_SOCK,
531 "route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n",
532 &daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
533 port, ndev->name, cdev);
534
535 csk = cxgbi_sock_create(cdev);
536 if (!csk) {
537 err = -ENOMEM;
538 goto rel_rt;
539 }
540 csk->cdev = cdev;
541 csk->port_id = port;
542 csk->mtu = mtu;
543 csk->dst = dst;
544 csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr;
545 csk->daddr.sin_port = daddr->sin_port;
546 csk->saddr.sin_addr.s_addr = rt->rt_src;
547
548 return csk;
549
550rel_rt:
551 ip_rt_put(rt);
552 if (csk)
553 cxgbi_sock_closed(csk);
554err_out:
555 return ERR_PTR(err);
556}
557
/*
 * cxgbi_sock_established - the h/w reported the TCP connection is up
 * @csk:     the offload socket
 * @snd_isn: initial send sequence number assigned by the h/w
 * @opt:     negotiated TCP options (not used in this function)
 *
 * Seeds all three send-side sequence pointers from the ISN, confirms
 * the cached route and moves the socket to CTP_ESTABLISHED.
 */
void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn,
			unsigned int opt)
{
	csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn;
	dst_confirm(csk->dst);
	/* full barrier before the state change -- presumably so that any
	 * observer of CTP_ESTABLISHED also sees the new sequence numbers
	 */
	smp_mb();
	cxgbi_sock_set_state(csk, CTP_ESTABLISHED);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_established);
567
568static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk)
569{
570 log_debug(1 << CXGBI_DBG_SOCK,
571 "csk 0x%p, state %u, flags 0x%lx, conn 0x%p.\n",
572 csk, csk->state, csk->flags, csk->user_data);
573
574 if (csk->state != CTP_ESTABLISHED) {
575 read_lock_bh(&csk->callback_lock);
576 if (csk->user_data)
577 iscsi_conn_failure(csk->user_data,
578 ISCSI_ERR_CONN_FAILED);
579 read_unlock_bh(&csk->callback_lock);
580 }
581}
582
/*
 * cxgbi_sock_closed - final transition of @csk to CTP_CLOSED
 *
 * Releases the allocated source port and route, lets the LLD free its
 * offload resources, informs the iSCSI layer and drops a socket
 * reference.  A socket still in CTP_ACTIVE_OPEN (or already closed)
 * only gets CTPF_ACTIVE_CLOSE_NEEDED flagged; teardown happens later.
 */
void cxgbi_sock_closed(struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
	/* connect still in flight, or nothing left to tear down */
	if (csk->state == CTP_ACTIVE_OPEN || csk->state == CTP_CLOSED)
		return;
	if (csk->saddr.sin_port)
		sock_put_port(csk);
	if (csk->dst)
		dst_release(csk->dst);
	csk->cdev->csk_release_offload_resources(csk);
	cxgbi_sock_set_state(csk, CTP_CLOSED);
	cxgbi_inform_iscsi_conn_closing(csk);
	/* drop a socket reference */
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_closed);
600
601static void need_active_close(struct cxgbi_sock *csk)
602{
603 int data_lost;
604 int close_req = 0;
605
606 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
607 csk, (csk)->state, (csk)->flags, (csk)->tid);
608 spin_lock_bh(&csk->lock);
609 dst_confirm(csk->dst);
610 data_lost = skb_queue_len(&csk->receive_queue);
611 __skb_queue_purge(&csk->receive_queue);
612
613 if (csk->state == CTP_ACTIVE_OPEN)
614 cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
615 else if (csk->state == CTP_ESTABLISHED) {
616 close_req = 1;
617 cxgbi_sock_set_state(csk, CTP_ACTIVE_CLOSE);
618 } else if (csk->state == CTP_PASSIVE_CLOSE) {
619 close_req = 1;
620 cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
621 }
622
623 if (close_req) {
624 if (data_lost)
625 csk->cdev->csk_send_abort_req(csk);
626 else
627 csk->cdev->csk_send_close_req(csk);
628 }
629
630 spin_unlock_bh(&csk->lock);
631}
632
/*
 * cxgbi_sock_fail_act_open - active-open (connect) attempt failed
 * @csk:   the offload socket
 * @errno: negative errno recorded in csk->err
 *
 * Records the error and tears the socket down.  The state is first
 * forced to CTP_CONNECTING so that cxgbi_sock_closed() does not take
 * its CTP_ACTIVE_OPEN early-return path and actually releases the
 * resources.
 */
void cxgbi_sock_fail_act_open(struct cxgbi_sock *csk, int errno)
{
	pr_info("csk 0x%p,%u,%lx, %pI4:%u-%pI4:%u, err %d.\n",
		csk, csk->state, csk->flags,
		&csk->saddr.sin_addr.s_addr, csk->saddr.sin_port,
		&csk->daddr.sin_addr.s_addr, csk->daddr.sin_port,
		errno);

	cxgbi_sock_set_state(csk, CTP_CONNECTING);
	csk->err = errno;
	cxgbi_sock_closed(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_fail_act_open);
646
/*
 * cxgbi_sock_act_open_req_arp_failure - ARP failed for an active-open
 * request skb (skb->sk carries the owning cxgbi_sock).
 *
 * Fails the connect with -EHOSTUNREACH if the socket is still in
 * CTP_ACTIVE_OPEN, then frees the unsent request skb.
 */
void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk;

	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	/* hold the socket across the teardown triggered below */
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	if (csk->state == CTP_ACTIVE_OPEN)
		cxgbi_sock_fail_act_open(csk, -EHOSTUNREACH);
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
	__kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure);
662
/*
 * cxgbi_sock_rcv_abort_rpl - an abort reply arrived from the h/w
 *
 * The code waits for a second reply before completing the abort: the
 * first one only marks CTPF_ABORT_RPL_RCVD, the second clears both
 * abort flags and finally closes the socket.  Replies for sockets with
 * no abort pending are ignored.
 */
void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *csk)
{
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_RCVD))
			cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD);
		else {
			cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_RCVD);
			cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING);
			/* a crossed peer abort is unexpected here */
			if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD))
				pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n",
					csk, csk->state, csk->flags, csk->tid);
			cxgbi_sock_closed(csk);
		}
	}
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_abort_rpl);
683
/*
 * cxgbi_sock_rcv_peer_close - the peer closed its end of the connection
 *
 * Advances the close-handshake state machine:
 *   ESTABLISHED  -> PASSIVE_CLOSE  (peer closed first)
 *   ACTIVE_CLOSE -> CLOSE_WAIT_2   (both sides closing)
 *   CLOSE_WAIT_1 -> fully closed
 * Ignored while an abort reply is pending.  The iSCSI layer is told
 * the connection is going away.
 */
void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
		goto done;

	switch (csk->state) {
	case CTP_ESTABLISHED:
		cxgbi_sock_set_state(csk, CTP_PASSIVE_CLOSE);
		break;
	case CTP_ACTIVE_CLOSE:
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
		break;
	case CTP_CLOSE_WAIT_1:
		cxgbi_sock_closed(csk);
		break;
	case CTP_ABORTING:
		/* abort already under way; nothing more to do */
		break;
	default:
		pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
	}
	cxgbi_inform_iscsi_conn_closing(csk);
done:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_peer_close);
716
/*
 * cxgbi_sock_rcv_close_conn_rpl - the h/w confirmed our close request
 * @snd_nxt: send sequence number from the reply; snd_una is set to
 *           snd_nxt - 1 (presumably accounting for the FIN)
 *
 * ACTIVE_CLOSE -> CLOSE_WAIT_1; in CLOSE_WAIT_1/2 the handshake is
 * complete and the socket is closed.  Ignored while an abort reply is
 * pending.
 */
void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *csk, u32 snd_nxt)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	csk->snd_una = snd_nxt - 1;
	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
		goto done;

	switch (csk->state) {
	case CTP_ACTIVE_CLOSE:
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_1);
		break;
	case CTP_CLOSE_WAIT_1:
	case CTP_CLOSE_WAIT_2:
		cxgbi_sock_closed(csk);
		break;
	case CTP_ABORTING:
		break;
	default:
		pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
	}
done:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_close_conn_rpl);
747
748void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *csk, unsigned int credits,
749 unsigned int snd_una, int seq_chk)
750{
751 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
752 "csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, snd_una %u,%d.\n",
753 csk, csk->state, csk->flags, csk->tid, credits,
754 csk->wr_cred, csk->wr_una_cred, snd_una, seq_chk);
755
756 spin_lock_bh(&csk->lock);
757
758 csk->wr_cred += credits;
759 if (csk->wr_una_cred > csk->wr_max_cred - csk->wr_cred)
760 csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;
761
762 while (credits) {
763 struct sk_buff *p = cxgbi_sock_peek_wr(csk);
764
765 if (unlikely(!p)) {
766 pr_err("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, empty.\n",
767 csk, csk->state, csk->flags, csk->tid, credits,
768 csk->wr_cred, csk->wr_una_cred);
769 break;
770 }
771
772 if (unlikely(credits < p->csum)) {
773 pr_warn("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, < %u.\n",
774 csk, csk->state, csk->flags, csk->tid,
775 credits, csk->wr_cred, csk->wr_una_cred,
776 p->csum);
777 p->csum -= credits;
778 break;
779 } else {
780 cxgbi_sock_dequeue_wr(csk);
781 credits -= p->csum;
782 kfree_skb(p);
783 }
784 }
785
786 cxgbi_sock_check_wr_invariants(csk);
787
788 if (seq_chk) {
789 if (unlikely(before(snd_una, csk->snd_una))) {
790 pr_warn("csk 0x%p,%u,0x%lx,%u, snd_una %u/%u.",
791 csk, csk->state, csk->flags, csk->tid, snd_una,
792 csk->snd_una);
793 goto done;
794 }
795
796 if (csk->snd_una != snd_una) {
797 csk->snd_una = snd_una;
798 dst_confirm(csk->dst);
799 }
800 }
801
802 if (skb_queue_len(&csk->write_queue)) {
803 if (csk->cdev->csk_push_tx_frames(csk, 0))
804 cxgbi_conn_tx_open(csk);
805 } else
806 cxgbi_conn_tx_open(csk);
807done:
808 spin_unlock_bh(&csk->lock);
809}
810EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_wr_ack);
811
812static unsigned int cxgbi_sock_find_best_mtu(struct cxgbi_sock *csk,
813 unsigned short mtu)
814{
815 int i = 0;
816
817 while (i < csk->cdev->nmtus - 1 && csk->cdev->mtus[i + 1] <= mtu)
818 ++i;
819
820 return i;
821}
822
823unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *csk, unsigned int pmtu)
824{
825 unsigned int idx;
826 struct dst_entry *dst = csk->dst;
827
828 csk->advmss = dst_metric(dst, RTAX_ADVMSS);
829
830 if (csk->advmss > pmtu - 40)
831 csk->advmss = pmtu - 40;
832 if (csk->advmss < csk->cdev->mtus[0] - 40)
833 csk->advmss = csk->cdev->mtus[0] - 40;
834 idx = cxgbi_sock_find_best_mtu(csk, csk->advmss + 40);
835
836 return idx;
837}
838EXPORT_SYMBOL_GPL(cxgbi_sock_select_mss);
839
/*
 * Append @skb to @csk's write queue, stamping the skb's control block
 * with the tcp sequence number it starts at.  Callers in this file
 * hold csk->lock (note the unlocked __skb_queue_tail()).
 */
void cxgbi_sock_skb_entail(struct cxgbi_sock *csk, struct sk_buff *skb)
{
	cxgbi_skcb_tcp_seq(skb) = csk->write_seq;
	__skb_queue_tail(&csk->write_queue, skb);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_skb_entail);
846
/* Dequeue and free every skb still pending on @csk's write-request queue. */
void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *csk)
{
	struct sk_buff *skb;

	for (skb = cxgbi_sock_dequeue_wr(csk); skb;
	     skb = cxgbi_sock_dequeue_wr(csk))
		kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_purge_wr_queue);
855
856void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *csk)
857{
858 int pending = cxgbi_sock_count_pending_wrs(csk);
859
860 if (unlikely(csk->wr_cred + pending != csk->wr_max_cred))
861 pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
862 csk, csk->tid, csk->wr_cred, pending, csk->wr_max_cred);
863}
864EXPORT_SYMBOL_GPL(cxgbi_sock_check_wr_invariants);
865
/*
 * cxgbi_sock_send_pdus - queue a chain of PDU skbs for transmission
 * @csk: the offload socket
 * @skb: head of an skb chain linked via skb->next
 *
 * Validates each skb (headroom for the LLD tx header, fragment count
 * within SKB_WR_LIST_SIZE), stamps it with the current write sequence,
 * appends it to the write queue, and finally kicks the LLD to push
 * frames.
 *
 * Returns the number of bytes queued, or a negative error: -EAGAIN if
 * not yet established, csk->err or -EPIPE on a dead connection,
 * -ENOBUFS if the send window is full, -EINVAL on a malformed skb.
 * Note that an error is returned even if part of the chain was queued
 * first; the queued part is still pushed out via the "done" path.
 */
static int cxgbi_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct sk_buff *next;
	int err, copied = 0;

	spin_lock_bh(&csk->lock);

	if (csk->state != CTP_ESTABLISHED) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, EAGAIN.\n",
			csk, csk->state, csk->flags, csk->tid);
		err = -EAGAIN;
		goto out_err;
	}

	if (csk->err) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, EPIPE %d.\n",
			csk, csk->state, csk->flags, csk->tid, csk->err);
		err = -EPIPE;
		goto out_err;
	}

	/* send-window check: unacked bytes must stay below snd_win */
	if (csk->write_seq - csk->snd_una >= cdev->snd_win) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n",
			csk, csk->state, csk->flags, csk->tid, csk->write_seq,
			csk->snd_una, cdev->snd_win);
		err = -ENOBUFS;
		goto out_err;
	}

	while (skb) {
		/* fragments plus one if there is linear data */
		int frags = skb_shinfo(skb)->nr_frags +
				(skb->len != skb->data_len);

		if (unlikely(skb_headroom(skb) < cdev->skb_tx_rsvd)) {
			pr_err("csk 0x%p, skb head %u < %u.\n",
				csk, skb_headroom(skb), cdev->skb_tx_rsvd);
			err = -EINVAL;
			goto out_err;
		}

		if (frags >= SKB_WR_LIST_SIZE) {
			pr_err("csk 0x%p, frags %d, %u,%u >%u.\n",
				csk, skb_shinfo(skb)->nr_frags, skb->len,
				skb->data_len, (uint)(SKB_WR_LIST_SIZE));
			err = -EINVAL;
			goto out_err;
		}

		next = skb->next;
		skb->next = NULL;
		cxgbi_skcb_set_flag(skb, SKCBF_TX_NEED_HDR);
		cxgbi_sock_skb_entail(csk, skb);
		copied += skb->len;
		csk->write_seq += skb->len +
				cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
		skb = next;
	}
done:
	if (likely(skb_queue_len(&csk->write_queue)))
		cdev->csk_push_tx_frames(csk, 1);
	spin_unlock_bh(&csk->lock);
	return copied;

out_err:
	/* map a broken-pipe error to the socket's recorded errno */
	if (copied == 0 && err == -EPIPE)
		copied = csk->err ? csk->err : -EPIPE;
	else
		copied = err;
	goto done;
}
940
941/*
942 * Direct Data Placement -
943 * Directly place the iSCSI Data-In or Data-Out PDU's payload into pre-posted
944 * final destination host-memory buffers based on the Initiator Task Tag (ITT)
945 * in Data-In or Target Task Tag (TTT) in Data-Out PDUs.
946 * The host memory address is programmed into h/w in the format of pagepod
947 * entries.
948 * The location of the pagepod entry is encoded into ddp tag which is used as
949 * the base for ITT/TTT.
950 */
951
/* ddp page sizes supported by the h/w, as orders/shifts relative to 4KB;
 * rewritten by ddp_adjust_page_table() when the host PAGE_SIZE has no
 * entry in the default table
 */
static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4};
static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16};
/* table index matching the host PAGE_SIZE; DDP_PGIDX_MAX means not set up */
static unsigned char page_idx = DDP_PGIDX_MAX;

/* bit widths of the s/w part of a ddp tag (task index + session age) */
static unsigned char sw_tag_idx_bits;
static unsigned char sw_tag_age_bits;
958
959/*
960 * Direct-Data Placement page size adjustment
961 */
/*
 * ddp_adjust_page_table - rebase the ddp page-size tables on the host
 * PAGE_SIZE when the default (4KB-based) table has no matching entry.
 *
 * Returns 0 on success, -EINVAL when PAGE_SIZE is smaller than the
 * smallest page the h/w supports (1 << ddp_page_shift[0]).
 */
static int ddp_adjust_page_table(void)
{
	int i;
	unsigned int base_order, order;

	if (PAGE_SIZE < (1UL << ddp_page_shift[0])) {
		pr_info("PAGE_SIZE 0x%lx too small, min 0x%lx\n",
			PAGE_SIZE, 1UL << ddp_page_shift[0]);
		return -EINVAL;
	}

	base_order = get_order(1UL << ddp_page_shift[0]);
	order = get_order(1UL << PAGE_SHIFT);

	for (i = 0; i < DDP_PGIDX_MAX; i++) {
		/* first is the kernel page size, then just doubling */
		ddp_page_order[i] = order - base_order + i;
		ddp_page_shift[i] = PAGE_SHIFT + i;
	}
	return 0;
}
983
984static int ddp_find_page_index(unsigned long pgsz)
985{
986 int i;
987
988 for (i = 0; i < DDP_PGIDX_MAX; i++) {
989 if (pgsz == (1UL << ddp_page_shift[i]))
990 return i;
991 }
992 pr_info("ddp page size %lu not supported.\n", pgsz);
993 return DDP_PGIDX_MAX;
994}
995
996static void ddp_setup_host_page_size(void)
997{
998 if (page_idx == DDP_PGIDX_MAX) {
999 page_idx = ddp_find_page_index(PAGE_SIZE);
1000
1001 if (page_idx == DDP_PGIDX_MAX) {
1002 pr_info("system PAGE %lu, update hw.\n", PAGE_SIZE);
1003 if (ddp_adjust_page_table() < 0) {
1004 pr_info("PAGE %lu, disable ddp.\n", PAGE_SIZE);
1005 return;
1006 }
1007 page_idx = ddp_find_page_index(PAGE_SIZE);
1008 }
1009 pr_info("system PAGE %lu, ddp idx %u.\n", PAGE_SIZE, page_idx);
1010 }
1011}
1012
1013void cxgbi_ddp_page_size_factor(int *pgsz_factor)
1014{
1015 int i;
1016
1017 for (i = 0; i < DDP_PGIDX_MAX; i++)
1018 pgsz_factor[i] = ddp_page_order[i];
1019}
1020EXPORT_SYMBOL_GPL(cxgbi_ddp_page_size_factor);
1021
1022/*
1023 * DDP setup & teardown
1024 */
1025
1026void cxgbi_ddp_ppod_set(struct cxgbi_pagepod *ppod,
1027 struct cxgbi_pagepod_hdr *hdr,
1028 struct cxgbi_gather_list *gl, unsigned int gidx)
1029{
1030 int i;
1031
1032 memcpy(ppod, hdr, sizeof(*hdr));
1033 for (i = 0; i < (PPOD_PAGES_MAX + 1); i++, gidx++) {
1034 ppod->addr[i] = gidx < gl->nelem ?
1035 cpu_to_be64(gl->phys_addr[gidx]) : 0ULL;
1036 }
1037}
1038EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_set);
1039
/* Zero out a pagepod entry (header and all page addresses). */
void cxgbi_ddp_ppod_clear(struct cxgbi_pagepod *ppod)
{
	memset(ppod, 0, sizeof(*ppod));
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_clear);
1045
1046static inline int ddp_find_unused_entries(struct cxgbi_ddp_info *ddp,
1047 unsigned int start, unsigned int max,
1048 unsigned int count,
1049 struct cxgbi_gather_list *gl)
1050{
1051 unsigned int i, j, k;
1052
1053 /* not enough entries */
1054 if ((max - start) < count) {
1055 log_debug(1 << CXGBI_DBG_DDP,
1056 "NOT enough entries %u+%u < %u.\n", start, count, max);
1057 return -EBUSY;
1058 }
1059
1060 max -= count;
1061 spin_lock(&ddp->map_lock);
1062 for (i = start; i < max;) {
1063 for (j = 0, k = i; j < count; j++, k++) {
1064 if (ddp->gl_map[k])
1065 break;
1066 }
1067 if (j == count) {
1068 for (j = 0, k = i; j < count; j++, k++)
1069 ddp->gl_map[k] = gl;
1070 spin_unlock(&ddp->map_lock);
1071 return i;
1072 }
1073 i += j + 1;
1074 }
1075 spin_unlock(&ddp->map_lock);
1076 log_debug(1 << CXGBI_DBG_DDP,
1077 "NO suitable entries %u available.\n", count);
1078 return -EBUSY;
1079}
1080
1081static inline void ddp_unmark_entries(struct cxgbi_ddp_info *ddp,
1082 int start, int count)
1083{
1084 spin_lock(&ddp->map_lock);
1085 memset(&ddp->gl_map[start], 0,
1086 count * sizeof(struct cxgbi_gather_list *));
1087 spin_unlock(&ddp->map_lock);
1088}
1089
1090static inline void ddp_gl_unmap(struct pci_dev *pdev,
1091 struct cxgbi_gather_list *gl)
1092{
1093 int i;
1094
1095 for (i = 0; i < gl->nelem; i++)
1096 dma_unmap_page(&pdev->dev, gl->phys_addr[i], PAGE_SIZE,
1097 PCI_DMA_FROMDEVICE);
1098}
1099
/*
 * ddp_gl_map - DMA-map every page of gather list @gl for device writes
 * into host memory (PCI_DMA_FROMDEVICE).
 *
 * Returns the number of pages mapped (== gl->nelem) on success; on a
 * mapping failure all pages mapped so far are unmapped again and
 * -EINVAL is returned.
 */
static inline int ddp_gl_map(struct pci_dev *pdev,
			     struct cxgbi_gather_list *gl)
{
	int i;

	for (i = 0; i < gl->nelem; i++) {
		gl->phys_addr[i] = dma_map_page(&pdev->dev, gl->pages[i], 0,
						PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(&pdev->dev, gl->phys_addr[i]))) {
			log_debug(1 << CXGBI_DBG_DDP,
				"page %d 0x%p, 0x%p dma mapping err.\n",
				i, gl->pages[i], pdev);
			goto unmap;
		}
	}
	return i;
unmap:
	if (i) {
		/* temporarily shrink nelem so ddp_gl_unmap() only touches
		 * the pages that were actually mapped
		 */
		unsigned int nelem = gl->nelem;

		gl->nelem = i;
		ddp_gl_unmap(pdev, gl);
		gl->nelem = nelem;
	}
	return -EINVAL;
}
1127
/* Unmap all pages of @gl, then free the gather list itself. */
static void ddp_release_gl(struct cxgbi_gather_list *gl,
			  struct pci_dev *pdev)
{
	ddp_gl_unmap(pdev, gl);
	kfree(gl);
}
1134
/*
 * ddp_make_gl - build a DMA-mapped page gather list over @sgl
 *
 * Coalesces the scatterlist into whole pages and validates that it is
 * ddp-able: only the first page may start at a non-zero offset and
 * only the last may be partially used; all middle pages must be fully
 * covered.  On success the pages are DMA-mapped and the gather list
 * returned; NULL on any failure or when the transfer is below
 * DDP_THRESHOLD.
 */
static struct cxgbi_gather_list *ddp_make_gl(unsigned int xferlen,
				  struct scatterlist *sgl,
				  unsigned int sgcnt,
				  struct pci_dev *pdev,
				  gfp_t gfp)
{
	struct cxgbi_gather_list *gl;
	struct scatterlist *sg = sgl;
	struct page *sgpage = sg_page(sg);
	unsigned int sglen = sg->length;
	unsigned int sgoffset = sg->offset;
	/* worst-case page count, including the first page's offset */
	unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >>
				PAGE_SHIFT;
	int i = 1, j = 0;

	if (xferlen < DDP_THRESHOLD) {
		log_debug(1 << CXGBI_DBG_DDP,
			"xfer %u < threshold %u, no ddp.\n",
			xferlen, DDP_THRESHOLD);
		return NULL;
	}

	/* one allocation: header + phys_addr[] + pages[] */
	gl = kzalloc(sizeof(struct cxgbi_gather_list) +
		     npages * (sizeof(dma_addr_t) +
		     sizeof(struct page *)), gfp);
	if (!gl) {
		log_debug(1 << CXGBI_DBG_DDP,
			"xfer %u, %u pages, OOM.\n", xferlen, npages);
		return NULL;
	}

	log_debug(1 << CXGBI_DBG_DDP,
		"xfer %u, sgl %u, gl max %u.\n", xferlen, sgcnt, npages);

	gl->pages = (struct page **)&gl->phys_addr[npages];
	gl->nelem = npages;
	gl->length = xferlen;
	gl->offset = sgoffset;
	gl->pages[0] = sgpage;

	/* walk the remaining entries, merging runs that continue the
	 * current page; j indexes the last page recorded in gl->pages
	 */
	for (i = 1, sg = sg_next(sgl), j = 0; i < sgcnt;
	     i++, sg = sg_next(sg)) {
		struct page *page = sg_page(sg);

		if (sgpage == page && sg->offset == sgoffset + sglen)
			sglen += sg->length;
		else {
			/* make sure the sgl is fit for ddp:
			 * each has the same page size, and
			 * all of the middle pages are used completely
			 */
			if ((j && sgoffset) || ((i != sgcnt - 1) &&
			    ((sglen + sgoffset) & ~PAGE_MASK))) {
				log_debug(1 << CXGBI_DBG_DDP,
					"page %d/%u, %u + %u.\n",
					i, sgcnt, sgoffset, sglen);
				goto error_out;
			}

			j++;
			if (j == gl->nelem || sg->offset) {
				log_debug(1 << CXGBI_DBG_DDP,
					"page %d/%u, offset %u.\n",
					j, gl->nelem, sg->offset);
				goto error_out;
			}
			gl->pages[j] = page;
			sglen = sg->length;
			sgoffset = sg->offset;
			sgpage = page;
		}
	}
	/* shrink nelem to the number of pages actually recorded */
	gl->nelem = ++j;

	if (ddp_gl_map(pdev, gl) < 0)
		goto error_out;

	return gl;

error_out:
	kfree(gl);
	return NULL;
}
1218
/*
 * ddp_tag_release - free the h/w resources behind ddp tag @tag
 *
 * Decodes the pagepod index from the tag, clears the pagepods through
 * the LLD, releases the map slots and unmaps/frees the gather list.
 * Tags whose index is outside the map are only logged.
 */
static void ddp_tag_release(struct cxgbi_hba *chba, u32 tag)
{
	struct cxgbi_device *cdev = chba->cdev;
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	u32 idx;

	idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask;
	if (idx < ddp->nppods) {
		struct cxgbi_gather_list *gl = ddp->gl_map[idx];
		unsigned int npods;

		if (!gl || !gl->nelem) {
			pr_warn("tag 0x%x, idx %u, gl 0x%p, %u.\n",
				tag, idx, gl, gl ? gl->nelem : 0);
			return;
		}
		/* pods needed to describe all pages of the gather list */
		npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
		log_debug(1 << CXGBI_DBG_DDP,
			"tag 0x%x, release idx %u, npods %u.\n",
			tag, idx, npods);
		cdev->csk_ddp_clear(chba, tag, idx, npods);
		ddp_unmark_entries(ddp, idx, npods);
		ddp_release_gl(gl, ddp->pdev);
	} else
		pr_warn("tag 0x%x, idx %u > max %u.\n", tag, idx, ddp->nppods);
}
1245
/*
 * ddp_tag_reserve - program pagepods for @gl and build its ddp tag
 *
 * Finds a free run of pagepod map slots (first continuing past the
 * last allocation, then wrapping to the start), writes the pagepod
 * header and entries through the LLD, and composes the tag from
 * @sw_tag plus the pagepod index.  On success *tagp holds the tag.
 *
 * Returns 0 on success, -EBUSY if no run of slots is free, -ENOMEM or
 * the LLD's error on programming failure.
 */
static int ddp_tag_reserve(struct cxgbi_sock *csk, unsigned int tid,
			u32 sw_tag, u32 *tagp, struct cxgbi_gather_list *gl,
			gfp_t gfp)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	struct cxgbi_tag_format *tformat = &cdev->tag_format;
	struct cxgbi_pagepod_hdr hdr;
	unsigned int npods;
	int idx = -1;
	int err = -ENOMEM;
	u32 tag;

	npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
	/* idx_last == nppods means "no previous allocation" */
	if (ddp->idx_last == ddp->nppods)
		idx = ddp_find_unused_entries(ddp, 0, ddp->nppods,
						npods, gl);
	else {
		idx = ddp_find_unused_entries(ddp, ddp->idx_last + 1,
						ddp->nppods, npods,
						gl);
		/* wrap around and retry from the bottom of the map */
		if (idx < 0 && ddp->idx_last >= npods) {
			idx = ddp_find_unused_entries(ddp, 0,
				min(ddp->idx_last + npods, ddp->nppods),
						      npods, gl);
		}
	}
	if (idx < 0) {
		log_debug(1 << CXGBI_DBG_DDP,
			"xferlen %u, gl %u, npods %u NO DDP.\n",
			gl->length, gl->nelem, npods);
		return idx;
	}

	/* optional LLD hook: pre-allocate skbs for the pagepod writes */
	if (cdev->csk_ddp_alloc_gl_skb) {
		err = cdev->csk_ddp_alloc_gl_skb(ddp, idx, npods, gfp);
		if (err < 0)
			goto unmark_entries;
	}

	tag = cxgbi_ddp_tag_base(tformat, sw_tag);
	tag |= idx << PPOD_IDX_SHIFT;

	hdr.rsvd = 0;
	hdr.vld_tid = htonl(PPOD_VALID_FLAG | PPOD_TID(tid));
	hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask);
	hdr.max_offset = htonl(gl->length);
	hdr.page_offset = htonl(gl->offset);

	err = cdev->csk_ddp_set(csk, &hdr, idx, npods, gl);
	if (err < 0) {
		if (cdev->csk_ddp_free_gl_skb)
			cdev->csk_ddp_free_gl_skb(ddp, idx, npods);
		goto unmark_entries;
	}

	ddp->idx_last = idx;
	log_debug(1 << CXGBI_DBG_DDP,
		"xfer %u, gl %u,%u, tid 0x%x, tag 0x%x->0x%x(%u,%u).\n",
		gl->length, gl->nelem, gl->offset, tid, sw_tag, tag, idx,
		npods);
	*tagp = tag;
	return 0;

unmark_entries:
	ddp_unmark_entries(ddp, idx, npods);
	return err;
}
1314
/*
 * cxgbi_ddp_reserve - set up direct data placement for a transfer
 * @csk:    the offload socket
 * @tagp:   out: the ddp tag covering the transfer
 * @sw_tag: software tag to embed in the ddp tag
 * @xferlen: transfer length in bytes
 * @sgl/@sgcnt: scatterlist describing the destination buffers
 * @gfp:    allocation flags
 *
 * Returns 0 on success; -EINVAL when ddp is unavailable, the transfer
 * is below the ddp threshold, or @sw_tag does not fit the tag format;
 * -ENOMEM when the gather list cannot be built; otherwise the
 * reservation error.
 */
int cxgbi_ddp_reserve(struct cxgbi_sock *csk, unsigned int *tagp,
			unsigned int sw_tag, unsigned int xferlen,
			struct scatterlist *sgl, unsigned int sgcnt, gfp_t gfp)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_tag_format *tformat = &cdev->tag_format;
	struct cxgbi_gather_list *gl;
	int err;

	if (page_idx >= DDP_PGIDX_MAX || !cdev->ddp ||
	    xferlen < DDP_THRESHOLD) {
		log_debug(1 << CXGBI_DBG_DDP,
			"pgidx %u, xfer %u, NO ddp.\n", page_idx, xferlen);
		return -EINVAL;
	}

	if (!cxgbi_sw_tag_usable(tformat, sw_tag)) {
		log_debug(1 << CXGBI_DBG_DDP,
			"sw_tag 0x%x NOT usable.\n", sw_tag);
		return -EINVAL;
	}

	gl = ddp_make_gl(xferlen, sgl, sgcnt, cdev->pdev, gfp);
	if (!gl)
		return -ENOMEM;

	err = ddp_tag_reserve(csk, csk->tid, sw_tag, tagp, gl, gfp);
	if (err < 0)
		ddp_release_gl(gl, cdev->pdev);

	return err;
}
1347
/*
 * ddp_destroy - kref release function for the device's ddp state
 *
 * Walks the pagepod map, freeing every gather list still registered
 * (and its pre-allocated pagepod skbs, if the LLD uses them), then
 * frees the ddp structure itself.
 *
 * NOTE(review): the gather lists are kfree'd here without calling
 * ddp_gl_unmap() -- confirm the DMA mappings are already gone on this
 * path.
 */
static void ddp_destroy(struct kref *kref)
{
	struct cxgbi_ddp_info *ddp = container_of(kref,
						struct cxgbi_ddp_info,
						refcnt);
	struct cxgbi_device *cdev = ddp->cdev;
	int i = 0;

	pr_info("kref 0, destroy ddp 0x%p, cdev 0x%p.\n", ddp, cdev);

	while (i < ddp->nppods) {
		struct cxgbi_gather_list *gl = ddp->gl_map[i];

		if (gl) {
			int npods = (gl->nelem + PPOD_PAGES_MAX - 1)
					>> PPOD_PAGES_SHIFT;
			pr_info("cdev 0x%p, ddp %d + %d.\n", cdev, i, npods);
			kfree(gl);
			if (cdev->csk_ddp_free_gl_skb)
				cdev->csk_ddp_free_gl_skb(ddp, i, npods);
			/* a gather list occupies npods consecutive slots */
			i += npods;
		} else
			i++;
	}
	cxgbi_free_big_mem(ddp);
}
1374
1375int cxgbi_ddp_cleanup(struct cxgbi_device *cdev)
1376{
1377 struct cxgbi_ddp_info *ddp = cdev->ddp;
1378
1379 log_debug(1 << CXGBI_DBG_DDP,
1380 "cdev 0x%p, release ddp 0x%p.\n", cdev, ddp);
1381 cdev->ddp = NULL;
1382 if (ddp)
1383 return kref_put(&ddp->refcnt, ddp_destroy);
1384 return 0;
1385}
1386EXPORT_SYMBOL_GPL(cxgbi_ddp_cleanup);
1387
/*
 * cxgbi_ddp_init - allocate and initialize the ddp state for @cdev
 * @llimit, @ulimit:   byte range of pagepod memory on the adapter
 * @max_txsz, @max_rxsz: adapter tx/rx size limits
 *
 * Sizes the pagepod map from the memory window (index bits capped at
 * PPOD_IDX_MAX_SIZE), allocates the gather-list map plus the per-pod
 * skb pointer array in one chunk, and derives the tag format and the
 * maximum PDU payload sizes advertised to the iSCSI layer.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int cxgbi_ddp_init(struct cxgbi_device *cdev,
		   unsigned int llimit, unsigned int ulimit,
		   unsigned int max_txsz, unsigned int max_rxsz)
{
	struct cxgbi_ddp_info *ddp;
	unsigned int ppmax, bits;

	/* pods that fit in the window, then clamp to the index-bit budget */
	ppmax = (ulimit - llimit + 1) >> PPOD_SIZE_SHIFT;
	bits = __ilog2_u32(ppmax) + 1;
	if (bits > PPOD_IDX_MAX_SIZE)
		bits = PPOD_IDX_MAX_SIZE;
	ppmax = (1 << (bits - 1)) - 1;

	/* one allocation: header + gl_map[] + gl_skb[] */
	ddp = cxgbi_alloc_big_mem(sizeof(struct cxgbi_ddp_info) +
				ppmax * (sizeof(struct cxgbi_gather_list *) +
					 sizeof(struct sk_buff *)),
				GFP_KERNEL);
	if (!ddp) {
		pr_warn("cdev 0x%p, ddp ppmax %u OOM.\n", cdev, ppmax);
		return -ENOMEM;
	}
	ddp->gl_map = (struct cxgbi_gather_list **)(ddp + 1);
	ddp->gl_skb = (struct sk_buff **)(((char *)ddp->gl_map) +
				ppmax * sizeof(struct cxgbi_gather_list *));
	cdev->ddp = ddp;

	spin_lock_init(&ddp->map_lock);
	kref_init(&ddp->refcnt);

	ddp->cdev = cdev;
	ddp->pdev = cdev->pdev;
	ddp->llimit = llimit;
	ddp->ulimit = ulimit;
	ddp->max_txsz = min_t(unsigned int, max_txsz, ULP2_MAX_PKT_SIZE);
	ddp->max_rxsz = min_t(unsigned int, max_rxsz, ULP2_MAX_PKT_SIZE);
	ddp->nppods = ppmax;
	/* idx_last == nppods marks "no allocation done yet" */
	ddp->idx_last = ppmax;
	ddp->idx_bits = bits;
	ddp->idx_mask = (1 << bits) - 1;
	ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 1;

	cdev->tag_format.sw_bits = sw_tag_idx_bits + sw_tag_age_bits;
	cdev->tag_format.rsvd_bits = ddp->idx_bits;
	cdev->tag_format.rsvd_shift = PPOD_IDX_SHIFT;
	cdev->tag_format.rsvd_mask = (1 << cdev->tag_format.rsvd_bits) - 1;

	pr_info("%s tag format, sw %u, rsvd %u,%u, mask 0x%x.\n",
		cdev->ports[0]->name, cdev->tag_format.sw_bits,
		cdev->tag_format.rsvd_bits, cdev->tag_format.rsvd_shift,
		cdev->tag_format.rsvd_mask);

	cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN);
	cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN);

	log_debug(1 << CXGBI_DBG_DDP,
		"%s max payload size: %u/%u, %u/%u.\n",
		cdev->ports[0]->name, cdev->tx_max_size, ddp->max_txsz,
		cdev->rx_max_size, ddp->max_rxsz);
	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_init);
1451
1452/*
1453 * APIs interacting with open-iscsi libraries
1454 */
1455
/* all-zero scratch bytes -- presumably the pad-byte source for 4-byte
 * PDU alignment; confirm with callers (not visible in this chunk)
 */
static unsigned char padding[4];
1457
/*
 * task_release_itt - release the DDP tag encoded in a completed task's ITT
 * @task:    iscsi task being cleaned up
 * @hdr_itt: wire-format (big-endian) ITT from the pdu header
 *
 * Only data-in capable commands (bidi or DMA_FROM_DEVICE) can have been
 * set up for DDP; for those, if the tag is a hardware DDP tag, return the
 * ddp resources to the adapter. Software-only tags need no cleanup.
 */
static void task_release_itt(struct iscsi_task *task, itt_t hdr_itt)
{
	struct scsi_cmnd *sc = task->sc;
	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_hba *chba = cconn->chba;
	struct cxgbi_tag_format *tformat = &chba->cdev->tag_format;
	/* the itt is carried big-endian on the wire */
	u32 tag = ntohl((__force u32)hdr_itt);

	log_debug(1 << CXGBI_DBG_DDP,
		"cdev 0x%p, release tag 0x%x.\n", chba->cdev, tag);
	if (sc &&
	    (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) &&
	    cxgbi_is_ddp_tag(tformat, tag))
		ddp_tag_release(chba, tag);
}
1474
/*
 * task_reserve_itt - pick the ITT for a new task, reserving DDP if possible
 * @task:    iscsi task being started
 * @hdr_itt: output, wire-format (big-endian) ITT to place in the pdu header
 *
 * The software tag combines the session age and the task's itt index.
 * For data-in capable commands (bidi or DMA_FROM_DEVICE), try to reserve
 * a hardware DDP tag covering the command's receive scatterlist; on any
 * failure fall back to a plain non-ddp tag. Always returns 0 — a failed
 * ddp reservation is not an error, just a lost offload opportunity.
 */
static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt)
{
	struct scsi_cmnd *sc = task->sc;
	struct iscsi_conn *conn = task->conn;
	struct iscsi_session *sess = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_hba *chba = cconn->chba;
	struct cxgbi_tag_format *tformat = &chba->cdev->tag_format;
	u32 sw_tag = (sess->age << cconn->task_idx_bits) | task->itt;
	u32 tag = 0;
	int err = -EINVAL;

	if (sc &&
	    (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE)) {
		err = cxgbi_ddp_reserve(cconn->cep->csk, &tag, sw_tag,
					scsi_in(sc)->length,
					scsi_in(sc)->table.sgl,
					scsi_in(sc)->table.nents,
					GFP_ATOMIC);
		if (err < 0)
			log_debug(1 << CXGBI_DBG_DDP,
				"csk 0x%p, R task 0x%p, %u,%u, no ddp.\n",
				cconn->cep->csk, task, scsi_in(sc)->length,
				scsi_in(sc)->table.nents);
	}

	if (err < 0)
		tag = cxgbi_set_non_ddp_tag(tformat, sw_tag);
	/* the itt needs to be sent in big-endian order */
	*hdr_itt = (__force itt_t)htonl(tag);

	log_debug(1 << CXGBI_DBG_DDP,
		"cdev 0x%p, task 0x%p, 0x%x(0x%x,0x%x)->0x%x/0x%x.\n",
		chba->cdev, task, sw_tag, task->itt, sess->age, tag, *hdr_itt);
	return 0;
}
1512
/*
 * cxgbi_parse_pdu_itt - decode task index and session age from a wire ITT
 * @conn: iscsi connection the pdu arrived on
 * @itt:  wire-format (big-endian) ITT
 * @idx:  output, task index (may be NULL if not wanted)
 * @age:  output, session age (may be NULL if not wanted)
 *
 * Strips the adapter's reserved tag bits and splits the remaining
 * software bits back into the (age << task_idx_bits) | itt layout
 * built by task_reserve_itt().
 */
void cxgbi_parse_pdu_itt(struct iscsi_conn *conn, itt_t itt, int *idx, int *age)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_device *cdev = cconn->chba->cdev;
	u32 tag = ntohl((__force u32) itt);
	u32 sw_bits;

	sw_bits = cxgbi_tag_nonrsvd_bits(&cdev->tag_format, tag);
	if (idx)
		*idx = sw_bits & ((1 << cconn->task_idx_bits) - 1);
	if (age)
		*age = (sw_bits >> cconn->task_idx_bits) & ISCSI_AGE_MASK;

	log_debug(1 << CXGBI_DBG_DDP,
		"cdev 0x%p, tag 0x%x/0x%x, -> 0x%x(0x%x,0x%x).\n",
		cdev, tag, itt, sw_bits, idx ? *idx : 0xFFFFF,
		age ? *age : 0xFF);
}
EXPORT_SYMBOL_GPL(cxgbi_parse_pdu_itt);
1533
1534void cxgbi_conn_tx_open(struct cxgbi_sock *csk)
1535{
1536 struct iscsi_conn *conn = csk->user_data;
1537
1538 if (conn) {
1539 log_debug(1 << CXGBI_DBG_SOCK,
1540 "csk 0x%p, cid %d.\n", csk, conn->id);
1541 iscsi_conn_queue_work(conn);
1542 }
1543}
1544EXPORT_SYMBOL_GPL(cxgbi_conn_tx_open);
1545
1546/*
1547 * pdu receive, interact with libiscsi_tcp
1548 */
/*
 * read_pdu_skb - feed one skb (or part of it) into the libiscsi_tcp
 * segment state machine and translate its status into a return code.
 * @conn:      iscsi connection
 * @skb:       received skb
 * @offset:    byte offset into the skb at which to start
 * @offloaded: non-zero if the data payload was already DDP'ed by hardware
 *
 * Returns the number of bytes consumed on success, negative errno on
 * protocol or consistency errors.
 */
static inline int read_pdu_skb(struct iscsi_conn *conn,
			       struct sk_buff *skb,
			       unsigned int offset,
			       int offloaded)
{
	int status = 0;
	int bytes_read;

	bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status);
	switch (status) {
	case ISCSI_TCP_CONN_ERR:
		pr_info("skb 0x%p, off %u, %d, TCP_ERR.\n",
			  skb, offset, offloaded);
		return -EIO;
	case ISCSI_TCP_SUSPENDED:
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"skb 0x%p, off %u, %d, TCP_SUSPEND, rc %d.\n",
			skb, offset, offloaded, bytes_read);
		/* no transfer - just have caller flush queue */
		return bytes_read;
	case ISCSI_TCP_SKB_DONE:
		pr_info("skb 0x%p, off %u, %d, TCP_SKB_DONE.\n",
			skb, offset, offloaded);
		/*
		 * pdus should always fit in the skb and we should get
		 * segment done notification.
		 */
		iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb.");
		return -EFAULT;
	case ISCSI_TCP_SEGMENT_DONE:
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"skb 0x%p, off %u, %d, TCP_SEG_DONE, rc %d.\n",
			skb, offset, offloaded, bytes_read);
		return bytes_read;
	default:
		pr_info("skb 0x%p, off %u, %d, invalid status %d.\n",
			skb, offset, offloaded, status);
		return -EINVAL;
	}
}
1589
/*
 * skb_read_pdu_bhs - process the BHS (header) portion of a received pdu
 * @conn: iscsi connection
 * @skb:  skb whose head contains the pdu header
 *
 * Sanity-checks that the recv state machine expects a header and that the
 * hardware did not flag a header-digest error, then hands the header to
 * read_pdu_skb(). Returns bytes consumed or negative errno; protocol and
 * digest failures also tear the connection down via iscsi_conn_failure().
 */
static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;

	log_debug(1 << CXGBI_DBG_PDU_RX,
		"conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
		conn, skb, skb->len, cxgbi_skcb_flags(skb));

	if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) {
		pr_info("conn 0x%p, skb 0x%p, not hdr.\n", conn, skb);
		iscsi_conn_failure(conn, ISCSI_ERR_PROTO);
		return -EIO;
	}

	/* the adapter verifies the header digest; only the error flag
	 * matters here */
	if (conn->hdrdgst_en &&
	    cxgbi_skcb_test_flag(skb, SKCBF_RX_HCRC_ERR)) {
		pr_info("conn 0x%p, skb 0x%p, hcrc.\n", conn, skb);
		iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST);
		return -EIO;
	}

	return read_pdu_skb(conn, skb, 0, 0);
}
1613
1614static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb,
1615 struct sk_buff *skb, unsigned int offset)
1616{
1617 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1618 bool offloaded = 0;
1619 int opcode = tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK;
1620
1621 log_debug(1 << CXGBI_DBG_PDU_RX,
1622 "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
1623 conn, skb, skb->len, cxgbi_skcb_flags(skb));
1624
1625 if (conn->datadgst_en &&
1626 cxgbi_skcb_test_flag(lskb, SKCBF_RX_DCRC_ERR)) {
1627 pr_info("conn 0x%p, skb 0x%p, dcrc 0x%lx.\n",
1628 conn, lskb, cxgbi_skcb_flags(lskb));
1629 iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
1630 return -EIO;
1631 }
1632
1633 if (iscsi_tcp_recv_segment_is_hdr(tcp_conn))
1634 return 0;
1635
1636 /* coalesced, add header digest length */
1637 if (lskb == skb && conn->hdrdgst_en)
1638 offset += ISCSI_DIGEST_SIZE;
1639
1640 if (cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA_DDPD))
1641 offloaded = 1;
1642
1643 if (opcode == ISCSI_OP_SCSI_DATA_IN)
1644 log_debug(1 << CXGBI_DBG_PDU_RX,
1645 "skb 0x%p, op 0x%x, itt 0x%x, %u %s ddp'ed.\n",
1646 skb, opcode, ntohl(tcp_conn->in.hdr->itt),
1647 tcp_conn->in.datalen, offloaded ? "is" : "not");
1648
1649 return read_pdu_skb(conn, skb, offset, offloaded);
1650}
1651
1652static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied)
1653{
1654 struct cxgbi_device *cdev = csk->cdev;
1655 int must_send;
1656 u32 credits;
1657
1658 log_debug(1 << CXGBI_DBG_PDU_RX,
1659 "csk 0x%p,%u,0x%lu,%u, seq %u, wup %u, thre %u, %u.\n",
1660 csk, csk->state, csk->flags, csk->tid, csk->copied_seq,
1661 csk->rcv_wup, cdev->rx_credit_thres,
1662 cdev->rcv_win);
1663
1664 if (csk->state != CTP_ESTABLISHED)
1665 return;
1666
1667 credits = csk->copied_seq - csk->rcv_wup;
1668 if (unlikely(!credits))
1669 return;
1670 if (unlikely(cdev->rx_credit_thres == 0))
1671 return;
1672
1673 must_send = credits + 16384 >= cdev->rcv_win;
1674 if (must_send || credits >= cdev->rx_credit_thres)
1675 csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits);
1676}
1677
/*
 * cxgbi_conn_pdu_ready - drain completed pdus from the socket's receive
 * queue and feed them to libiscsi_tcp.
 * @csk: offloaded socket with pdus pending
 *
 * Each completed pdu is either a single skb holding header+data
 * (SKCBF_RX_COALESCED) or a header skb optionally followed by a separate
 * data skb (SKCBF_RX_DATA). Processing stops at the first skb that is
 * not yet marked SKCBF_RX_STATUS (i.e. not fully completed by hardware)
 * or on the first error. Consumed bytes are credited back to the
 * adapter's rx window; any error tears the connection down.
 */
void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct iscsi_conn *conn = csk->user_data;
	struct sk_buff *skb;
	unsigned int read = 0;
	int err = 0;

	log_debug(1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p, conn 0x%p.\n", csk, conn);

	/* nothing to do if the iscsi conn is gone or rx is suspended */
	if (unlikely(!conn || conn->suspend_rx)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n",
			csk, conn, conn ? conn->id : 0xFF,
			conn ? conn->suspend_rx : 0xFF);
		return;
	}

	while (!err) {
		skb = skb_peek(&csk->receive_queue);
		if (!skb ||
		    !(cxgbi_skcb_test_flag(skb, SKCBF_RX_STATUS))) {
			/* head pdu not complete yet - stop here */
			if (skb)
				log_debug(1 << CXGBI_DBG_PDU_RX,
					"skb 0x%p, NOT ready 0x%lx.\n",
					skb, cxgbi_skcb_flags(skb));
			break;
		}
		__skb_unlink(skb, &csk->receive_queue);

		read += cxgbi_skcb_rx_pdulen(skb);
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p, skb 0x%p,%u,f 0x%lx, pdu len %u.\n",
			csk, skb, skb->len, cxgbi_skcb_flags(skb),
			cxgbi_skcb_rx_pdulen(skb));

		if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) {
			/* header and data share this skb; data starts after
			 * the header (err = header bytes) plus hw extra */
			err = skb_read_pdu_bhs(conn, skb);
			if (err < 0) {
				pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, "
					"f 0x%lx, plen %u.\n",
					csk, skb, skb->len,
					cxgbi_skcb_flags(skb),
					cxgbi_skcb_rx_pdulen(skb));
				goto skb_done;
			}
			err = skb_read_pdu_data(conn, skb, skb,
						err + cdev->skb_rx_extra);
			if (err < 0)
				pr_err("coalesced data, csk 0x%p, skb 0x%p,%u, "
					"f 0x%lx, plen %u.\n",
					csk, skb, skb->len,
					cxgbi_skcb_flags(skb),
					cxgbi_skcb_rx_pdulen(skb));
		} else {
			err = skb_read_pdu_bhs(conn, skb);
			if (err < 0) {
				pr_err("bhs, csk 0x%p, skb 0x%p,%u, "
					"f 0x%lx, plen %u.\n",
					csk, skb, skb->len,
					cxgbi_skcb_flags(skb),
					cxgbi_skcb_rx_pdulen(skb));
				goto skb_done;
			}

			if (cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
				/* the data segment arrived as a separate skb,
				 * which must be next on the queue */
				struct sk_buff *dskb;

				dskb = skb_peek(&csk->receive_queue);
				if (!dskb) {
					pr_err("csk 0x%p, skb 0x%p,%u, f 0x%lx,"
						" plen %u, NO data.\n",
						csk, skb, skb->len,
						cxgbi_skcb_flags(skb),
						cxgbi_skcb_rx_pdulen(skb));
					err = -EIO;
					goto skb_done;
				}
				__skb_unlink(dskb, &csk->receive_queue);

				err = skb_read_pdu_data(conn, skb, dskb, 0);
				if (err < 0)
					pr_err("data, csk 0x%p, skb 0x%p,%u, "
						"f 0x%lx, plen %u, dskb 0x%p,"
						"%u.\n",
						csk, skb, skb->len,
						cxgbi_skcb_flags(skb),
						cxgbi_skcb_rx_pdulen(skb),
						dskb, dskb->len);
				__kfree_skb(dskb);
			} else
				err = skb_read_pdu_data(conn, skb, skb, 0);
		}
skb_done:
		__kfree_skb(skb);

		if (err < 0)
			break;
	}

	log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, read %u.\n", csk, read);
	if (read) {
		csk->copied_seq += read;
		csk_return_rx_credits(csk, read);
		conn->rxdata_octets += read;
	}

	if (err < 0) {
		pr_info("csk 0x%p, 0x%p, rx failed %d, read %u.\n",
			csk, conn, err, read);
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_conn_pdu_ready);
1793
1794static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt,
1795 unsigned int offset, unsigned int *off,
1796 struct scatterlist **sgp)
1797{
1798 int i;
1799 struct scatterlist *sg;
1800
1801 for_each_sg(sgl, sg, sgcnt, i) {
1802 if (offset < sg->length) {
1803 *off = offset;
1804 *sgp = sg;
1805 return 0;
1806 }
1807 offset -= sg->length;
1808 }
1809 return -EFAULT;
1810}
1811
/*
 * sgl_read_to_frags - map a byte range of a scatterlist onto skb page frags
 * @sg:       first scatterlist entry (already positioned by sgl_seek_offset)
 * @sgoffset: starting byte offset within @sg
 * @dlen:     total number of bytes to cover
 * @frags:    output array of page fragments
 * @frag_max: capacity of @frags
 *
 * Walks the scatterlist, merging physically-contiguous runs on the same
 * page into a single frag. Returns the number of frags filled, or
 * -EINVAL if the scatterlist runs out of data or @frag_max is exceeded.
 */
static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
			     unsigned int dlen, skb_frag_t *frags,
			     int frag_max)
{
	unsigned int datalen = dlen;
	unsigned int sglen = sg->length - sgoffset;
	struct page *page = sg_page(sg);
	int i;

	i = 0;
	do {
		unsigned int copy;

		/* current entry exhausted - advance to the next one */
		if (!sglen) {
			sg = sg_next(sg);
			if (!sg) {
				pr_warn("sg %d NULL, len %u/%u.\n",
					i, datalen, dlen);
				return -EINVAL;
			}
			sgoffset = 0;
			sglen = sg->length;
			page = sg_page(sg);

		}
		copy = min(datalen, sglen);
		/* extend the previous frag when this chunk is contiguous
		 * with it on the same page */
		if (i && page == frags[i - 1].page &&
		    sgoffset + sg->offset ==
			frags[i - 1].page_offset + frags[i - 1].size) {
			frags[i - 1].size += copy;
		} else {
			if (i >= frag_max) {
				pr_warn("too many pages %u, dlen %u.\n",
					frag_max, dlen);
				return -EINVAL;
			}

			frags[i].page = page;
			frags[i].page_offset = sg->offset + sgoffset;
			frags[i].size = copy;
			i++;
		}
		datalen -= copy;
		sgoffset += copy;
		sglen -= copy;
	} while (datalen);

	return i;
}
1861
/*
 * cxgbi_conn_alloc_pdu - allocate the tx skb for a new iscsi pdu
 * @task:   iscsi task the pdu belongs to
 * @opcode: iscsi opcode of the pdu
 *
 * Reserves skb_tx_rsvd bytes of headroom for the adapter's tx headers
 * and room for BHS+AHS. For write-direction commands, when the skb head
 * is large enough, extra head space is reserved so payload can be copied
 * inline instead of attached as page frags. Also reserves the task's ITT
 * (with ddp when possible) unless this is a DATA_OUT pdu, which reuses
 * the original command's itt. Returns 0 or -ENOMEM.
 */
int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
{
	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_device *cdev = cconn->chba->cdev;
	struct iscsi_conn *conn = task->conn;
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
	struct scsi_cmnd *sc = task->sc;
	int headroom = SKB_TX_ISCSI_PDU_HEADER_MAX;

	tcp_task->dd_data = tdata;
	task->hdr = NULL;

	if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) &&
	    (opcode == ISCSI_OP_SCSI_DATA_OUT ||
	     (opcode == ISCSI_OP_SCSI_CMD &&
	      (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE))))
		/* data could go into the skb head */
		headroom += min_t(unsigned int,
				SKB_MAX_HEAD(cdev->skb_tx_rsvd),
				conn->max_xmit_dlength);

	tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC);
	if (!tdata->skb) {
		pr_warn("alloc skb %u+%u, opcode 0x%x failed.\n",
			cdev->skb_tx_rsvd, headroom, opcode);
		return -ENOMEM;
	}

	skb_reserve(tdata->skb, cdev->skb_tx_rsvd);
	task->hdr = (struct iscsi_hdr *)tdata->skb->data;
	task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */

	/* data_out uses scsi_cmd's itt */
	if (opcode != ISCSI_OP_SCSI_DATA_OUT)
		task_reserve_itt(task, &task->hdr->itt);

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
		"task 0x%p, op 0x%x, skb 0x%p,%u+%u/%u, itt 0x%x.\n",
		task, opcode, tdata->skb, cdev->skb_tx_rsvd, headroom,
		conn->max_xmit_dlength, ntohl(task->hdr->itt));

	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu);
1908
1909static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
1910{
1911 u8 submode = 0;
1912
1913 if (hcrc)
1914 submode |= 1;
1915 if (dcrc)
1916 submode |= 2;
1917 cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode;
1918}
1919
/*
 * cxgbi_conn_init_pdu - attach the data segment (and padding) to a pdu skb
 * @task:   iscsi task; task->hdr was already placed in the skb head by
 *          cxgbi_conn_alloc_pdu()
 * @offset: offset of this segment within the command's data buffer
 * @count:  number of payload bytes for this pdu
 *
 * For scsi commands the payload comes from the command's scatterlist:
 * it is either copied into the skb head (when it would not fit in the
 * frag slots together with the padding frag) or attached as page frags
 * with page references taken. For non-scsi pdus (task->data) a single
 * frag is used. The 4-byte pad buffer is attached as a final frag when
 * needed. Returns 0 or a negative errno from scatterlist traversal.
 */
int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
			      unsigned int count)
{
	struct iscsi_conn *conn = task->conn;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
	struct sk_buff *skb = tdata->skb;
	unsigned int datalen = count;
	int i, padlen = iscsi_padding(count);
	struct page *pg;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
		"task 0x%p,0x%p, skb 0x%p, 0x%x,0x%x,0x%x, %u+%u.\n",
		task, task->sc, skb, (*skb->data) & ISCSI_OPCODE_MASK,
		ntohl(task->cmdsn), ntohl(task->hdr->itt), offset, count);

	/* commit the header written into the skb head */
	skb_put(skb, task->hdr_len);
	tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0);
	if (!count)
		return 0;

	if (task->sc) {
		struct scsi_data_buffer *sdb = scsi_out(task->sc);
		struct scatterlist *sg = NULL;
		int err;

		tdata->offset = offset;
		tdata->count = count;
		err = sgl_seek_offset(
			sdb->table.sgl, sdb->table.nents,
			tdata->offset, &tdata->sgoffset, &sg);
		if (err < 0) {
			pr_warn("tpdu, sgl %u, bad offset %u/%u.\n",
				sdb->table.nents, tdata->offset, sdb->length);
			return err;
		}
		err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count,
					tdata->frags, MAX_PDU_FRAGS);
		if (err < 0) {
			pr_warn("tpdu, sgl %u, bad offset %u + %u.\n",
				sdb->table.nents, tdata->offset, tdata->count);
			return err;
		}
		tdata->nr_frags = err;

		/* too many frags (or no slot left for the pad frag):
		 * copy the payload into the skb's head instead */
		if (tdata->nr_frags > MAX_SKB_FRAGS ||
		    (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) {
			char *dst = skb->data + task->hdr_len;
			skb_frag_t *frag = tdata->frags;

			/* data fits in the skb's headroom */
			for (i = 0; i < tdata->nr_frags; i++, frag++) {
				char *src = kmap_atomic(frag->page,
							KM_SOFTIRQ0);

				memcpy(dst, src+frag->page_offset, frag->size);
				dst += frag->size;
				kunmap_atomic(src, KM_SOFTIRQ0);
			}
			/* pad inline too, so no pad frag is needed */
			if (padlen) {
				memset(dst, 0, padlen);
				padlen = 0;
			}
			skb_put(skb, count + padlen);
		} else {
			/* data fit into frag_list */
			for (i = 0; i < tdata->nr_frags; i++)
				get_page(tdata->frags[i].page);

			memcpy(skb_shinfo(skb)->frags, tdata->frags,
				sizeof(skb_frag_t) * tdata->nr_frags);
			skb_shinfo(skb)->nr_frags = tdata->nr_frags;
			skb->len += count;
			skb->data_len += count;
			skb->truesize += count;
		}

	} else {
		/* non-scsi pdu data lives in task->data */
		pg = virt_to_page(task->data);

		get_page(pg);
		skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data),
					count);
		skb->len += count;
		skb->data_len += count;
		skb->truesize += count;
	}

	/* attach the shared zero pad buffer as the last frag */
	if (padlen) {
		i = skb_shinfo(skb)->nr_frags;
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			virt_to_page(padding), offset_in_page(padding),
			padlen);

		skb->data_len += padlen;
		skb->truesize += padlen;
		skb->len += padlen;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_conn_init_pdu);
2021
2022int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
2023{
2024 struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
2025 struct cxgbi_conn *cconn = tcp_conn->dd_data;
2026 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
2027 struct sk_buff *skb = tdata->skb;
2028 unsigned int datalen;
2029 int err;
2030
2031 if (!skb) {
2032 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
2033 "task 0x%p, skb NULL.\n", task);
2034 return 0;
2035 }
2036
2037 datalen = skb->data_len;
2038 tdata->skb = NULL;
2039 err = cxgbi_sock_send_pdus(cconn->cep->csk, skb);
2040 if (err > 0) {
2041 int pdulen = err;
2042
2043 log_debug(1 << CXGBI_DBG_PDU_TX,
2044 "task 0x%p,0x%p, skb 0x%p, len %u/%u, rv %d.\n",
2045 task, task->sc, skb, skb->len, skb->data_len, err);
2046
2047 if (task->conn->hdrdgst_en)
2048 pdulen += ISCSI_DIGEST_SIZE;
2049
2050 if (datalen && task->conn->datadgst_en)
2051 pdulen += ISCSI_DIGEST_SIZE;
2052
2053 task->conn->txdata_octets += pdulen;
2054 return 0;
2055 }
2056
2057 if (err == -EAGAIN || err == -ENOBUFS) {
2058 log_debug(1 << CXGBI_DBG_PDU_TX,
2059 "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n",
2060 task, skb, skb->len, skb->data_len, err);
2061 /* reset skb to send when we are called again */
2062 tdata->skb = skb;
2063 return err;
2064 }
2065
2066 kfree_skb(skb);
2067 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
2068 "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
2069 task->itt, skb, skb->len, skb->data_len, err);
2070 iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
2071 iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
2072 return err;
2073}
2074EXPORT_SYMBOL_GPL(cxgbi_conn_xmit_pdu);
2075
/*
 * cxgbi_cleanup_task - release per-task cxgbi resources
 * @task: iscsi task being destroyed
 *
 * Frees an skb that never reached the xmit callout, clears the per-task
 * data, releases the ddp tag encoded in the task's itt, and delegates
 * the rest to libiscsi_tcp.
 */
void cxgbi_cleanup_task(struct iscsi_task *task)
{
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);

	log_debug(1 << CXGBI_DBG_ISCSI,
		"task 0x%p, skb 0x%p, itt 0x%x.\n",
		task, tdata->skb, task->hdr_itt);

	/* never reached the xmit task callout */
	if (tdata->skb)
		__kfree_skb(tdata->skb);
	memset(tdata, 0, sizeof(*tdata));

	task_release_itt(task, task->hdr_itt);
	iscsi_tcp_cleanup_task(task);
}
EXPORT_SYMBOL_GPL(cxgbi_cleanup_task);
2093
2094void cxgbi_get_conn_stats(struct iscsi_cls_conn *cls_conn,
2095 struct iscsi_stats *stats)
2096{
2097 struct iscsi_conn *conn = cls_conn->dd_data;
2098
2099 stats->txdata_octets = conn->txdata_octets;
2100 stats->rxdata_octets = conn->rxdata_octets;
2101 stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
2102 stats->dataout_pdus = conn->dataout_pdus_cnt;
2103 stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
2104 stats->datain_pdus = conn->datain_pdus_cnt;
2105 stats->r2t_pdus = conn->r2t_pdus_cnt;
2106 stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
2107 stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
2108 stats->digest_err = 0;
2109 stats->timeout_err = 0;
2110 stats->custom_length = 1;
2111 strcpy(stats->custom[0].desc, "eh_abort_cnt");
2112 stats->custom[0].value = conn->eh_abort_cnt;
2113}
2114EXPORT_SYMBOL_GPL(cxgbi_get_conn_stats);
2115
/*
 * cxgbi_conn_max_xmit_dlength - clamp the negotiated tx data segment size
 * @conn: iscsi connection
 *
 * Upper bound is the larger of the skb headroom and 512*MAX_SKB_FRAGS,
 * further limited by the device's tx_max_size; the result is aligned
 * down to a 512-byte multiple. Always returns 0.
 */
static int cxgbi_conn_max_xmit_dlength(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_device *cdev = cconn->chba->cdev;
	unsigned int headroom = SKB_MAX_HEAD(cdev->skb_tx_rsvd);
	unsigned int max_def = 512 * MAX_SKB_FRAGS;
	unsigned int max = max(max_def, headroom);

	max = min(cconn->chba->cdev->tx_max_size, max);
	if (conn->max_xmit_dlength)
		/* respect an already-negotiated (smaller) value */
		conn->max_xmit_dlength = min(conn->max_xmit_dlength, max);
	else
		conn->max_xmit_dlength = max;
	cxgbi_align_pdu_size(conn->max_xmit_dlength);

	return 0;
}
2134
/*
 * cxgbi_conn_max_recv_dlength - validate/derive the rx data segment size
 * @conn: iscsi connection
 *
 * The device's rx_max_size (512-aligned) is a hard limit: a larger
 * already-negotiated value is rejected with -EINVAL rather than
 * silently clamped. Returns 0 on success.
 */
static int cxgbi_conn_max_recv_dlength(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	unsigned int max = cconn->chba->cdev->rx_max_size;

	cxgbi_align_pdu_size(max);

	if (conn->max_recv_dlength) {
		if (conn->max_recv_dlength > max) {
			pr_err("MaxRecvDataSegmentLength %u > %u.\n",
				conn->max_recv_dlength, max);
			return -EINVAL;
		}
		/* NOTE(review): min() is redundant after the check above,
		 * but kept for safety; only the alignment can change it */
		conn->max_recv_dlength = min(conn->max_recv_dlength, max);
		cxgbi_align_pdu_size(conn->max_recv_dlength);
	} else
		conn->max_recv_dlength = max;

	return 0;
}
2156
2157int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
2158 enum iscsi_param param, char *buf, int buflen)
2159{
2160 struct iscsi_conn *conn = cls_conn->dd_data;
2161 struct iscsi_session *session = conn->session;
2162 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
2163 struct cxgbi_conn *cconn = tcp_conn->dd_data;
2164 struct cxgbi_sock *csk = cconn->cep->csk;
2165 int value, err = 0;
2166
2167 log_debug(1 << CXGBI_DBG_ISCSI,
2168 "cls_conn 0x%p, param %d, buf(%d) %s.\n",
2169 cls_conn, param, buflen, buf);
2170
2171 switch (param) {
2172 case ISCSI_PARAM_HDRDGST_EN:
2173 err = iscsi_set_param(cls_conn, param, buf, buflen);
2174 if (!err && conn->hdrdgst_en)
2175 err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
2176 conn->hdrdgst_en,
2177 conn->datadgst_en, 0);
2178 break;
2179 case ISCSI_PARAM_DATADGST_EN:
2180 err = iscsi_set_param(cls_conn, param, buf, buflen);
2181 if (!err && conn->datadgst_en)
2182 err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
2183 conn->hdrdgst_en,
2184 conn->datadgst_en, 0);
2185 break;
2186 case ISCSI_PARAM_MAX_R2T:
2187 sscanf(buf, "%d", &value);
2188 if (value <= 0 || !is_power_of_2(value))
2189 return -EINVAL;
2190 if (session->max_r2t == value)
2191 break;
2192 iscsi_tcp_r2tpool_free(session);
2193 err = iscsi_set_param(cls_conn, param, buf, buflen);
2194 if (!err && iscsi_tcp_r2tpool_alloc(session))
2195 return -ENOMEM;
2196 case ISCSI_PARAM_MAX_RECV_DLENGTH:
2197 err = iscsi_set_param(cls_conn, param, buf, buflen);
2198 if (!err)
2199 err = cxgbi_conn_max_recv_dlength(conn);
2200 break;
2201 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
2202 err = iscsi_set_param(cls_conn, param, buf, buflen);
2203 if (!err)
2204 err = cxgbi_conn_max_xmit_dlength(conn);
2205 break;
2206 default:
2207 return iscsi_set_param(cls_conn, param, buf, buflen);
2208 }
2209 return err;
2210}
2211EXPORT_SYMBOL_GPL(cxgbi_set_conn_param);
2212
2213int cxgbi_get_conn_param(struct iscsi_cls_conn *cls_conn,
2214 enum iscsi_param param, char *buf)
2215{
2216 struct iscsi_conn *iconn = cls_conn->dd_data;
2217 int len;
2218
2219 log_debug(1 << CXGBI_DBG_ISCSI,
2220 "cls_conn 0x%p, param %d.\n", cls_conn, param);
2221
2222 switch (param) {
2223 case ISCSI_PARAM_CONN_PORT:
2224 spin_lock_bh(&iconn->session->lock);
2225 len = sprintf(buf, "%hu\n", iconn->portal_port);
2226 spin_unlock_bh(&iconn->session->lock);
2227 break;
2228 case ISCSI_PARAM_CONN_ADDRESS:
2229 spin_lock_bh(&iconn->session->lock);
2230 len = sprintf(buf, "%s\n", iconn->portal_address);
2231 spin_unlock_bh(&iconn->session->lock);
2232 break;
2233 default:
2234 return iscsi_conn_get_param(cls_conn, param, buf);
2235 }
2236 return len;
2237}
2238EXPORT_SYMBOL_GPL(cxgbi_get_conn_param);
2239
2240struct iscsi_cls_conn *
2241cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid)
2242{
2243 struct iscsi_cls_conn *cls_conn;
2244 struct iscsi_conn *conn;
2245 struct iscsi_tcp_conn *tcp_conn;
2246 struct cxgbi_conn *cconn;
2247
2248 cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid);
2249 if (!cls_conn)
2250 return NULL;
2251
2252 conn = cls_conn->dd_data;
2253 tcp_conn = conn->dd_data;
2254 cconn = tcp_conn->dd_data;
2255 cconn->iconn = conn;
2256
2257 log_debug(1 << CXGBI_DBG_ISCSI,
2258 "cid %u(0x%x), cls 0x%p,0x%p, conn 0x%p,0x%p,0x%p.\n",
2259 cid, cid, cls_session, cls_conn, conn, tcp_conn, cconn);
2260
2261 return cls_conn;
2262}
2263EXPORT_SYMBOL_GPL(cxgbi_create_conn);
2264
/*
 * cxgbi_bind_conn - bind an iscsi connection to an offloaded endpoint
 * @cls_session:   owning transport-class session
 * @cls_conn:      connection to bind
 * @transport_eph: endpoint handle from cxgbi_ep_connect()
 * @is_leading:    non-zero for the session's leading connection
 *
 * Programs the ddp page index on the hardware connection, wires the
 * socket's user_data to the iscsi conn under the callback lock, derives
 * the per-connection tag index width from cmds_max, caches the portal
 * address/port, and primes the libiscsi_tcp receive machinery.
 * Returns 0 or negative errno.
 */
int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
				struct iscsi_cls_conn *cls_conn,
				u64 transport_eph, int is_leading)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct iscsi_endpoint *ep;
	struct cxgbi_endpoint *cep;
	struct cxgbi_sock *csk;
	int err;

	ep = iscsi_lookup_endpoint(transport_eph);
	if (!ep)
		return -EINVAL;

	/* setup ddp pagesize */
	cep = ep->dd_data;
	csk = cep->csk;
	err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, page_idx, 0);
	if (err < 0)
		return err;

	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
	if (err)
		return -EINVAL;

	/* calculate the tag idx bits needed for this conn based on cmds_max */
	cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1;

	write_lock_bh(&csk->callback_lock);
	csk->user_data = conn;
	cconn->chba = cep->chba;
	cconn->cep = cep;
	cep->cconn = cconn;
	write_unlock_bh(&csk->callback_lock);

	cxgbi_conn_max_xmit_dlength(conn);
	cxgbi_conn_max_recv_dlength(conn);

	/* cache the peer address for ISCSI_PARAM_CONN_ADDRESS/PORT */
	spin_lock_bh(&conn->session->lock);
	sprintf(conn->portal_address, "%pI4", &csk->daddr.sin_addr.s_addr);
	conn->portal_port = ntohs(csk->daddr.sin_port);
	spin_unlock_bh(&conn->session->lock);

	log_debug(1 << CXGBI_DBG_ISCSI,
		"cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n",
		cls_session, cls_conn, ep, cconn, csk);
	/* init recv engine */
	iscsi_tcp_hdr_recv_prep(tcp_conn);

	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_bind_conn);
2319
/*
 * cxgbi_create_session - transport-class session creation hook
 * @ep:            connected endpoint (required; carries the hba/host)
 * @cmds_max:      maximum outstanding commands
 * @qdepth:        requested queue depth (unused here)
 * @initial_cmdsn: initial command sequence number
 *
 * Builds a libiscsi session sized for iscsi_tcp + cxgbi per-task data
 * and allocates the r2t pool. Returns the class session or NULL.
 */
struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *ep,
						u16 cmds_max, u16 qdepth,
						u32 initial_cmdsn)
{
	struct cxgbi_endpoint *cep;
	struct cxgbi_hba *chba;
	struct Scsi_Host *shost;
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *session;

	if (!ep) {
		pr_err("missing endpoint.\n");
		return NULL;
	}

	cep = ep->dd_data;
	chba = cep->chba;
	shost = chba->shost;

	/* the hba must be the one embedded in its own scsi host */
	BUG_ON(chba != iscsi_host_priv(shost));

	cls_session = iscsi_session_setup(chba->cdev->itp, shost,
					cmds_max, 0,
					sizeof(struct iscsi_tcp_task) +
					sizeof(struct cxgbi_task_data),
					initial_cmdsn, ISCSI_MAX_TARGET);
	if (!cls_session)
		return NULL;

	session = cls_session->dd_data;
	if (iscsi_tcp_r2tpool_alloc(session))
		goto remove_session;

	log_debug(1 << CXGBI_DBG_ISCSI,
		"ep 0x%p, cls sess 0x%p.\n", ep, cls_session);
	return cls_session;

remove_session:
	iscsi_session_teardown(cls_session);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_create_session);
2362
2363void cxgbi_destroy_session(struct iscsi_cls_session *cls_session)
2364{
2365 log_debug(1 << CXGBI_DBG_ISCSI,
2366 "cls sess 0x%p.\n", cls_session);
2367
2368 iscsi_tcp_r2tpool_free(cls_session->dd_data);
2369 iscsi_session_teardown(cls_session);
2370}
2371EXPORT_SYMBOL_GPL(cxgbi_destroy_session);
2372
/*
 * cxgbi_set_host_param - transport-class host set-parameter hook
 * @shost:  scsi host backing the cxgbi hba
 * @param:  parameter id
 * @buf:    parameter value as a string
 * @buflen: length of @buf
 *
 * Only IPADDRESS is acted on (pushed to the adapter); HWADDRESS and
 * NETDEV_NAME are read-only here and silently accepted. Everything
 * else goes to libiscsi. Requires the hba's netdev to be set.
 */
int cxgbi_set_host_param(struct Scsi_Host *shost, enum iscsi_host_param param,
			char *buf, int buflen)
{
	struct cxgbi_hba *chba = iscsi_host_priv(shost);

	if (!chba->ndev) {
		shost_printk(KERN_ERR, shost, "Could not get host param. "
				"netdev for host not set.\n");
		return -ENODEV;
	}

	log_debug(1 << CXGBI_DBG_ISCSI,
		"shost 0x%p, hba 0x%p,%s, param %d, buf(%d) %s.\n",
		shost, chba, chba->ndev->name, param, buflen, buf);

	switch (param) {
	case ISCSI_HOST_PARAM_IPADDRESS:
	{
		__be32 addr = in_aton(buf);
		log_debug(1 << CXGBI_DBG_ISCSI,
			"hba %s, req. ipv4 %pI4.\n", chba->ndev->name, &addr);
		cxgbi_set_iscsi_ipv4(chba, addr);
		return 0;
	}
	case ISCSI_HOST_PARAM_HWADDRESS:
	case ISCSI_HOST_PARAM_NETDEV_NAME:
		/* fixed by the underlying netdev; nothing to set */
		return 0;
	default:
		return iscsi_host_set_param(shost, param, buf, buflen);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_set_host_param);
2405
/*
 * cxgbi_get_host_param - transport-class host get-parameter hook
 * @shost: scsi host backing the cxgbi hba
 * @param: parameter id
 * @buf:   output buffer
 *
 * Serves the MAC, netdev name and iscsi ipv4 address from the hba's
 * netdev; everything else goes to libiscsi. Returns bytes written or
 * negative errno (-ENODEV if the netdev is not set).
 */
int cxgbi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param,
		       char *buf)
{
	struct cxgbi_hba *chba = iscsi_host_priv(shost);
	int len = 0;

	if (!chba->ndev) {
		shost_printk(KERN_ERR, shost, "Could not get host param. "
				"netdev for host not set.\n");
		return -ENODEV;
	}

	log_debug(1 << CXGBI_DBG_ISCSI,
		"shost 0x%p, hba 0x%p,%s, param %d.\n",
		shost, chba, chba->ndev->name, param);

	switch (param) {
	case ISCSI_HOST_PARAM_HWADDRESS:
		len = sysfs_format_mac(buf, chba->ndev->dev_addr, 6);
		break;
	case ISCSI_HOST_PARAM_NETDEV_NAME:
		len = sprintf(buf, "%s\n", chba->ndev->name);
		break;
	case ISCSI_HOST_PARAM_IPADDRESS:
	{
		__be32 addr;

		addr = cxgbi_get_iscsi_ipv4(chba);
		len = sprintf(buf, "%pI4", &addr);
		log_debug(1 << CXGBI_DBG_ISCSI,
			"hba %s, ipv4 %pI4.\n", chba->ndev->name, &addr);
		break;
	}
	default:
		return iscsi_host_get_param(shost, param, buf);
	}

	return len;
}
EXPORT_SYMBOL_GPL(cxgbi_get_host_param);
2446
/*
 * cxgbi_ep_connect - transport-class endpoint connect hook
 * @ep_connect is invoked by the iscsi transport to open an offloaded
 * TCP connection to @dst_addr.
 * @shost:        optional scsi host the caller wants to connect through
 * @dst_addr:     target portal address
 * @non_blocking: ignored; completion is polled via cxgbi_ep_poll()
 *
 * Routes to an offloaded socket, verifies it egresses through the
 * requested host (if any), allocates a source port, and starts the
 * active open. Returns the new endpoint or ERR_PTR(); on any failure
 * after the route lookup, the socket reference and state are released.
 */
struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost,
					struct sockaddr *dst_addr,
					int non_blocking)
{
	struct iscsi_endpoint *ep;
	struct cxgbi_endpoint *cep;
	struct cxgbi_hba *hba = NULL;
	struct cxgbi_sock *csk;
	int err = -EINVAL;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
		"shost 0x%p, non_blocking %d, dst_addr 0x%p.\n",
		shost, non_blocking, dst_addr);

	if (shost) {
		hba = iscsi_host_priv(shost);
		if (!hba) {
			pr_info("shost 0x%p, priv NULL.\n", shost);
			goto err_out;
		}
	}

	/* route lookup produces (or finds) the offloaded socket */
	csk = cxgbi_check_route(dst_addr);
	if (IS_ERR(csk))
		return (struct iscsi_endpoint *)csk;
	cxgbi_sock_get(csk);

	/* the route must egress through the requested host's port */
	if (!hba)
		hba = csk->cdev->hbas[csk->port_id];
	else if (hba != csk->cdev->hbas[csk->port_id]) {
		pr_info("Could not connect through requested host %u"
			"hba 0x%p != 0x%p (%u).\n",
			shost->host_no, hba,
			csk->cdev->hbas[csk->port_id], csk->port_id);
		err = -ENOSPC;
		goto release_conn;
	}

	err = sock_get_port(csk);
	if (err)
		goto release_conn;

	cxgbi_sock_set_state(csk, CTP_CONNECTING);
	err = csk->cdev->csk_init_act_open(csk);
	if (err)
		goto release_conn;

	if (cxgbi_sock_is_closing(csk)) {
		err = -ENOSPC;
		pr_info("csk 0x%p is closing.\n", csk);
		goto release_conn;
	}

	ep = iscsi_create_endpoint(sizeof(*cep));
	if (!ep) {
		err = -ENOMEM;
		pr_info("iscsi alloc ep, OOM.\n");
		goto release_conn;
	}

	cep = ep->dd_data;
	cep->csk = csk;
	cep->chba = hba;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
		"ep 0x%p, cep 0x%p, csk 0x%p, hba 0x%p,%s.\n",
		ep, cep, csk, hba, hba->ndev->name);
	return ep;

release_conn:
	cxgbi_sock_put(csk);
	cxgbi_sock_closed(csk);
err_out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(cxgbi_ep_connect);
2523
2524int cxgbi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
2525{
2526 struct cxgbi_endpoint *cep = ep->dd_data;
2527 struct cxgbi_sock *csk = cep->csk;
2528
2529 if (!cxgbi_sock_is_established(csk))
2530 return 0;
2531 return 1;
2532}
2533EXPORT_SYMBOL_GPL(cxgbi_ep_poll);
2534
/*
 * cxgbi_ep_disconnect - transport-class endpoint disconnect hook
 * @ep: endpoint to tear down
 *
 * Detaches the socket from the iscsi connection (tx suspended, callback
 * user_data cleared under the callback lock), destroys the endpoint,
 * then closes the socket - actively if it ever reached ESTABLISHED,
 * otherwise by direct state cleanup - and drops the endpoint's
 * reference taken at connect time.
 */
void cxgbi_ep_disconnect(struct iscsi_endpoint *ep)
{
	struct cxgbi_endpoint *cep = ep->dd_data;
	struct cxgbi_conn *cconn = cep->cconn;
	struct cxgbi_sock *csk = cep->csk;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
		"ep 0x%p, cep 0x%p, cconn 0x%p, csk 0x%p,%u,0x%lx.\n",
		ep, cep, cconn, csk, csk->state, csk->flags);

	if (cconn && cconn->iconn) {
		iscsi_suspend_tx(cconn->iconn);
		write_lock_bh(&csk->callback_lock);
		cep->csk->user_data = NULL;
		cconn->cep = NULL;
		write_unlock_bh(&csk->callback_lock);
	}
	iscsi_destroy_endpoint(ep);

	if (likely(csk->state >= CTP_ESTABLISHED))
		need_active_close(csk);
	else
		cxgbi_sock_closed(csk);

	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_ep_disconnect);
2562
/*
 * cxgbi_iscsi_init - register an iscsi transport for a cxgbi driver
 * @itp: transport template to register
 * @stt: output, the scsi transport template returned by the core
 *
 * Returns 0 on success, -ENODEV if registration fails.
 */
int cxgbi_iscsi_init(struct iscsi_transport *itp,
			struct scsi_transport_template **stt)
{
	*stt = iscsi_register_transport(itp);
	if (*stt == NULL) {
		pr_err("unable to register %s transport 0x%p.\n",
			itp->name, itp);
		return -ENODEV;
	}
	log_debug(1 << CXGBI_DBG_ISCSI,
		"%s, registered iscsi transport 0x%p.\n",
		itp->name, stt);
	/* NOTE(review): the log above prints the address of the caller's
	 * stt pointer, not *stt - probably meant *stt; confirm intent */
	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_iscsi_init);
2578
2579void cxgbi_iscsi_cleanup(struct iscsi_transport *itp,
2580 struct scsi_transport_template **stt)
2581{
2582 if (*stt) {
2583 log_debug(1 << CXGBI_DBG_ISCSI,
2584 "de-register transport 0x%p, %s, stt 0x%p.\n",
2585 itp, itp->name, *stt);
2586 *stt = NULL;
2587 iscsi_unregister_transport(itp);
2588 }
2589}
2590EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup);
2591
/*
 * libcxgbi_init_module - module init: derive the software tag layout
 * from libiscsi's ITT/AGE masks and probe the host page size for ddp.
 */
static int __init libcxgbi_init_module(void)
{
	sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1;
	sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1;

	pr_info("tag itt 0x%x, %u bits, age 0x%x, %u bits.\n",
		ISCSI_ITT_MASK, sw_tag_idx_bits,
		ISCSI_AGE_MASK, sw_tag_age_bits);

	ddp_setup_host_page_size();
	return 0;
}
2604
2605static void __exit libcxgbi_exit_module(void)
2606{
2607 cxgbi_device_unregister_all(0xFF);
2608 return;
2609}
2610
2611module_init(libcxgbi_init_module);
2612module_exit(libcxgbi_exit_module);
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
new file mode 100644
index 000000000000..c57d59db000c
--- /dev/null
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -0,0 +1,745 @@
1/*
2 * libcxgbi.h: Chelsio common library for T3/T4 iSCSI driver.
3 *
4 * Copyright (c) 2010 Chelsio Communications, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Written by: Karen Xie (kxie@chelsio.com)
11 * Written by: Rakesh Ranjan (rranjan@chelsio.com)
12 */
13
14#ifndef __LIBCXGBI_H__
15#define __LIBCXGBI_H__
16
17#include <linux/kernel.h>
18#include <linux/errno.h>
19#include <linux/types.h>
20#include <linux/debugfs.h>
21#include <linux/list.h>
22#include <linux/netdevice.h>
23#include <linux/if_vlan.h>
24#include <linux/scatterlist.h>
25#include <linux/skbuff.h>
26#include <linux/vmalloc.h>
27#include <scsi/scsi_device.h>
28#include <scsi/libiscsi_tcp.h>
29
30enum cxgbi_dbg_flag {
31 CXGBI_DBG_ISCSI,
32 CXGBI_DBG_DDP,
33 CXGBI_DBG_TOE,
34 CXGBI_DBG_SOCK,
35
36 CXGBI_DBG_PDU_TX,
37 CXGBI_DBG_PDU_RX,
38 CXGBI_DBG_DEV,
39};
40
41#define log_debug(level, fmt, ...) \
42 do { \
43 if (dbg_level & (level)) \
44 pr_info(fmt, ##__VA_ARGS__); \
45 } while (0)
46
47/* max. connections per adapter */
48#define CXGBI_MAX_CONN 16384
49
50/* always allocate rooms for AHS */
51#define SKB_TX_ISCSI_PDU_HEADER_MAX \
52 (sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE)
53
54#define ISCSI_PDU_NONPAYLOAD_LEN 312 /* bhs(48) + ahs(256) + digest(8)*/
55
56/*
57 * align pdu size to multiple of 512 for better performance
58 */
59#define cxgbi_align_pdu_size(n) do { n = (n) & (~511); } while (0)
60
61#define ULP2_MODE_ISCSI 2
62
63#define ULP2_MAX_PKT_SIZE 16224
64#define ULP2_MAX_PDU_PAYLOAD \
65 (ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_LEN)
66
67/*
68 * For iscsi connections HW may inserts digest bytes into the pdu. Those digest
69 * bytes are not sent by the host but are part of the TCP payload and therefore
70 * consume TCP sequence space.
71 */
/* Extra TCP payload the HW inserts per ULP submode:
 * none, header digest, data digest, both (4 bytes each). */
static const unsigned int ulp2_extra_len[] = { 0, 4, 4, 8 };

/* Number of HW-inserted digest bytes for @submode (low two bits used). */
static inline unsigned int cxgbi_ulp_extra_len(int submode)
{
	unsigned int idx = submode & 3;

	return ulp2_extra_len[idx];
}
77
78/*
79 * struct pagepod_hdr, pagepod - pagepod format
80 */
81
82#define CPL_RX_DDP_STATUS_DDP_SHIFT 16 /* ddp'able */
83#define CPL_RX_DDP_STATUS_PAD_SHIFT 19 /* pad error */
84#define CPL_RX_DDP_STATUS_HCRC_SHIFT 20 /* hcrc error */
85#define CPL_RX_DDP_STATUS_DCRC_SHIFT 21 /* dcrc error */
86
87struct cxgbi_pagepod_hdr {
88 u32 vld_tid;
89 u32 pgsz_tag_clr;
90 u32 max_offset;
91 u32 page_offset;
92 u64 rsvd;
93};
94
95#define PPOD_PAGES_MAX 4
96struct cxgbi_pagepod {
97 struct cxgbi_pagepod_hdr hdr;
98 u64 addr[PPOD_PAGES_MAX + 1];
99};
100
101struct cxgbi_tag_format {
102 unsigned char sw_bits;
103 unsigned char rsvd_bits;
104 unsigned char rsvd_shift;
105 unsigned char filler[1];
106 u32 rsvd_mask;
107};
108
109struct cxgbi_gather_list {
110 unsigned int tag;
111 unsigned int length;
112 unsigned int offset;
113 unsigned int nelem;
114 struct page **pages;
115 dma_addr_t phys_addr[0];
116};
117
118struct cxgbi_ddp_info {
119 struct kref refcnt;
120 struct cxgbi_device *cdev;
121 struct pci_dev *pdev;
122 unsigned int max_txsz;
123 unsigned int max_rxsz;
124 unsigned int llimit;
125 unsigned int ulimit;
126 unsigned int nppods;
127 unsigned int idx_last;
128 unsigned char idx_bits;
129 unsigned char filler[3];
130 unsigned int idx_mask;
131 unsigned int rsvd_tag_mask;
132 spinlock_t map_lock;
133 struct cxgbi_gather_list **gl_map;
134 struct sk_buff **gl_skb;
135};
136
137#define DDP_PGIDX_MAX 4
138#define DDP_THRESHOLD 2048
139
140#define PPOD_PAGES_SHIFT 2 /* 4 pages per pod */
141
142#define PPOD_SIZE sizeof(struct cxgbi_pagepod) /* 64 */
143#define PPOD_SIZE_SHIFT 6
144
145#define ULPMEM_DSGL_MAX_NPPODS 16 /* 1024/PPOD_SIZE */
146#define ULPMEM_IDATA_MAX_NPPODS 4 /* 256/PPOD_SIZE */
147#define PCIE_MEMWIN_MAX_NPPODS 16 /* 1024/PPOD_SIZE */
148
149#define PPOD_COLOR_SHIFT 0
150#define PPOD_COLOR(x) ((x) << PPOD_COLOR_SHIFT)
151
152#define PPOD_IDX_SHIFT 6
153#define PPOD_IDX_MAX_SIZE 24
154
155#define PPOD_TID_SHIFT 0
156#define PPOD_TID(x) ((x) << PPOD_TID_SHIFT)
157
158#define PPOD_TAG_SHIFT 6
159#define PPOD_TAG(x) ((x) << PPOD_TAG_SHIFT)
160
161#define PPOD_VALID_SHIFT 24
162#define PPOD_VALID(x) ((x) << PPOD_VALID_SHIFT)
163#define PPOD_VALID_FLAG PPOD_VALID(1U)
164
165/*
166 * sge_opaque_hdr -
167 * Opaque version of structure the SGE stores at skb->head of TX_DATA packets
168 * and for which we must reserve space.
169 */
170struct sge_opaque_hdr {
171 void *dev;
172 dma_addr_t addr[MAX_SKB_FRAGS + 1];
173};
174
175struct cxgbi_sock {
176 struct cxgbi_device *cdev;
177
178 int tid;
179 int atid;
180 unsigned long flags;
181 unsigned int mtu;
182 unsigned short rss_qid;
183 unsigned short txq_idx;
184 unsigned short advmss;
185 unsigned int tx_chan;
186 unsigned int rx_chan;
187 unsigned int mss_idx;
188 unsigned int smac_idx;
189 unsigned char port_id;
190 int wr_max_cred;
191 int wr_cred;
192 int wr_una_cred;
193 unsigned char hcrc_len;
194 unsigned char dcrc_len;
195
196 void *l2t;
197 struct sk_buff *wr_pending_head;
198 struct sk_buff *wr_pending_tail;
199 struct sk_buff *cpl_close;
200 struct sk_buff *cpl_abort_req;
201 struct sk_buff *cpl_abort_rpl;
202 struct sk_buff *skb_ulp_lhdr;
203 spinlock_t lock;
204 struct kref refcnt;
205 unsigned int state;
206 struct sockaddr_in saddr;
207 struct sockaddr_in daddr;
208 struct dst_entry *dst;
209 struct sk_buff_head receive_queue;
210 struct sk_buff_head write_queue;
211 struct timer_list retry_timer;
212 int err;
213 rwlock_t callback_lock;
214 void *user_data;
215
216 u32 rcv_nxt;
217 u32 copied_seq;
218 u32 rcv_wup;
219 u32 snd_nxt;
220 u32 snd_una;
221 u32 write_seq;
222};
223
224/*
225 * connection states
226 */
227enum cxgbi_sock_states{
228 CTP_CLOSED,
229 CTP_CONNECTING,
230 CTP_ACTIVE_OPEN,
231 CTP_ESTABLISHED,
232 CTP_ACTIVE_CLOSE,
233 CTP_PASSIVE_CLOSE,
234 CTP_CLOSE_WAIT_1,
235 CTP_CLOSE_WAIT_2,
236 CTP_ABORTING,
237};
238
239/*
240 * Connection flags -- many to track some close related events.
241 */
242enum cxgbi_sock_flags {
243 CTPF_ABORT_RPL_RCVD, /*received one ABORT_RPL_RSS message */
244 CTPF_ABORT_REQ_RCVD, /*received one ABORT_REQ_RSS message */
245 CTPF_ABORT_RPL_PENDING, /* expecting an abort reply */
246 CTPF_TX_DATA_SENT, /* already sent a TX_DATA WR */
247 CTPF_ACTIVE_CLOSE_NEEDED,/* need to be closed */
248 CTPF_HAS_ATID, /* reserved atid */
249 CTPF_HAS_TID, /* reserved hw tid */
250 CTPF_OFFLOAD_DOWN, /* offload function off */
251};
252
253struct cxgbi_skb_rx_cb {
254 __u32 ddigest;
255 __u32 pdulen;
256};
257
258struct cxgbi_skb_tx_cb {
259 void *l2t;
260 struct sk_buff *wr_next;
261};
262
263enum cxgbi_skcb_flags {
264 SKCBF_TX_NEED_HDR, /* packet needs a header */
265 SKCBF_RX_COALESCED, /* received whole pdu */
266 SKCBF_RX_HDR, /* recieved pdu header */
267 SKCBF_RX_DATA, /* recieved pdu payload */
268 SKCBF_RX_STATUS, /* recieved ddp status */
269 SKCBF_RX_DATA_DDPD, /* pdu payload ddp'd */
270 SKCBF_RX_HCRC_ERR, /* header digest error */
271 SKCBF_RX_DCRC_ERR, /* data digest error */
272 SKCBF_RX_PAD_ERR, /* padding byte error */
273};
274
275struct cxgbi_skb_cb {
276 unsigned char ulp_mode;
277 unsigned long flags;
278 unsigned int seq;
279 union {
280 struct cxgbi_skb_rx_cb rx;
281 struct cxgbi_skb_tx_cb tx;
282 };
283};
284
285#define CXGBI_SKB_CB(skb) ((struct cxgbi_skb_cb *)&((skb)->cb[0]))
286#define cxgbi_skcb_flags(skb) (CXGBI_SKB_CB(skb)->flags)
287#define cxgbi_skcb_ulp_mode(skb) (CXGBI_SKB_CB(skb)->ulp_mode)
288#define cxgbi_skcb_tcp_seq(skb) (CXGBI_SKB_CB(skb)->seq)
289#define cxgbi_skcb_rx_ddigest(skb) (CXGBI_SKB_CB(skb)->rx.ddigest)
290#define cxgbi_skcb_rx_pdulen(skb) (CXGBI_SKB_CB(skb)->rx.pdulen)
291#define cxgbi_skcb_tx_wr_next(skb) (CXGBI_SKB_CB(skb)->tx.wr_next)
292
/* Set @flag in the skb's cxgbi control block. Uses the non-atomic
 * __set_bit — assumes the skb is owned by a single context here. */
static inline void cxgbi_skcb_set_flag(struct sk_buff *skb,
					enum cxgbi_skcb_flags flag)
{
	__set_bit(flag, &(cxgbi_skcb_flags(skb)));
}

/* Clear @flag in the skb's cxgbi control block (non-atomic). */
static inline void cxgbi_skcb_clear_flag(struct sk_buff *skb,
					enum cxgbi_skcb_flags flag)
{
	__clear_bit(flag, &(cxgbi_skcb_flags(skb)));
}

/* Return non-zero iff @flag is set in the skb's cxgbi control block. */
static inline int cxgbi_skcb_test_flag(struct sk_buff *skb,
					enum cxgbi_skcb_flags flag)
{
	return test_bit(flag, &(cxgbi_skcb_flags(skb)));
}
310
/* Set a connection @flag on @csk and log state/flags for debugging.
 * Non-atomic __set_bit — callers presumably serialize via csk->lock;
 * TODO confirm against call sites. */
static inline void cxgbi_sock_set_flag(struct cxgbi_sock *csk,
					enum cxgbi_sock_flags flag)
{
	__set_bit(flag, &csk->flags);
	log_debug(1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, bit %d.\n",
		csk, csk->state, csk->flags, flag);
}

/* Clear a connection @flag on @csk and log (non-atomic). */
static inline void cxgbi_sock_clear_flag(struct cxgbi_sock *csk,
					enum cxgbi_sock_flags flag)
{
	__clear_bit(flag, &csk->flags);
	log_debug(1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, bit %d.\n",
		csk, csk->state, csk->flags, flag);
}

/* Test a connection @flag; NULL-tolerant (a NULL csk reads as "not set"). */
static inline int cxgbi_sock_flag(struct cxgbi_sock *csk,
				enum cxgbi_sock_flags flag)
{
	if (csk == NULL)
		return 0;
	return test_bit(flag, &csk->flags);
}
336
/* Transition @csk to @state (one of enum cxgbi_sock_states), logging the
 * old state and flags first for connection-lifecycle debugging. */
static inline void cxgbi_sock_set_state(struct cxgbi_sock *csk, int state)
{
	log_debug(1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, state -> %u.\n",
		csk, csk->state, csk->flags, state);
	csk->state = state;
}
344
345static inline void cxgbi_sock_free(struct kref *kref)
346{
347 struct cxgbi_sock *csk = container_of(kref,
348 struct cxgbi_sock,
349 refcnt);
350 if (csk) {
351 log_debug(1 << CXGBI_DBG_SOCK,
352 "free csk 0x%p, state %u, flags 0x%lx\n",
353 csk, csk->state, csk->flags);
354 kfree(csk);
355 }
356}
357
/* Drop one reference on @csk; cxgbi_sock_free() runs on the last put.
 * @fn is the calling function's name, logged for refcount debugging. */
static inline void __cxgbi_sock_put(const char *fn, struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK,
		"%s, put csk 0x%p, ref %u-1.\n",
		fn, csk, atomic_read(&csk->refcnt.refcount));
	kref_put(&csk->refcnt, cxgbi_sock_free);
}
/* Convenience wrapper that stamps the caller's name into the log. */
#define cxgbi_sock_put(csk)	__cxgbi_sock_put(__func__, csk)

/* Take an extra reference on @csk, logging the caller for debugging. */
static inline void __cxgbi_sock_get(const char *fn, struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK,
		"%s, get csk 0x%p, ref %u+1.\n",
		fn, csk, atomic_read(&csk->refcnt.refcount));
	kref_get(&csk->refcnt);
}
/* Convenience wrapper that stamps the caller's name into the log. */
#define cxgbi_sock_get(csk)	__cxgbi_sock_get(__func__, csk)
375
/* True once the connection has entered any close/abort state.
 * Relies on enum cxgbi_sock_states ordering: CTP_ACTIVE_CLOSE and every
 * later value are teardown states. */
static inline int cxgbi_sock_is_closing(struct cxgbi_sock *csk)
{
	return csk->state >= CTP_ACTIVE_CLOSE;
}

/* True while the connection is fully established (CTP_ESTABLISHED). */
static inline int cxgbi_sock_is_established(struct cxgbi_sock *csk)
{
	return csk->state == CTP_ESTABLISHED;
}

/* Free every skb still queued for transmit on @csk's write queue. */
static inline void cxgbi_sock_purge_write_queue(struct cxgbi_sock *csk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&csk->write_queue)))
		__kfree_skb(skb);
}
393
394static inline unsigned int cxgbi_sock_compute_wscale(unsigned int win)
395{
396 unsigned int wscale = 0;
397
398 while (wscale < 14 && (65535 << wscale) < win)
399 wscale++;
400 return wscale;
401}
402
/* Allocate an skb for a CPL work request: @wrlen bytes of WR header
 * (accounted into the skb via __skb_put) plus room for @dlen bytes of
 * trailing data. Logs and returns NULL on allocation failure.
 * Note: the memset starts at skb->head and covers wrlen + dlen, i.e. it
 * also zeroes the not-yet-put data area. */
static inline struct sk_buff *alloc_wr(int wrlen, int dlen, gfp_t gfp)
{
	struct sk_buff *skb = alloc_skb(wrlen + dlen, gfp);

	if (skb) {
		__skb_put(skb, wrlen);
		memset(skb->head, 0, wrlen + dlen);
	} else
		pr_info("alloc cpl wr skb %u+%u, OOM.\n", wrlen, dlen);
	return skb;
}
414
415
416/*
417 * The number of WRs needed for an skb depends on the number of fragments
418 * in the skb and whether it has any payload in its main body. This maps the
419 * length of the gather list represented by an skb into the # of necessary WRs.
420 * The extra two fragments are for iscsi bhs and payload padding.
421 */
422#define SKB_WR_LIST_SIZE (MAX_SKB_FRAGS + 2)
423
/* Drop the whole pending-WR list (head and tail) without freeing the
 * skbs; used when the list's contents are reclaimed elsewhere. */
static inline void cxgbi_sock_reset_wr_list(struct cxgbi_sock *csk)
{
	csk->wr_pending_head = csk->wr_pending_tail = NULL;
}

/* Append @skb to @csk's pending-WR list, which is singly linked through
 * the skb control block's tx.wr_next field. */
static inline void cxgbi_sock_enqueue_wr(struct cxgbi_sock *csk,
					struct sk_buff *skb)
{
	cxgbi_skcb_tx_wr_next(skb) = NULL;
	/*
	 * We want to take an extra reference since both us and the driver
	 * need to free the packet before it's really freed. We know there's
	 * just one user currently so we use atomic_set rather than skb_get
	 * to avoid the atomic op.
	 */
	atomic_set(&skb->users, 2);

	if (!csk->wr_pending_head)
		csk->wr_pending_head = skb;
	else
		cxgbi_skcb_tx_wr_next(csk->wr_pending_tail) = skb;
	csk->wr_pending_tail = skb;
}
447
/* Sum the WR credits consumed by every skb on the pending-WR list.
 * NOTE(review): skb->csum appears to be repurposed by this driver to
 * carry the per-skb WR credit count — confirm against the tx path that
 * fills it in. */
static inline int cxgbi_sock_count_pending_wrs(const struct cxgbi_sock *csk)
{
	int n = 0;
	const struct sk_buff *skb = csk->wr_pending_head;

	while (skb) {
		n += skb->csum;
		skb = cxgbi_skcb_tx_wr_next(skb);
	}
	return n;
}
459
/* Peek at the oldest pending WR without removing it; NULL if none. */
static inline struct sk_buff *cxgbi_sock_peek_wr(const struct cxgbi_sock *csk)
{
	return csk->wr_pending_head;
}
464
465static inline struct sk_buff *cxgbi_sock_dequeue_wr(struct cxgbi_sock *csk)
466{
467 struct sk_buff *skb = csk->wr_pending_head;
468
469 if (likely(skb)) {
470 csk->wr_pending_head = cxgbi_skcb_tx_wr_next(skb);
471 cxgbi_skcb_tx_wr_next(skb) = NULL;
472 }
473 return skb;
474}
475
476void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *);
477void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *);
478void cxgbi_sock_skb_entail(struct cxgbi_sock *, struct sk_buff *);
479void cxgbi_sock_fail_act_open(struct cxgbi_sock *, int);
480void cxgbi_sock_act_open_req_arp_failure(void *, struct sk_buff *);
481void cxgbi_sock_closed(struct cxgbi_sock *);
482void cxgbi_sock_established(struct cxgbi_sock *, unsigned int, unsigned int);
483void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *);
484void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *);
485void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *, u32);
486void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *, unsigned int, unsigned int,
487 int);
488unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *, unsigned int);
489void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *);
490
491struct cxgbi_hba {
492 struct net_device *ndev;
493 struct net_device *vdev; /* vlan dev */
494 struct Scsi_Host *shost;
495 struct cxgbi_device *cdev;
496 __be32 ipv4addr;
497 unsigned char port_id;
498};
499
500struct cxgbi_ports_map {
501 unsigned int max_connect;
502 unsigned int used;
503 unsigned short sport_base;
504 spinlock_t lock;
505 unsigned int next;
506 struct cxgbi_sock **port_csk;
507};
508
509#define CXGBI_FLAG_DEV_T3 0x1
510#define CXGBI_FLAG_DEV_T4 0x2
511#define CXGBI_FLAG_ADAPTER_RESET 0x4
512#define CXGBI_FLAG_IPV4_SET 0x10
513struct cxgbi_device {
514 struct list_head list_head;
515 unsigned int flags;
516 struct net_device **ports;
517 void *lldev;
518 struct cxgbi_hba **hbas;
519 const unsigned short *mtus;
520 unsigned char nmtus;
521 unsigned char nports;
522 struct pci_dev *pdev;
523 struct dentry *debugfs_root;
524 struct iscsi_transport *itp;
525
526 unsigned int pfvf;
527 unsigned int snd_win;
528 unsigned int rcv_win;
529 unsigned int rx_credit_thres;
530 unsigned int skb_tx_rsvd;
531 unsigned int skb_rx_extra; /* for msg coalesced mode */
532 unsigned int tx_max_size;
533 unsigned int rx_max_size;
534 struct cxgbi_ports_map pmap;
535 struct cxgbi_tag_format tag_format;
536 struct cxgbi_ddp_info *ddp;
537
538 void (*dev_ddp_cleanup)(struct cxgbi_device *);
539 void (*csk_ddp_free_gl_skb)(struct cxgbi_ddp_info *, int, int);
540 int (*csk_ddp_alloc_gl_skb)(struct cxgbi_ddp_info *, int, int, gfp_t);
541 int (*csk_ddp_set)(struct cxgbi_sock *, struct cxgbi_pagepod_hdr *,
542 unsigned int, unsigned int,
543 struct cxgbi_gather_list *);
544 void (*csk_ddp_clear)(struct cxgbi_hba *,
545 unsigned int, unsigned int, unsigned int);
546 int (*csk_ddp_setup_digest)(struct cxgbi_sock *,
547 unsigned int, int, int, int);
548 int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *,
549 unsigned int, int, bool);
550
551 void (*csk_release_offload_resources)(struct cxgbi_sock *);
552 int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *);
553 u32 (*csk_send_rx_credits)(struct cxgbi_sock *, u32);
554 int (*csk_push_tx_frames)(struct cxgbi_sock *, int);
555 void (*csk_send_abort_req)(struct cxgbi_sock *);
556 void (*csk_send_close_req)(struct cxgbi_sock *);
557 int (*csk_alloc_cpls)(struct cxgbi_sock *);
558 int (*csk_init_act_open)(struct cxgbi_sock *);
559
560 void *dd_data;
561};
562#define cxgbi_cdev_priv(cdev) ((cdev)->dd_data)
563
564struct cxgbi_conn {
565 struct cxgbi_endpoint *cep;
566 struct iscsi_conn *iconn;
567 struct cxgbi_hba *chba;
568 u32 task_idx_bits;
569};
570
571struct cxgbi_endpoint {
572 struct cxgbi_conn *cconn;
573 struct cxgbi_hba *chba;
574 struct cxgbi_sock *csk;
575};
576
577#define MAX_PDU_FRAGS ((ULP2_MAX_PDU_PAYLOAD + 512 - 1) / 512)
578struct cxgbi_task_data {
579 unsigned short nr_frags;
580 skb_frag_t frags[MAX_PDU_FRAGS];
581 struct sk_buff *skb;
582 unsigned int offset;
583 unsigned int count;
584 unsigned int sgoffset;
585};
586#define iscsi_task_cxgbi_data(task) \
587 ((task)->dd_data + sizeof(struct iscsi_tcp_task))
588
/* True iff @tag is a DDP (hardware) tag: the non-DDP marker bit — bit
 * (rsvd_bits + rsvd_shift - 1) — is clear. cxgbi_set_non_ddp_tag() is
 * the helper that sets that marker. */
static inline int cxgbi_is_ddp_tag(struct cxgbi_tag_format *tformat, u32 tag)
{
	return !(tag & (1 << (tformat->rsvd_bits + tformat->rsvd_shift - 1)));
}
593
594static inline int cxgbi_sw_tag_usable(struct cxgbi_tag_format *tformat,
595 u32 sw_tag)
596{
597 sw_tag >>= (32 - tformat->rsvd_bits);
598 return !sw_tag;
599}
600
/* Mark @sw_tag as a non-DDP tag by setting the marker bit at position
 * (rsvd_bits + rsvd_shift - 1). If the tag has bits at or above the
 * marker, those high bits are first relocated above it so no
 * information is lost.
 * NOTE(review): v2 shifts by (shift - 1) then left by shift, so bit
 * (shift - 1) lands in both v1 and v2 — presumably matched by the
 * re-pack in cxgbi_tag_nonrsvd_bits(); confirm the round-trip. */
static inline u32 cxgbi_set_non_ddp_tag(struct cxgbi_tag_format *tformat,
					u32 sw_tag)
{
	unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1;
	u32 mask = (1 << shift) - 1;

	if (sw_tag && (sw_tag & ~mask)) {
		u32 v1 = sw_tag & ((1 << shift) - 1);
		u32 v2 = (sw_tag >> (shift - 1)) << shift;

		return v2 | v1 | 1 << shift;
	}

	return sw_tag | 1 << shift;
}
616
/* Build the base of a DDP tag from @sw_tag: the low rsvd_shift bits stay
 * in place, the remaining software bits move above the hardware-reserved
 * field (rsvd_bits wide, starting at rsvd_shift), leaving that field
 * clear for the hardware to fill. */
static inline u32 cxgbi_ddp_tag_base(struct cxgbi_tag_format *tformat,
					u32 sw_tag)
{
	u32 mask = (1 << tformat->rsvd_shift) - 1;

	if (sw_tag && (sw_tag & ~mask)) {
		u32 v1 = sw_tag & mask;
		u32 v2 = sw_tag >> tformat->rsvd_shift;

		v2 <<= tformat->rsvd_bits + tformat->rsvd_shift;

		return v2 | v1;
	}

	return sw_tag;
}
633
/* Extract the hardware-reserved bit field from a DDP @tag; non-DDP tags
 * carry no reserved field, so 0 is returned for them. */
static inline u32 cxgbi_tag_rsvd_bits(struct cxgbi_tag_format *tformat,
					u32 tag)
{
	if (cxgbi_is_ddp_tag(tformat, tag))
		return (tag >> tformat->rsvd_shift) & tformat->rsvd_mask;

	return 0;
}

/* Recover the software-visible bits of @tag, undoing the bit split done
 * by cxgbi_ddp_tag_base() (DDP tags) or cxgbi_set_non_ddp_tag()
 * (non-DDP tags: strip the marker bit, then re-pack the halves). */
static inline u32 cxgbi_tag_nonrsvd_bits(struct cxgbi_tag_format *tformat,
					u32 tag)
{
	unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1;
	u32 v1, v2;

	if (cxgbi_is_ddp_tag(tformat, tag)) {
		/* low rsvd_shift bits + bits above the reserved field */
		v1 = tag & ((1 << tformat->rsvd_shift) - 1);
		v2 = (tag >> (shift + 1)) << tformat->rsvd_shift;
	} else {
		/* drop the non-DDP marker bit, then re-join the halves */
		u32 mask = (1 << shift) - 1;
		tag &= ~(1 << shift);
		v1 = tag & mask;
		v2 = (tag >> 1) & ~mask;
	}
	return v1 | v2;
}
660
661static inline void *cxgbi_alloc_big_mem(unsigned int size,
662 gfp_t gfp)
663{
664 void *p = kmalloc(size, gfp);
665 if (!p)
666 p = vmalloc(size);
667 if (p)
668 memset(p, 0, size);
669 return p;
670}
671
/* Release memory obtained from cxgbi_alloc_big_mem(), routing to the
 * allocator (vmalloc or slab) that actually provided it. */
static inline void cxgbi_free_big_mem(void *addr)
{
	if (!is_vmalloc_addr(addr)) {
		kfree(addr);
		return;
	}
	vfree(addr);
}
679
/* Record the iSCSI IPv4 address on @chba when the device supports an
 * explicitly programmed address (CXGBI_FLAG_IPV4_SET); otherwise just
 * log that the operation is unsupported on this netdev. */
static inline void cxgbi_set_iscsi_ipv4(struct cxgbi_hba *chba, __be32 ipaddr)
{
	if (chba->cdev->flags & CXGBI_FLAG_IPV4_SET)
		chba->ipv4addr = ipaddr;
	else
		pr_info("set iscsi ipv4 NOT supported, using %s ipv4.\n",
			chba->ndev->name);
}

/* Return the iSCSI IPv4 address recorded on @chba (network byte order). */
static inline __be32 cxgbi_get_iscsi_ipv4(struct cxgbi_hba *chba)
{
	return chba->ipv4addr;
}
693
694struct cxgbi_device *cxgbi_device_register(unsigned int, unsigned int);
695void cxgbi_device_unregister(struct cxgbi_device *);
696void cxgbi_device_unregister_all(unsigned int flag);
697struct cxgbi_device *cxgbi_device_find_by_lldev(void *);
698int cxgbi_hbas_add(struct cxgbi_device *, unsigned int, unsigned int,
699 struct scsi_host_template *,
700 struct scsi_transport_template *);
701void cxgbi_hbas_remove(struct cxgbi_device *);
702
703int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base,
704 unsigned int max_conn);
705void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev);
706
707void cxgbi_conn_tx_open(struct cxgbi_sock *);
708void cxgbi_conn_pdu_ready(struct cxgbi_sock *);
709int cxgbi_conn_alloc_pdu(struct iscsi_task *, u8);
710int cxgbi_conn_init_pdu(struct iscsi_task *, unsigned int , unsigned int);
711int cxgbi_conn_xmit_pdu(struct iscsi_task *);
712
713void cxgbi_cleanup_task(struct iscsi_task *task);
714
715void cxgbi_get_conn_stats(struct iscsi_cls_conn *, struct iscsi_stats *);
716int cxgbi_set_conn_param(struct iscsi_cls_conn *,
717 enum iscsi_param, char *, int);
718int cxgbi_get_conn_param(struct iscsi_cls_conn *, enum iscsi_param, char *);
719struct iscsi_cls_conn *cxgbi_create_conn(struct iscsi_cls_session *, u32);
720int cxgbi_bind_conn(struct iscsi_cls_session *,
721 struct iscsi_cls_conn *, u64, int);
722void cxgbi_destroy_session(struct iscsi_cls_session *);
723struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *,
724 u16, u16, u32);
725int cxgbi_set_host_param(struct Scsi_Host *,
726 enum iscsi_host_param, char *, int);
727int cxgbi_get_host_param(struct Scsi_Host *, enum iscsi_host_param, char *);
728struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *,
729 struct sockaddr *, int);
730int cxgbi_ep_poll(struct iscsi_endpoint *, int);
731void cxgbi_ep_disconnect(struct iscsi_endpoint *);
732
733int cxgbi_iscsi_init(struct iscsi_transport *,
734 struct scsi_transport_template **);
735void cxgbi_iscsi_cleanup(struct iscsi_transport *,
736 struct scsi_transport_template **);
737void cxgbi_parse_pdu_itt(struct iscsi_conn *, itt_t, int *, int *);
738int cxgbi_ddp_init(struct cxgbi_device *, unsigned int, unsigned int,
739 unsigned int, unsigned int);
740int cxgbi_ddp_cleanup(struct cxgbi_device *);
741void cxgbi_ddp_page_size_factor(int *);
742void cxgbi_ddp_ppod_clear(struct cxgbi_pagepod *);
743void cxgbi_ddp_ppod_set(struct cxgbi_pagepod *, struct cxgbi_pagepod_hdr *,
744 struct cxgbi_gather_list *, unsigned int);
745#endif /*__LIBCXGBI_H__*/
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 1a970a76b1b9..6b729324b8d3 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Generic SCSI-3 ALUA SCSI Device Handler 2 * Generic SCSI-3 ALUA SCSI Device Handler
3 * 3 *
4 * Copyright (C) 2007, 2008 Hannes Reinecke, SUSE Linux Products GmbH. 4 * Copyright (C) 2007-2010 Hannes Reinecke, SUSE Linux Products GmbH.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
@@ -20,17 +20,19 @@
20 * 20 *
21 */ 21 */
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/delay.h>
23#include <scsi/scsi.h> 24#include <scsi/scsi.h>
24#include <scsi/scsi_eh.h> 25#include <scsi/scsi_eh.h>
25#include <scsi/scsi_dh.h> 26#include <scsi/scsi_dh.h>
26 27
27#define ALUA_DH_NAME "alua" 28#define ALUA_DH_NAME "alua"
28#define ALUA_DH_VER "1.2" 29#define ALUA_DH_VER "1.3"
29 30
30#define TPGS_STATE_OPTIMIZED 0x0 31#define TPGS_STATE_OPTIMIZED 0x0
31#define TPGS_STATE_NONOPTIMIZED 0x1 32#define TPGS_STATE_NONOPTIMIZED 0x1
32#define TPGS_STATE_STANDBY 0x2 33#define TPGS_STATE_STANDBY 0x2
33#define TPGS_STATE_UNAVAILABLE 0x3 34#define TPGS_STATE_UNAVAILABLE 0x3
35#define TPGS_STATE_LBA_DEPENDENT 0x4
34#define TPGS_STATE_OFFLINE 0xe 36#define TPGS_STATE_OFFLINE 0xe
35#define TPGS_STATE_TRANSITIONING 0xf 37#define TPGS_STATE_TRANSITIONING 0xf
36 38
@@ -39,6 +41,7 @@
39#define TPGS_SUPPORT_NONOPTIMIZED 0x02 41#define TPGS_SUPPORT_NONOPTIMIZED 0x02
40#define TPGS_SUPPORT_STANDBY 0x04 42#define TPGS_SUPPORT_STANDBY 0x04
41#define TPGS_SUPPORT_UNAVAILABLE 0x08 43#define TPGS_SUPPORT_UNAVAILABLE 0x08
44#define TPGS_SUPPORT_LBA_DEPENDENT 0x10
42#define TPGS_SUPPORT_OFFLINE 0x40 45#define TPGS_SUPPORT_OFFLINE 0x40
43#define TPGS_SUPPORT_TRANSITION 0x80 46#define TPGS_SUPPORT_TRANSITION 0x80
44 47
@@ -460,6 +463,8 @@ static char print_alua_state(int state)
460 return 'S'; 463 return 'S';
461 case TPGS_STATE_UNAVAILABLE: 464 case TPGS_STATE_UNAVAILABLE:
462 return 'U'; 465 return 'U';
466 case TPGS_STATE_LBA_DEPENDENT:
467 return 'L';
463 case TPGS_STATE_OFFLINE: 468 case TPGS_STATE_OFFLINE:
464 return 'O'; 469 return 'O';
465 case TPGS_STATE_TRANSITIONING: 470 case TPGS_STATE_TRANSITIONING:
@@ -542,7 +547,9 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
542 int len, k, off, valid_states = 0; 547 int len, k, off, valid_states = 0;
543 char *ucp; 548 char *ucp;
544 unsigned err; 549 unsigned err;
550 unsigned long expiry, interval = 10;
545 551
552 expiry = round_jiffies_up(jiffies + ALUA_FAILOVER_TIMEOUT);
546 retry: 553 retry:
547 err = submit_rtpg(sdev, h); 554 err = submit_rtpg(sdev, h);
548 555
@@ -553,7 +560,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
553 return SCSI_DH_IO; 560 return SCSI_DH_IO;
554 561
555 err = alua_check_sense(sdev, &sense_hdr); 562 err = alua_check_sense(sdev, &sense_hdr);
556 if (err == ADD_TO_MLQUEUE) 563 if (err == ADD_TO_MLQUEUE && time_before(jiffies, expiry))
557 goto retry; 564 goto retry;
558 sdev_printk(KERN_INFO, sdev, 565 sdev_printk(KERN_INFO, sdev,
559 "%s: rtpg sense code %02x/%02x/%02x\n", 566 "%s: rtpg sense code %02x/%02x/%02x\n",
@@ -587,38 +594,37 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
587 } 594 }
588 595
589 sdev_printk(KERN_INFO, sdev, 596 sdev_printk(KERN_INFO, sdev,
590 "%s: port group %02x state %c supports %c%c%c%c%c%c\n", 597 "%s: port group %02x state %c supports %c%c%c%c%c%c%c\n",
591 ALUA_DH_NAME, h->group_id, print_alua_state(h->state), 598 ALUA_DH_NAME, h->group_id, print_alua_state(h->state),
592 valid_states&TPGS_SUPPORT_TRANSITION?'T':'t', 599 valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
593 valid_states&TPGS_SUPPORT_OFFLINE?'O':'o', 600 valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
601 valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
594 valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u', 602 valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
595 valid_states&TPGS_SUPPORT_STANDBY?'S':'s', 603 valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
596 valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n', 604 valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
597 valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a'); 605 valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');
598 606
599 if (h->tpgs & TPGS_MODE_EXPLICIT) { 607 switch (h->state) {
600 switch (h->state) { 608 case TPGS_STATE_TRANSITIONING:
601 case TPGS_STATE_TRANSITIONING: 609 if (time_before(jiffies, expiry)) {
602 /* State transition, retry */ 610 /* State transition, retry */
611 interval *= 10;
612 msleep(interval);
603 goto retry; 613 goto retry;
604 break;
605 case TPGS_STATE_OFFLINE:
606 /* Path is offline, fail */
607 err = SCSI_DH_DEV_OFFLINED;
608 break;
609 default:
610 break;
611 } 614 }
612 } else { 615 /* Transitioning time exceeded, set port to standby */
613 /* Only Implicit ALUA support */ 616 err = SCSI_DH_RETRY;
614 if (h->state == TPGS_STATE_OPTIMIZED || 617 h->state = TPGS_STATE_STANDBY;
615 h->state == TPGS_STATE_NONOPTIMIZED || 618 break;
616 h->state == TPGS_STATE_STANDBY) 619 case TPGS_STATE_OFFLINE:
617 /* Useable path if active */ 620 case TPGS_STATE_UNAVAILABLE:
618 err = SCSI_DH_OK; 621 /* Path unuseable for unavailable/offline */
619 else 622 err = SCSI_DH_DEV_OFFLINED;
620 /* Path unuseable for unavailable/offline */ 623 break;
621 err = SCSI_DH_DEV_OFFLINED; 624 default:
625 /* Useable path if active */
626 err = SCSI_DH_OK;
627 break;
622 } 628 }
623 return err; 629 return err;
624} 630}
@@ -672,7 +678,9 @@ static int alua_activate(struct scsi_device *sdev,
672 goto out; 678 goto out;
673 } 679 }
674 680
675 if (h->tpgs & TPGS_MODE_EXPLICIT && h->state != TPGS_STATE_OPTIMIZED) { 681 if (h->tpgs & TPGS_MODE_EXPLICIT &&
682 h->state != TPGS_STATE_OPTIMIZED &&
683 h->state != TPGS_STATE_LBA_DEPENDENT) {
676 h->callback_fn = fn; 684 h->callback_fn = fn;
677 h->callback_data = data; 685 h->callback_data = data;
678 err = submit_stpg(h); 686 err = submit_stpg(h);
@@ -698,8 +706,11 @@ static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
698 struct alua_dh_data *h = get_alua_data(sdev); 706 struct alua_dh_data *h = get_alua_data(sdev);
699 int ret = BLKPREP_OK; 707 int ret = BLKPREP_OK;
700 708
701 if (h->state != TPGS_STATE_OPTIMIZED && 709 if (h->state == TPGS_STATE_TRANSITIONING)
702 h->state != TPGS_STATE_NONOPTIMIZED) { 710 ret = BLKPREP_DEFER;
711 else if (h->state != TPGS_STATE_OPTIMIZED &&
712 h->state != TPGS_STATE_NONOPTIMIZED &&
713 h->state != TPGS_STATE_LBA_DEPENDENT) {
703 ret = BLKPREP_KILL; 714 ret = BLKPREP_KILL;
704 req->cmd_flags |= REQ_QUIET; 715 req->cmd_flags |= REQ_QUIET;
705 } 716 }
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 9eb7a9ebccae..bb63f1a1f808 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -80,8 +80,6 @@ static struct libfc_function_template fnic_transport_template = {
80static int fnic_slave_alloc(struct scsi_device *sdev) 80static int fnic_slave_alloc(struct scsi_device *sdev)
81{ 81{
82 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 82 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
83 struct fc_lport *lp = shost_priv(sdev->host);
84 struct fnic *fnic = lport_priv(lp);
85 83
86 sdev->tagged_supported = 1; 84 sdev->tagged_supported = 1;
87 85
@@ -89,8 +87,6 @@ static int fnic_slave_alloc(struct scsi_device *sdev)
89 return -ENXIO; 87 return -ENXIO;
90 88
91 scsi_activate_tcq(sdev, FNIC_DFLT_QUEUE_DEPTH); 89 scsi_activate_tcq(sdev, FNIC_DFLT_QUEUE_DEPTH);
92 rport->dev_loss_tmo = fnic->config.port_down_timeout / 1000;
93
94 return 0; 90 return 0;
95} 91}
96 92
@@ -113,6 +109,15 @@ static struct scsi_host_template fnic_host_template = {
113 .shost_attrs = fnic_attrs, 109 .shost_attrs = fnic_attrs,
114}; 110};
115 111
112static void
113fnic_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
114{
115 if (timeout)
116 rport->dev_loss_tmo = timeout;
117 else
118 rport->dev_loss_tmo = 1;
119}
120
116static void fnic_get_host_speed(struct Scsi_Host *shost); 121static void fnic_get_host_speed(struct Scsi_Host *shost);
117static struct scsi_transport_template *fnic_fc_transport; 122static struct scsi_transport_template *fnic_fc_transport;
118static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *); 123static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *);
@@ -140,6 +145,7 @@ static struct fc_function_template fnic_fc_functions = {
140 .show_starget_port_name = 1, 145 .show_starget_port_name = 1,
141 .show_starget_port_id = 1, 146 .show_starget_port_id = 1,
142 .show_rport_dev_loss_tmo = 1, 147 .show_rport_dev_loss_tmo = 1,
148 .set_rport_dev_loss_tmo = fnic_set_rport_dev_loss_tmo,
143 .issue_fc_host_lip = fnic_reset, 149 .issue_fc_host_lip = fnic_reset,
144 .get_fc_host_stats = fnic_get_stats, 150 .get_fc_host_stats = fnic_get_stats,
145 .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv), 151 .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
@@ -706,6 +712,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
706 goto err_out_free_exch_mgr; 712 goto err_out_free_exch_mgr;
707 } 713 }
708 fc_host_maxframe_size(lp->host) = lp->mfs; 714 fc_host_maxframe_size(lp->host) = lp->mfs;
715 fc_host_dev_loss_tmo(lp->host) = fnic->config.port_down_timeout / 1000;
709 716
710 sprintf(fc_host_symbolic_name(lp->host), 717 sprintf(fc_host_symbolic_name(lp->host),
711 DRV_NAME " v" DRV_VERSION " over %s", fnic->name); 718 DRV_NAME " v" DRV_VERSION " over %s", fnic->name);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 9f75a6d519a2..00d08b25425f 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -50,7 +50,6 @@ static unsigned int max_lun = IBMVFC_MAX_LUN;
50static unsigned int max_targets = IBMVFC_MAX_TARGETS; 50static unsigned int max_targets = IBMVFC_MAX_TARGETS;
51static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT; 51static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
52static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS; 52static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
53static unsigned int dev_loss_tmo = IBMVFC_DEV_LOSS_TMO;
54static unsigned int ibmvfc_debug = IBMVFC_DEBUG; 53static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
55static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL; 54static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
56static LIST_HEAD(ibmvfc_head); 55static LIST_HEAD(ibmvfc_head);
@@ -84,11 +83,6 @@ MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. "
84module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR); 83module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR);
85MODULE_PARM_DESC(debug, "Enable driver debug information. " 84MODULE_PARM_DESC(debug, "Enable driver debug information. "
86 "[Default=" __stringify(IBMVFC_DEBUG) "]"); 85 "[Default=" __stringify(IBMVFC_DEBUG) "]");
87module_param_named(dev_loss_tmo, dev_loss_tmo, uint, S_IRUGO | S_IWUSR);
88MODULE_PARM_DESC(dev_loss_tmo, "Maximum number of seconds that the FC "
89 "transport should insulate the loss of a remote port. Once this "
90 "value is exceeded, the scsi target is removed. "
91 "[Default=" __stringify(IBMVFC_DEV_LOSS_TMO) "]");
92module_param_named(log_level, log_level, uint, 0); 86module_param_named(log_level, log_level, uint, 0);
93MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. " 87MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. "
94 "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]"); 88 "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
@@ -2496,41 +2490,66 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
2496 LEAVE; 2490 LEAVE;
2497} 2491}
2498 2492
2499static const struct { 2493static const struct ibmvfc_async_desc ae_desc [] = {
2500 enum ibmvfc_async_event ae; 2494 { IBMVFC_AE_ELS_PLOGI, "PLOGI", IBMVFC_DEFAULT_LOG_LEVEL + 1 },
2501 const char *desc; 2495 { IBMVFC_AE_ELS_LOGO, "LOGO", IBMVFC_DEFAULT_LOG_LEVEL + 1 },
2502} ae_desc [] = { 2496 { IBMVFC_AE_ELS_PRLO, "PRLO", IBMVFC_DEFAULT_LOG_LEVEL + 1 },
2503 { IBMVFC_AE_ELS_PLOGI, "PLOGI" }, 2497 { IBMVFC_AE_SCN_NPORT, "N-Port SCN", IBMVFC_DEFAULT_LOG_LEVEL + 1 },
2504 { IBMVFC_AE_ELS_LOGO, "LOGO" }, 2498 { IBMVFC_AE_SCN_GROUP, "Group SCN", IBMVFC_DEFAULT_LOG_LEVEL + 1 },
2505 { IBMVFC_AE_ELS_PRLO, "PRLO" }, 2499 { IBMVFC_AE_SCN_DOMAIN, "Domain SCN", IBMVFC_DEFAULT_LOG_LEVEL },
2506 { IBMVFC_AE_SCN_NPORT, "N-Port SCN" }, 2500 { IBMVFC_AE_SCN_FABRIC, "Fabric SCN", IBMVFC_DEFAULT_LOG_LEVEL },
2507 { IBMVFC_AE_SCN_GROUP, "Group SCN" }, 2501 { IBMVFC_AE_LINK_UP, "Link Up", IBMVFC_DEFAULT_LOG_LEVEL },
2508 { IBMVFC_AE_SCN_DOMAIN, "Domain SCN" }, 2502 { IBMVFC_AE_LINK_DOWN, "Link Down", IBMVFC_DEFAULT_LOG_LEVEL },
2509 { IBMVFC_AE_SCN_FABRIC, "Fabric SCN" }, 2503 { IBMVFC_AE_LINK_DEAD, "Link Dead", IBMVFC_DEFAULT_LOG_LEVEL },
2510 { IBMVFC_AE_LINK_UP, "Link Up" }, 2504 { IBMVFC_AE_HALT, "Halt", IBMVFC_DEFAULT_LOG_LEVEL },
2511 { IBMVFC_AE_LINK_DOWN, "Link Down" }, 2505 { IBMVFC_AE_RESUME, "Resume", IBMVFC_DEFAULT_LOG_LEVEL },
2512 { IBMVFC_AE_LINK_DEAD, "Link Dead" }, 2506 { IBMVFC_AE_ADAPTER_FAILED, "Adapter Failed", IBMVFC_DEFAULT_LOG_LEVEL },
2513 { IBMVFC_AE_HALT, "Halt" },
2514 { IBMVFC_AE_RESUME, "Resume" },
2515 { IBMVFC_AE_ADAPTER_FAILED, "Adapter Failed" },
2516}; 2507};
2517 2508
2518static const char *unknown_ae = "Unknown async"; 2509static const struct ibmvfc_async_desc unknown_ae = {
2510 0, "Unknown async", IBMVFC_DEFAULT_LOG_LEVEL
2511};
2519 2512
2520/** 2513/**
2521 * ibmvfc_get_ae_desc - Get text description for async event 2514 * ibmvfc_get_ae_desc - Get text description for async event
2522 * @ae: async event 2515 * @ae: async event
2523 * 2516 *
2524 **/ 2517 **/
2525static const char *ibmvfc_get_ae_desc(u64 ae) 2518static const struct ibmvfc_async_desc *ibmvfc_get_ae_desc(u64 ae)
2526{ 2519{
2527 int i; 2520 int i;
2528 2521
2529 for (i = 0; i < ARRAY_SIZE(ae_desc); i++) 2522 for (i = 0; i < ARRAY_SIZE(ae_desc); i++)
2530 if (ae_desc[i].ae == ae) 2523 if (ae_desc[i].ae == ae)
2531 return ae_desc[i].desc; 2524 return &ae_desc[i];
2525
2526 return &unknown_ae;
2527}
2528
2529static const struct {
2530 enum ibmvfc_ae_link_state state;
2531 const char *desc;
2532} link_desc [] = {
2533 { IBMVFC_AE_LS_LINK_UP, " link up" },
2534 { IBMVFC_AE_LS_LINK_BOUNCED, " link bounced" },
2535 { IBMVFC_AE_LS_LINK_DOWN, " link down" },
2536 { IBMVFC_AE_LS_LINK_DEAD, " link dead" },
2537};
2532 2538
2533 return unknown_ae; 2539/**
2540 * ibmvfc_get_link_state - Get text description for link state
2541 * @state: link state
2542 *
2543 **/
2544static const char *ibmvfc_get_link_state(enum ibmvfc_ae_link_state state)
2545{
2546 int i;
2547
2548 for (i = 0; i < ARRAY_SIZE(link_desc); i++)
2549 if (link_desc[i].state == state)
2550 return link_desc[i].desc;
2551
2552 return "";
2534} 2553}
2535 2554
2536/** 2555/**
@@ -2542,11 +2561,12 @@ static const char *ibmvfc_get_ae_desc(u64 ae)
2542static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq, 2561static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
2543 struct ibmvfc_host *vhost) 2562 struct ibmvfc_host *vhost)
2544{ 2563{
2545 const char *desc = ibmvfc_get_ae_desc(crq->event); 2564 const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(crq->event);
2546 struct ibmvfc_target *tgt; 2565 struct ibmvfc_target *tgt;
2547 2566
2548 ibmvfc_log(vhost, 3, "%s event received. scsi_id: %llx, wwpn: %llx," 2567 ibmvfc_log(vhost, desc->log_level, "%s event received. scsi_id: %llx, wwpn: %llx,"
2549 " node_name: %llx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name); 2568 " node_name: %llx%s\n", desc->desc, crq->scsi_id, crq->wwpn, crq->node_name,
2569 ibmvfc_get_link_state(crq->link_state));
2550 2570
2551 switch (crq->event) { 2571 switch (crq->event) {
2552 case IBMVFC_AE_RESUME: 2572 case IBMVFC_AE_RESUME:
@@ -2788,7 +2808,6 @@ static int ibmvfc_target_alloc(struct scsi_target *starget)
2788static int ibmvfc_slave_configure(struct scsi_device *sdev) 2808static int ibmvfc_slave_configure(struct scsi_device *sdev)
2789{ 2809{
2790 struct Scsi_Host *shost = sdev->host; 2810 struct Scsi_Host *shost = sdev->host;
2791 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
2792 unsigned long flags = 0; 2811 unsigned long flags = 0;
2793 2812
2794 spin_lock_irqsave(shost->host_lock, flags); 2813 spin_lock_irqsave(shost->host_lock, flags);
@@ -2800,8 +2819,6 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev)
2800 scsi_activate_tcq(sdev, sdev->queue_depth); 2819 scsi_activate_tcq(sdev, sdev->queue_depth);
2801 } else 2820 } else
2802 scsi_deactivate_tcq(sdev, sdev->queue_depth); 2821 scsi_deactivate_tcq(sdev, sdev->queue_depth);
2803
2804 rport->dev_loss_tmo = dev_loss_tmo;
2805 spin_unlock_irqrestore(shost->host_lock, flags); 2822 spin_unlock_irqrestore(shost->host_lock, flags);
2806 return 0; 2823 return 0;
2807} 2824}
@@ -4285,8 +4302,10 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
4285 spin_unlock_irqrestore(vhost->host->host_lock, flags); 4302 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4286 rc = ibmvfc_reset_crq(vhost); 4303 rc = ibmvfc_reset_crq(vhost);
4287 spin_lock_irqsave(vhost->host->host_lock, flags); 4304 spin_lock_irqsave(vhost->host->host_lock, flags);
4288 if (rc || (rc = ibmvfc_send_crq_init(vhost)) || 4305 if (rc == H_CLOSED)
4289 (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) { 4306 vio_enable_interrupts(to_vio_dev(vhost->dev));
4307 else if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
4308 (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
4290 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); 4309 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4291 dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc); 4310 dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
4292 } 4311 }
@@ -4744,6 +4763,8 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
4744 if ((rc = scsi_add_host(shost, dev))) 4763 if ((rc = scsi_add_host(shost, dev)))
4745 goto release_event_pool; 4764 goto release_event_pool;
4746 4765
4766 fc_host_dev_loss_tmo(shost) = IBMVFC_DEV_LOSS_TMO;
4767
4747 if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj, 4768 if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj,
4748 &ibmvfc_trace_attr))) { 4769 &ibmvfc_trace_attr))) {
4749 dev_err(dev, "Failed to create trace file. rc=%d\n", rc); 4770 dev_err(dev, "Failed to create trace file. rc=%d\n", rc);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 608af394c8cf..ef663e7c9bbc 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -541,6 +541,12 @@ enum ibmvfc_async_event {
541 IBMVFC_AE_ADAPTER_FAILED = 0x1000, 541 IBMVFC_AE_ADAPTER_FAILED = 0x1000,
542}; 542};
543 543
544struct ibmvfc_async_desc {
545 enum ibmvfc_async_event ae;
546 const char *desc;
547 int log_level;
548};
549
544struct ibmvfc_crq { 550struct ibmvfc_crq {
545 volatile u8 valid; 551 volatile u8 valid;
546 volatile u8 format; 552 volatile u8 format;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 52568588039f..df9a12c8b373 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -1096,6 +1096,7 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res,
1096 res->bus = cfgtew->u.cfgte->res_addr.bus; 1096 res->bus = cfgtew->u.cfgte->res_addr.bus;
1097 res->target = cfgtew->u.cfgte->res_addr.target; 1097 res->target = cfgtew->u.cfgte->res_addr.target;
1098 res->lun = cfgtew->u.cfgte->res_addr.lun; 1098 res->lun = cfgtew->u.cfgte->res_addr.lun;
1099 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
1099 } 1100 }
1100 1101
1101 ipr_update_ata_class(res, proto); 1102 ipr_update_ata_class(res, proto);
@@ -1142,7 +1143,7 @@ static char *ipr_format_res_path(u8 *res_path, char *buffer, int len)
1142 int i; 1143 int i;
1143 char *p = buffer; 1144 char *p = buffer;
1144 1145
1145 res_path[0] = '\0'; 1146 *p = '\0';
1146 p += snprintf(p, buffer + len - p, "%02X", res_path[0]); 1147 p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1147 for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++) 1148 for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1148 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]); 1149 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
@@ -1670,7 +1671,7 @@ static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1670 1671
1671 array_entry = error->array_member; 1672 array_entry = error->array_member;
1672 num_entries = min_t(u32, be32_to_cpu(error->num_entries), 1673 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1673 sizeof(error->array_member)); 1674 ARRAY_SIZE(error->array_member));
1674 1675
1675 for (i = 0; i < num_entries; i++, array_entry++) { 1676 for (i = 0; i < num_entries; i++, array_entry++) {
1676 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) 1677 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
@@ -2151,8 +2152,8 @@ static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2151 ipr_err_separator; 2152 ipr_err_separator;
2152 2153
2153 array_entry = error->array_member; 2154 array_entry = error->array_member;
2154 num_entries = min_t(u32, be32_to_cpu(error->num_entries), 2155 num_entries = min_t(u32, error->num_entries,
2155 sizeof(error->array_member)); 2156 ARRAY_SIZE(error->array_member));
2156 2157
2157 for (i = 0; i < num_entries; i++, array_entry++) { 2158 for (i = 0; i < num_entries; i++, array_entry++) {
2158 2159
@@ -2166,10 +2167,10 @@ static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2166 2167
2167 ipr_err("Array Member %d:\n", i); 2168 ipr_err("Array Member %d:\n", i);
2168 ipr_log_ext_vpd(&array_entry->vpd); 2169 ipr_log_ext_vpd(&array_entry->vpd);
2169 ipr_err("Current Location: %s", 2170 ipr_err("Current Location: %s\n",
2170 ipr_format_res_path(array_entry->res_path, buffer, 2171 ipr_format_res_path(array_entry->res_path, buffer,
2171 sizeof(buffer))); 2172 sizeof(buffer)));
2172 ipr_err("Expected Location: %s", 2173 ipr_err("Expected Location: %s\n",
2173 ipr_format_res_path(array_entry->expected_res_path, 2174 ipr_format_res_path(array_entry->expected_res_path,
2174 buffer, sizeof(buffer))); 2175 buffer, sizeof(buffer)));
2175 2176
@@ -4089,6 +4090,7 @@ static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4089/** 4090/**
4090 * ipr_show_adapter_handle - Show the adapter's resource handle for this device 4091 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4091 * @dev: device struct 4092 * @dev: device struct
4093 * @attr: device attribute structure
4092 * @buf: buffer 4094 * @buf: buffer
4093 * 4095 *
4094 * Return value: 4096 * Return value:
@@ -4122,6 +4124,7 @@ static struct device_attribute ipr_adapter_handle_attr = {
4122 * ipr_show_resource_path - Show the resource path or the resource address for 4124 * ipr_show_resource_path - Show the resource path or the resource address for
4123 * this device. 4125 * this device.
4124 * @dev: device struct 4126 * @dev: device struct
4127 * @attr: device attribute structure
4125 * @buf: buffer 4128 * @buf: buffer
4126 * 4129 *
4127 * Return value: 4130 * Return value:
@@ -4159,8 +4162,45 @@ static struct device_attribute ipr_resource_path_attr = {
4159}; 4162};
4160 4163
4161/** 4164/**
4165 * ipr_show_device_id - Show the device_id for this device.
4166 * @dev: device struct
4167 * @attr: device attribute structure
4168 * @buf: buffer
4169 *
4170 * Return value:
4171 * number of bytes printed to buffer
4172 **/
4173static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4174{
4175 struct scsi_device *sdev = to_scsi_device(dev);
4176 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4177 struct ipr_resource_entry *res;
4178 unsigned long lock_flags = 0;
4179 ssize_t len = -ENXIO;
4180
4181 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4182 res = (struct ipr_resource_entry *)sdev->hostdata;
4183 if (res && ioa_cfg->sis64)
4184 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4185 else if (res)
4186 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4187
4188 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4189 return len;
4190}
4191
4192static struct device_attribute ipr_device_id_attr = {
4193 .attr = {
4194 .name = "device_id",
4195 .mode = S_IRUGO,
4196 },
4197 .show = ipr_show_device_id
4198};
4199
4200/**
4162 * ipr_show_resource_type - Show the resource type for this device. 4201 * ipr_show_resource_type - Show the resource type for this device.
4163 * @dev: device struct 4202 * @dev: device struct
4203 * @attr: device attribute structure
4164 * @buf: buffer 4204 * @buf: buffer
4165 * 4205 *
4166 * Return value: 4206 * Return value:
@@ -4195,6 +4235,7 @@ static struct device_attribute ipr_resource_type_attr = {
4195static struct device_attribute *ipr_dev_attrs[] = { 4235static struct device_attribute *ipr_dev_attrs[] = {
4196 &ipr_adapter_handle_attr, 4236 &ipr_adapter_handle_attr,
4197 &ipr_resource_path_attr, 4237 &ipr_resource_path_attr,
4238 &ipr_device_id_attr,
4198 &ipr_resource_type_attr, 4239 &ipr_resource_type_attr,
4199 NULL, 4240 NULL,
4200}; 4241};
@@ -4898,39 +4939,15 @@ static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4898/** 4939/**
4899 * ipr_handle_other_interrupt - Handle "other" interrupts 4940 * ipr_handle_other_interrupt - Handle "other" interrupts
4900 * @ioa_cfg: ioa config struct 4941 * @ioa_cfg: ioa config struct
4942 * @int_reg: interrupt register
4901 * 4943 *
4902 * Return value: 4944 * Return value:
4903 * IRQ_NONE / IRQ_HANDLED 4945 * IRQ_NONE / IRQ_HANDLED
4904 **/ 4946 **/
4905static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg) 4947static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
4948 volatile u32 int_reg)
4906{ 4949{
4907 irqreturn_t rc = IRQ_HANDLED; 4950 irqreturn_t rc = IRQ_HANDLED;
4908 volatile u32 int_reg, int_mask_reg;
4909
4910 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
4911 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
4912
4913 /* If an interrupt on the adapter did not occur, ignore it.
4914 * Or in the case of SIS 64, check for a stage change interrupt.
4915 */
4916 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
4917 if (ioa_cfg->sis64) {
4918 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4919 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4920 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
4921
4922 /* clear stage change */
4923 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
4924 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4925 list_del(&ioa_cfg->reset_cmd->queue);
4926 del_timer(&ioa_cfg->reset_cmd->timer);
4927 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4928 return IRQ_HANDLED;
4929 }
4930 }
4931
4932 return IRQ_NONE;
4933 }
4934 4951
4935 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { 4952 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4936 /* Mask the interrupt */ 4953 /* Mask the interrupt */
@@ -4991,7 +5008,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
4991{ 5008{
4992 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp; 5009 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
4993 unsigned long lock_flags = 0; 5010 unsigned long lock_flags = 0;
4994 volatile u32 int_reg; 5011 volatile u32 int_reg, int_mask_reg;
4995 u32 ioasc; 5012 u32 ioasc;
4996 u16 cmd_index; 5013 u16 cmd_index;
4997 int num_hrrq = 0; 5014 int num_hrrq = 0;
@@ -5006,6 +5023,33 @@ static irqreturn_t ipr_isr(int irq, void *devp)
5006 return IRQ_NONE; 5023 return IRQ_NONE;
5007 } 5024 }
5008 5025
5026 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5027 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
5028
5029 /* If an interrupt on the adapter did not occur, ignore it.
5030 * Or in the case of SIS 64, check for a stage change interrupt.
5031 */
5032 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
5033 if (ioa_cfg->sis64) {
5034 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5035 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5036 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5037
5038 /* clear stage change */
5039 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5040 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5041 list_del(&ioa_cfg->reset_cmd->queue);
5042 del_timer(&ioa_cfg->reset_cmd->timer);
5043 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5044 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5045 return IRQ_HANDLED;
5046 }
5047 }
5048
5049 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5050 return IRQ_NONE;
5051 }
5052
5009 while (1) { 5053 while (1) {
5010 ipr_cmd = NULL; 5054 ipr_cmd = NULL;
5011 5055
@@ -5045,7 +5089,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
5045 /* Clear the PCI interrupt */ 5089 /* Clear the PCI interrupt */
5046 do { 5090 do {
5047 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); 5091 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5048 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); 5092 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
5049 } while (int_reg & IPR_PCII_HRRQ_UPDATED && 5093 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5050 num_hrrq++ < IPR_MAX_HRRQ_RETRIES); 5094 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5051 5095
@@ -5060,7 +5104,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
5060 } 5104 }
5061 5105
5062 if (unlikely(rc == IRQ_NONE)) 5106 if (unlikely(rc == IRQ_NONE))
5063 rc = ipr_handle_other_interrupt(ioa_cfg); 5107 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5064 5108
5065 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5109 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5066 return rc; 5110 return rc;
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 4d31625ab9cf..aa8bb2f2c6ee 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -26,6 +26,7 @@
26#ifndef _IPR_H 26#ifndef _IPR_H
27#define _IPR_H 27#define _IPR_H
28 28
29#include <asm/unaligned.h>
29#include <linux/types.h> 30#include <linux/types.h>
30#include <linux/completion.h> 31#include <linux/completion.h>
31#include <linux/libata.h> 32#include <linux/libata.h>
@@ -37,8 +38,8 @@
37/* 38/*
38 * Literals 39 * Literals
39 */ 40 */
40#define IPR_DRIVER_VERSION "2.5.0" 41#define IPR_DRIVER_VERSION "2.5.1"
41#define IPR_DRIVER_DATE "(February 11, 2010)" 42#define IPR_DRIVER_DATE "(August 10, 2010)"
42 43
43/* 44/*
44 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding 45 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -318,6 +319,11 @@ struct ipr_ext_vpd {
318 __be32 wwid[2]; 319 __be32 wwid[2];
319}__attribute__((packed)); 320}__attribute__((packed));
320 321
322struct ipr_ext_vpd64 {
323 struct ipr_vpd vpd;
324 __be32 wwid[4];
325}__attribute__((packed));
326
321struct ipr_std_inq_data { 327struct ipr_std_inq_data {
322 u8 peri_qual_dev_type; 328 u8 peri_qual_dev_type;
323#define IPR_STD_INQ_PERI_QUAL(peri) ((peri) >> 5) 329#define IPR_STD_INQ_PERI_QUAL(peri) ((peri) >> 5)
@@ -372,7 +378,7 @@ struct ipr_config_table_entry {
372 378
373 struct ipr_res_addr res_addr; 379 struct ipr_res_addr res_addr;
374 __be32 res_handle; 380 __be32 res_handle;
375 __be32 reserved4[2]; 381 __be32 lun_wwn[2];
376 struct ipr_std_inq_data std_inq_data; 382 struct ipr_std_inq_data std_inq_data;
377}__attribute__ ((packed, aligned (4))); 383}__attribute__ ((packed, aligned (4)));
378 384
@@ -394,7 +400,7 @@ struct ipr_config_table_entry64 {
394 __be64 res_path; 400 __be64 res_path;
395 struct ipr_std_inq_data std_inq_data; 401 struct ipr_std_inq_data std_inq_data;
396 u8 reserved2[4]; 402 u8 reserved2[4];
397 __be64 reserved3[2]; // description text 403 __be64 reserved3[2];
398 u8 reserved4[8]; 404 u8 reserved4[8];
399}__attribute__ ((packed, aligned (8))); 405}__attribute__ ((packed, aligned (8)));
400 406
@@ -913,7 +919,7 @@ struct ipr_hostrcb_type_24_error {
913 u8 array_id; 919 u8 array_id;
914 u8 last_res_path[8]; 920 u8 last_res_path[8];
915 u8 protection_level[8]; 921 u8 protection_level[8];
916 struct ipr_ext_vpd array_vpd; 922 struct ipr_ext_vpd64 array_vpd;
917 u8 description[16]; 923 u8 description[16];
918 u8 reserved2[3]; 924 u8 reserved2[3];
919 u8 num_entries; 925 u8 num_entries;
@@ -1210,6 +1216,7 @@ struct ipr_resource_entry {
1210 1216
1211 __be32 res_handle; 1217 __be32 res_handle;
1212 __be64 dev_id; 1218 __be64 dev_id;
1219 __be64 lun_wwn;
1213 struct scsi_lun dev_lun; 1220 struct scsi_lun dev_lun;
1214 u8 res_path[8]; 1221 u8 res_path[8];
1215 1222
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 042153cbbde1..e1a395b438ee 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -162,6 +162,10 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
162 unsigned int xfer = 0; 162 unsigned int xfer = 0;
163 unsigned int si; 163 unsigned int si;
164 164
165 /* If the device fell off, no sense in issuing commands */
166 if (dev->gone)
167 return AC_ERR_SYSTEM;
168
165 task = sas_alloc_task(GFP_ATOMIC); 169 task = sas_alloc_task(GFP_ATOMIC);
166 if (!task) 170 if (!task)
167 return AC_ERR_SYSTEM; 171 return AC_ERR_SYSTEM;
@@ -347,6 +351,7 @@ static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
347static struct ata_port_operations sas_sata_ops = { 351static struct ata_port_operations sas_sata_ops = {
348 .phy_reset = sas_ata_phy_reset, 352 .phy_reset = sas_ata_phy_reset,
349 .post_internal_cmd = sas_ata_post_internal, 353 .post_internal_cmd = sas_ata_post_internal,
354 .qc_defer = ata_std_qc_defer,
350 .qc_prep = ata_noop_qc_prep, 355 .qc_prep = ata_noop_qc_prep,
351 .qc_issue = sas_ata_qc_issue, 356 .qc_issue = sas_ata_qc_issue,
352 .qc_fill_rtf = sas_ata_qc_fill_rtf, 357 .qc_fill_rtf = sas_ata_qc_fill_rtf,
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 83dd5070a15c..505ffe358293 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -175,10 +175,10 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
175 switch (resp->result) { 175 switch (resp->result) {
176 case SMP_RESP_PHY_VACANT: 176 case SMP_RESP_PHY_VACANT:
177 phy->phy_state = PHY_VACANT; 177 phy->phy_state = PHY_VACANT;
178 return; 178 break;
179 default: 179 default:
180 phy->phy_state = PHY_NOT_PRESENT; 180 phy->phy_state = PHY_NOT_PRESENT;
181 return; 181 break;
182 case SMP_RESP_FUNC_ACC: 182 case SMP_RESP_FUNC_ACC:
183 phy->phy_state = PHY_EMPTY; /* do not know yet */ 183 phy->phy_state = PHY_EMPTY; /* do not know yet */
184 break; 184 break;
@@ -209,7 +209,10 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
209 phy->phy->negotiated_linkrate = phy->linkrate; 209 phy->phy->negotiated_linkrate = phy->linkrate;
210 210
211 if (!rediscover) 211 if (!rediscover)
212 sas_phy_add(phy->phy); 212 if (sas_phy_add(phy->phy)) {
213 sas_phy_free(phy->phy);
214 return;
215 }
213 216
214 SAS_DPRINTK("ex %016llx phy%02d:%c attached: %016llx\n", 217 SAS_DPRINTK("ex %016llx phy%02d:%c attached: %016llx\n",
215 SAS_ADDR(dev->sas_addr), phy->phy_id, 218 SAS_ADDR(dev->sas_addr), phy->phy_id,
@@ -1724,6 +1727,7 @@ static void sas_unregister_ex_tree(struct domain_device *dev)
1724 struct domain_device *child, *n; 1727 struct domain_device *child, *n;
1725 1728
1726 list_for_each_entry_safe(child, n, &ex->children, siblings) { 1729 list_for_each_entry_safe(child, n, &ex->children, siblings) {
1730 child->gone = 1;
1727 if (child->dev_type == EDGE_DEV || 1731 if (child->dev_type == EDGE_DEV ||
1728 child->dev_type == FANOUT_DEV) 1732 child->dev_type == FANOUT_DEV)
1729 sas_unregister_ex_tree(child); 1733 sas_unregister_ex_tree(child);
@@ -1744,6 +1748,7 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
1744 &ex_dev->children, siblings) { 1748 &ex_dev->children, siblings) {
1745 if (SAS_ADDR(child->sas_addr) == 1749 if (SAS_ADDR(child->sas_addr) ==
1746 SAS_ADDR(phy->attached_sas_addr)) { 1750 SAS_ADDR(phy->attached_sas_addr)) {
1751 child->gone = 1;
1747 if (child->dev_type == EDGE_DEV || 1752 if (child->dev_type == EDGE_DEV ||
1748 child->dev_type == FANOUT_DEV) 1753 child->dev_type == FANOUT_DEV)
1749 sas_unregister_ex_tree(child); 1754 sas_unregister_ex_tree(child);
@@ -1752,6 +1757,7 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
1752 break; 1757 break;
1753 } 1758 }
1754 } 1759 }
1760 parent->gone = 1;
1755 sas_disable_routing(parent, phy->attached_sas_addr); 1761 sas_disable_routing(parent, phy->attached_sas_addr);
1756 } 1762 }
1757 memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); 1763 memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 535085cd27ec..55f09e92ab59 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -217,6 +217,13 @@ int sas_queuecommand(struct scsi_cmnd *cmd,
217 goto out; 217 goto out;
218 } 218 }
219 219
220 /* If the device fell off, no sense in issuing commands */
221 if (dev->gone) {
222 cmd->result = DID_BAD_TARGET << 16;
223 scsi_done(cmd);
224 goto out;
225 }
226
220 res = -ENOMEM; 227 res = -ENOMEM;
221 task = sas_create_task(cmd, dev, GFP_ATOMIC); 228 task = sas_create_task(cmd, dev, GFP_ATOMIC);
222 if (!task) 229 if (!task)
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 23ce45708335..f681eea57730 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -586,6 +586,11 @@ lpfc_issue_lip(struct Scsi_Host *shost)
586 phba->cfg_link_speed); 586 phba->cfg_link_speed);
587 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, 587 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
588 phba->fc_ratov * 2); 588 phba->fc_ratov * 2);
589 if ((mbxstatus == MBX_SUCCESS) &&
590 (pmboxq->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
591 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
592 "2859 SLI authentication is required "
593 "for INIT_LINK but has not done yet\n");
589 } 594 }
590 595
591 lpfc_set_loopback_flag(phba); 596 lpfc_set_loopback_flag(phba);
@@ -2159,6 +2164,11 @@ lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
2159 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) { 2164 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
2160 vport->cfg_nodev_tmo = val; 2165 vport->cfg_nodev_tmo = val;
2161 vport->cfg_devloss_tmo = val; 2166 vport->cfg_devloss_tmo = val;
2167 /*
2168 * For compat: set the fc_host dev loss so new rports
2169 * will get the value.
2170 */
2171 fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
2162 lpfc_update_rport_devloss_tmo(vport); 2172 lpfc_update_rport_devloss_tmo(vport);
2163 return 0; 2173 return 0;
2164 } 2174 }
@@ -2208,6 +2218,7 @@ lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val)
2208 vport->cfg_nodev_tmo = val; 2218 vport->cfg_nodev_tmo = val;
2209 vport->cfg_devloss_tmo = val; 2219 vport->cfg_devloss_tmo = val;
2210 vport->dev_loss_tmo_changed = 1; 2220 vport->dev_loss_tmo_changed = 1;
2221 fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
2211 lpfc_update_rport_devloss_tmo(vport); 2222 lpfc_update_rport_devloss_tmo(vport);
2212 return 0; 2223 return 0;
2213 } 2224 }
@@ -3776,6 +3787,11 @@ sysfs_mbox_read(struct file *filp, struct kobject *kobj,
3776 case MBX_PORT_CAPABILITIES: 3787 case MBX_PORT_CAPABILITIES:
3777 case MBX_PORT_IOV_CONTROL: 3788 case MBX_PORT_IOV_CONTROL:
3778 break; 3789 break;
3790 case MBX_SECURITY_MGMT:
3791 case MBX_AUTH_PORT:
3792 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
3793 return -EPERM;
3794 break;
3779 case MBX_READ_SPARM64: 3795 case MBX_READ_SPARM64:
3780 case MBX_READ_LA: 3796 case MBX_READ_LA:
3781 case MBX_READ_LA64: 3797 case MBX_READ_LA64:
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 49d0cf99c24c..f5d60b55f53a 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -259,6 +259,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
259 struct bsg_job_data *dd_data; 259 struct bsg_job_data *dd_data;
260 uint32_t creg_val; 260 uint32_t creg_val;
261 int rc = 0; 261 int rc = 0;
262 int iocb_stat;
262 263
263 /* in case no data is transferred */ 264 /* in case no data is transferred */
264 job->reply->reply_payload_rcv_len = 0; 265 job->reply->reply_payload_rcv_len = 0;
@@ -373,14 +374,13 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
373 readl(phba->HCregaddr); /* flush */ 374 readl(phba->HCregaddr); /* flush */
374 } 375 }
375 376
376 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); 377 iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
377 378 if (iocb_stat == IOCB_SUCCESS)
378 if (rc == IOCB_SUCCESS)
379 return 0; /* done for now */ 379 return 0; /* done for now */
380 else if (rc == IOCB_BUSY) 380 else if (iocb_stat == IOCB_BUSY)
381 rc = EAGAIN; 381 rc = -EAGAIN;
382 else 382 else
383 rc = EIO; 383 rc = -EIO;
384 384
385 385
386 /* iocb failed so cleanup */ 386 /* iocb failed so cleanup */
@@ -631,9 +631,9 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
631 if (rc == IOCB_SUCCESS) 631 if (rc == IOCB_SUCCESS)
632 return 0; /* done for now */ 632 return 0; /* done for now */
633 else if (rc == IOCB_BUSY) 633 else if (rc == IOCB_BUSY)
634 rc = EAGAIN; 634 rc = -EAGAIN;
635 else 635 else
636 rc = EIO; 636 rc = -EIO;
637 637
638 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, 638 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
639 job->request_payload.sg_cnt, DMA_TO_DEVICE); 639 job->request_payload.sg_cnt, DMA_TO_DEVICE);
@@ -1299,7 +1299,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1299 /* Allocate buffer for command iocb */ 1299 /* Allocate buffer for command iocb */
1300 ctiocb = lpfc_sli_get_iocbq(phba); 1300 ctiocb = lpfc_sli_get_iocbq(phba);
1301 if (!ctiocb) { 1301 if (!ctiocb) {
1302 rc = ENOMEM; 1302 rc = -ENOMEM;
1303 goto no_ctiocb; 1303 goto no_ctiocb;
1304 } 1304 }
1305 1305
@@ -1518,7 +1518,7 @@ lpfc_bsg_diag_mode(struct fc_bsg_job *job)
1518 loopback_mode = (struct diag_mode_set *) 1518 loopback_mode = (struct diag_mode_set *)
1519 job->request->rqst_data.h_vendor.vendor_cmd; 1519 job->request->rqst_data.h_vendor.vendor_cmd;
1520 link_flags = loopback_mode->type; 1520 link_flags = loopback_mode->type;
1521 timeout = loopback_mode->timeout; 1521 timeout = loopback_mode->timeout * 100;
1522 1522
1523 if ((phba->link_state == LPFC_HBA_ERROR) || 1523 if ((phba->link_state == LPFC_HBA_ERROR) ||
1524 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) || 1524 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
@@ -1649,17 +1649,18 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t * rpi)
1649 1649
1650 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1650 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1651 if (!mbox) 1651 if (!mbox)
1652 return ENOMEM; 1652 return -ENOMEM;
1653 1653
1654 status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID, 1654 status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
1655 (uint8_t *)&phba->pport->fc_sparam, mbox, 0); 1655 (uint8_t *)&phba->pport->fc_sparam, mbox, 0);
1656 if (status) { 1656 if (status) {
1657 mempool_free(mbox, phba->mbox_mem_pool); 1657 mempool_free(mbox, phba->mbox_mem_pool);
1658 return ENOMEM; 1658 return -ENOMEM;
1659 } 1659 }
1660 1660
1661 dmabuff = (struct lpfc_dmabuf *) mbox->context1; 1661 dmabuff = (struct lpfc_dmabuf *) mbox->context1;
1662 mbox->context1 = NULL; 1662 mbox->context1 = NULL;
1663 mbox->context2 = NULL;
1663 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); 1664 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
1664 1665
1665 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) { 1666 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
@@ -1667,7 +1668,7 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t * rpi)
1667 kfree(dmabuff); 1668 kfree(dmabuff);
1668 if (status != MBX_TIMEOUT) 1669 if (status != MBX_TIMEOUT)
1669 mempool_free(mbox, phba->mbox_mem_pool); 1670 mempool_free(mbox, phba->mbox_mem_pool);
1670 return ENODEV; 1671 return -ENODEV;
1671 } 1672 }
1672 1673
1673 *rpi = mbox->u.mb.un.varWords[0]; 1674 *rpi = mbox->u.mb.un.varWords[0];
@@ -1693,7 +1694,7 @@ static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
1693 /* Allocate mboxq structure */ 1694 /* Allocate mboxq structure */
1694 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1695 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1695 if (mbox == NULL) 1696 if (mbox == NULL)
1696 return ENOMEM; 1697 return -ENOMEM;
1697 1698
1698 lpfc_unreg_login(phba, 0, rpi, mbox); 1699 lpfc_unreg_login(phba, 0, rpi, mbox);
1699 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); 1700 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
@@ -1701,7 +1702,7 @@ static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
1701 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) { 1702 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
1702 if (status != MBX_TIMEOUT) 1703 if (status != MBX_TIMEOUT)
1703 mempool_free(mbox, phba->mbox_mem_pool); 1704 mempool_free(mbox, phba->mbox_mem_pool);
1704 return EIO; 1705 return -EIO;
1705 } 1706 }
1706 1707
1707 mempool_free(mbox, phba->mbox_mem_pool); 1708 mempool_free(mbox, phba->mbox_mem_pool);
@@ -1730,6 +1731,8 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
1730 struct ulp_bde64 *bpl = NULL; 1731 struct ulp_bde64 *bpl = NULL;
1731 struct lpfc_sli_ct_request *ctreq = NULL; 1732 struct lpfc_sli_ct_request *ctreq = NULL;
1732 int ret_val = 0; 1733 int ret_val = 0;
1734 int time_left;
1735 int iocb_stat = 0;
1733 unsigned long flags; 1736 unsigned long flags;
1734 1737
1735 *txxri = 0; 1738 *txxri = 0;
@@ -1737,7 +1740,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
1737 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid, 1740 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
1738 SLI_CT_ELX_LOOPBACK); 1741 SLI_CT_ELX_LOOPBACK);
1739 if (!evt) 1742 if (!evt)
1740 return ENOMEM; 1743 return -ENOMEM;
1741 1744
1742 spin_lock_irqsave(&phba->ct_ev_lock, flags); 1745 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1743 list_add(&evt->node, &phba->ct_ev_waiters); 1746 list_add(&evt->node, &phba->ct_ev_waiters);
@@ -1770,7 +1773,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
1770 if (cmdiocbq == NULL || rspiocbq == NULL || 1773 if (cmdiocbq == NULL || rspiocbq == NULL ||
1771 dmabuf == NULL || bpl == NULL || ctreq == NULL || 1774 dmabuf == NULL || bpl == NULL || ctreq == NULL ||
1772 dmabuf->virt == NULL) { 1775 dmabuf->virt == NULL) {
1773 ret_val = ENOMEM; 1776 ret_val = -ENOMEM;
1774 goto err_get_xri_exit; 1777 goto err_get_xri_exit;
1775 } 1778 }
1776 1779
@@ -1806,24 +1809,24 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
1806 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; 1809 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
1807 cmdiocbq->vport = phba->pport; 1810 cmdiocbq->vport = phba->pport;
1808 1811
1809 ret_val = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, 1812 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
1810 rspiocbq, 1813 rspiocbq,
1811 (phba->fc_ratov * 2) 1814 (phba->fc_ratov * 2)
1812 + LPFC_DRVR_TIMEOUT); 1815 + LPFC_DRVR_TIMEOUT);
1813 if (ret_val) 1816 if (iocb_stat) {
1817 ret_val = -EIO;
1814 goto err_get_xri_exit; 1818 goto err_get_xri_exit;
1815 1819 }
1816 *txxri = rsp->ulpContext; 1820 *txxri = rsp->ulpContext;
1817 1821
1818 evt->waiting = 1; 1822 evt->waiting = 1;
1819 evt->wait_time_stamp = jiffies; 1823 evt->wait_time_stamp = jiffies;
1820 ret_val = wait_event_interruptible_timeout( 1824 time_left = wait_event_interruptible_timeout(
1821 evt->wq, !list_empty(&evt->events_to_see), 1825 evt->wq, !list_empty(&evt->events_to_see),
1822 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ); 1826 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
1823 if (list_empty(&evt->events_to_see)) 1827 if (list_empty(&evt->events_to_see))
1824 ret_val = (ret_val) ? EINTR : ETIMEDOUT; 1828 ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
1825 else { 1829 else {
1826 ret_val = IOCB_SUCCESS;
1827 spin_lock_irqsave(&phba->ct_ev_lock, flags); 1830 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1828 list_move(evt->events_to_see.prev, &evt->events_to_get); 1831 list_move(evt->events_to_see.prev, &evt->events_to_get);
1829 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 1832 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
@@ -1845,7 +1848,7 @@ err_get_xri_exit:
1845 kfree(dmabuf); 1848 kfree(dmabuf);
1846 } 1849 }
1847 1850
1848 if (cmdiocbq && (ret_val != IOCB_TIMEDOUT)) 1851 if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT))
1849 lpfc_sli_release_iocbq(phba, cmdiocbq); 1852 lpfc_sli_release_iocbq(phba, cmdiocbq);
1850 if (rspiocbq) 1853 if (rspiocbq)
1851 lpfc_sli_release_iocbq(phba, rspiocbq); 1854 lpfc_sli_release_iocbq(phba, rspiocbq);
@@ -1959,6 +1962,7 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
1959 uint32_t num_bde; 1962 uint32_t num_bde;
1960 struct lpfc_dmabufext *rxbuffer = NULL; 1963 struct lpfc_dmabufext *rxbuffer = NULL;
1961 int ret_val = 0; 1964 int ret_val = 0;
1965 int iocb_stat;
1962 int i = 0; 1966 int i = 0;
1963 1967
1964 cmdiocbq = lpfc_sli_get_iocbq(phba); 1968 cmdiocbq = lpfc_sli_get_iocbq(phba);
@@ -1973,7 +1977,7 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
1973 } 1977 }
1974 1978
1975 if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) { 1979 if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
1976 ret_val = ENOMEM; 1980 ret_val = -ENOMEM;
1977 goto err_post_rxbufs_exit; 1981 goto err_post_rxbufs_exit;
1978 } 1982 }
1979 1983
@@ -2022,16 +2026,16 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
2022 cmd->ulpClass = CLASS3; 2026 cmd->ulpClass = CLASS3;
2023 cmd->ulpContext = rxxri; 2027 cmd->ulpContext = rxxri;
2024 2028
2025 ret_val = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); 2029 iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
2026 2030 0);
2027 if (ret_val == IOCB_ERROR) { 2031 if (iocb_stat == IOCB_ERROR) {
2028 diag_cmd_data_free(phba, 2032 diag_cmd_data_free(phba,
2029 (struct lpfc_dmabufext *)mp[0]); 2033 (struct lpfc_dmabufext *)mp[0]);
2030 if (mp[1]) 2034 if (mp[1])
2031 diag_cmd_data_free(phba, 2035 diag_cmd_data_free(phba,
2032 (struct lpfc_dmabufext *)mp[1]); 2036 (struct lpfc_dmabufext *)mp[1]);
2033 dmp = list_entry(next, struct lpfc_dmabuf, list); 2037 dmp = list_entry(next, struct lpfc_dmabuf, list);
2034 ret_val = EIO; 2038 ret_val = -EIO;
2035 goto err_post_rxbufs_exit; 2039 goto err_post_rxbufs_exit;
2036 } 2040 }
2037 2041
@@ -2045,7 +2049,7 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
2045 cmdiocbq = lpfc_sli_get_iocbq(phba); 2049 cmdiocbq = lpfc_sli_get_iocbq(phba);
2046 if (!cmdiocbq) { 2050 if (!cmdiocbq) {
2047 dmp = list_entry(next, struct lpfc_dmabuf, list); 2051 dmp = list_entry(next, struct lpfc_dmabuf, list);
2048 ret_val = EIO; 2052 ret_val = -EIO;
2049 goto err_post_rxbufs_exit; 2053 goto err_post_rxbufs_exit;
2050 } 2054 }
2051 2055
@@ -2111,6 +2115,8 @@ lpfc_bsg_diag_test(struct fc_bsg_job *job)
2111 uint32_t num_bde; 2115 uint32_t num_bde;
2112 uint8_t *ptr = NULL, *rx_databuf = NULL; 2116 uint8_t *ptr = NULL, *rx_databuf = NULL;
2113 int rc = 0; 2117 int rc = 0;
2118 int time_left;
2119 int iocb_stat;
2114 unsigned long flags; 2120 unsigned long flags;
2115 void *dataout = NULL; 2121 void *dataout = NULL;
2116 uint32_t total_mem; 2122 uint32_t total_mem;
@@ -2185,22 +2191,18 @@ lpfc_bsg_diag_test(struct fc_bsg_job *job)
2185 ptr, size); 2191 ptr, size);
2186 2192
2187 rc = lpfcdiag_loop_self_reg(phba, &rpi); 2193 rc = lpfcdiag_loop_self_reg(phba, &rpi);
2188 if (rc) { 2194 if (rc)
2189 rc = -ENOMEM;
2190 goto loopback_test_exit; 2195 goto loopback_test_exit;
2191 }
2192 2196
2193 rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri); 2197 rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
2194 if (rc) { 2198 if (rc) {
2195 lpfcdiag_loop_self_unreg(phba, rpi); 2199 lpfcdiag_loop_self_unreg(phba, rpi);
2196 rc = -ENOMEM;
2197 goto loopback_test_exit; 2200 goto loopback_test_exit;
2198 } 2201 }
2199 2202
2200 rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size); 2203 rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
2201 if (rc) { 2204 if (rc) {
2202 lpfcdiag_loop_self_unreg(phba, rpi); 2205 lpfcdiag_loop_self_unreg(phba, rpi);
2203 rc = -ENOMEM;
2204 goto loopback_test_exit; 2206 goto loopback_test_exit;
2205 } 2207 }
2206 2208
@@ -2290,21 +2292,22 @@ lpfc_bsg_diag_test(struct fc_bsg_job *job)
2290 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; 2292 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
2291 cmdiocbq->vport = phba->pport; 2293 cmdiocbq->vport = phba->pport;
2292 2294
2293 rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq, 2295 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
2294 (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT); 2296 rspiocbq, (phba->fc_ratov * 2) +
2297 LPFC_DRVR_TIMEOUT);
2295 2298
2296 if ((rc != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) { 2299 if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) {
2297 rc = -EIO; 2300 rc = -EIO;
2298 goto err_loopback_test_exit; 2301 goto err_loopback_test_exit;
2299 } 2302 }
2300 2303
2301 evt->waiting = 1; 2304 evt->waiting = 1;
2302 rc = wait_event_interruptible_timeout( 2305 time_left = wait_event_interruptible_timeout(
2303 evt->wq, !list_empty(&evt->events_to_see), 2306 evt->wq, !list_empty(&evt->events_to_see),
2304 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ); 2307 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
2305 evt->waiting = 0; 2308 evt->waiting = 0;
2306 if (list_empty(&evt->events_to_see)) 2309 if (list_empty(&evt->events_to_see))
2307 rc = (rc) ? -EINTR : -ETIMEDOUT; 2310 rc = (time_left) ? -EINTR : -ETIMEDOUT;
2308 else { 2311 else {
2309 spin_lock_irqsave(&phba->ct_ev_lock, flags); 2312 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2310 list_move(evt->events_to_see.prev, &evt->events_to_get); 2313 list_move(evt->events_to_see.prev, &evt->events_to_get);
@@ -2470,6 +2473,17 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2470 to += sizeof(MAILBOX_t); 2473 to += sizeof(MAILBOX_t);
2471 size = pmboxq->u.mb.un.varWords[5]; 2474 size = pmboxq->u.mb.un.varWords[5];
2472 memcpy(to, from, size); 2475 memcpy(to, from, size);
2476 } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
2477 (pmboxq->u.mb.mbxCommand == MBX_SLI4_CONFIG)) {
2478 struct lpfc_mbx_nembed_cmd *nembed_sge =
2479 (struct lpfc_mbx_nembed_cmd *)
2480 &pmboxq->u.mb.un.varWords[0];
2481
2482 from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
2483 virt;
2484 to += sizeof(MAILBOX_t);
2485 size = nembed_sge->sge[0].length;
2486 memcpy(to, from, size);
2473 } else if (pmboxq->u.mb.mbxCommand == MBX_READ_EVENT_LOG) { 2487 } else if (pmboxq->u.mb.mbxCommand == MBX_READ_EVENT_LOG) {
2474 from = (uint8_t *)dd_data->context_un. 2488 from = (uint8_t *)dd_data->context_un.
2475 mbox.dmp->dma.virt; 2489 mbox.dmp->dma.virt;
@@ -2911,6 +2925,59 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2911 from += sizeof(MAILBOX_t); 2925 from += sizeof(MAILBOX_t);
2912 memcpy((uint8_t *)dmp->dma.virt, from, 2926 memcpy((uint8_t *)dmp->dma.virt, from,
2913 bde->tus.f.bdeSize); 2927 bde->tus.f.bdeSize);
2928 } else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
2929 struct lpfc_mbx_nembed_cmd *nembed_sge;
2930 struct mbox_header *header;
2931 uint32_t receive_length;
2932
2933 /* rebuild the command for sli4 using our own buffers
2934 * like we do for biu diags
2935 */
2936 header = (struct mbox_header *)&pmb->un.varWords[0];
2937 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
2938 &pmb->un.varWords[0];
2939 receive_length = nembed_sge->sge[0].length;
2940
2941 /* receive length cannot be greater than mailbox
2942 * extension size
2943 */
2944 if ((receive_length == 0) ||
2945 (receive_length > MAILBOX_EXT_SIZE)) {
2946 rc = -ERANGE;
2947 goto job_done;
2948 }
2949
2950 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2951 if (!rxbmp) {
2952 rc = -ENOMEM;
2953 goto job_done;
2954 }
2955
2956 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2957 if (!rxbmp->virt) {
2958 rc = -ENOMEM;
2959 goto job_done;
2960 }
2961
2962 INIT_LIST_HEAD(&rxbmp->list);
2963 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2964 dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length,
2965 0);
2966 if (!dmp) {
2967 rc = -ENOMEM;
2968 goto job_done;
2969 }
2970
2971 INIT_LIST_HEAD(&dmp->dma.list);
2972 nembed_sge->sge[0].pa_hi = putPaddrHigh(dmp->dma.phys);
2973 nembed_sge->sge[0].pa_lo = putPaddrLow(dmp->dma.phys);
2974 /* copy the transmit data found in the mailbox
2975 * extension area
2976 */
2977 from = (uint8_t *)mb;
2978 from += sizeof(MAILBOX_t);
2979 memcpy((uint8_t *)dmp->dma.virt, from,
2980 header->cfg_mhdr.payload_length);
2914 } 2981 }
2915 } 2982 }
2916 2983
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 8d09191c327e..e6ca12f6c6cb 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -3250,6 +3250,8 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3250 lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi); 3250 lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
3251 3251
3252 pmb->context1 = NULL; 3252 pmb->context1 = NULL;
3253 pmb->context2 = NULL;
3254
3253 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3255 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3254 kfree(mp); 3256 kfree(mp);
3255 mempool_free(pmb, phba->mbox_mem_pool); 3257 mempool_free(pmb, phba->mbox_mem_pool);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 1f62ea8c165d..c3d7174e3469 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1015,7 +1015,6 @@ static void
1015lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 1015lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1016{ 1016{
1017 struct lpfc_vport *vport = mboxq->vport; 1017 struct lpfc_vport *vport = mboxq->vport;
1018 unsigned long flags;
1019 1018
1020 if (mboxq->u.mb.mbxStatus) { 1019 if (mboxq->u.mb.mbxStatus) {
1021 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 1020 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
@@ -1029,18 +1028,18 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1029 /* Start FCoE discovery by sending a FLOGI. */ 1028 /* Start FCoE discovery by sending a FLOGI. */
1030 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi); 1029 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
1031 /* Set the FCFI registered flag */ 1030 /* Set the FCFI registered flag */
1032 spin_lock_irqsave(&phba->hbalock, flags); 1031 spin_lock_irq(&phba->hbalock);
1033 phba->fcf.fcf_flag |= FCF_REGISTERED; 1032 phba->fcf.fcf_flag |= FCF_REGISTERED;
1034 spin_unlock_irqrestore(&phba->hbalock, flags); 1033 spin_unlock_irq(&phba->hbalock);
1035 /* If there is a pending FCoE event, restart FCF table scan. */ 1034 /* If there is a pending FCoE event, restart FCF table scan. */
1036 if (lpfc_check_pending_fcoe_event(phba, 1)) { 1035 if (lpfc_check_pending_fcoe_event(phba, 1)) {
1037 mempool_free(mboxq, phba->mbox_mem_pool); 1036 mempool_free(mboxq, phba->mbox_mem_pool);
1038 return; 1037 return;
1039 } 1038 }
1040 spin_lock_irqsave(&phba->hbalock, flags); 1039 spin_lock_irq(&phba->hbalock);
1041 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); 1040 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
1042 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1041 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1043 spin_unlock_irqrestore(&phba->hbalock, flags); 1042 spin_unlock_irq(&phba->hbalock);
1044 if (vport->port_state != LPFC_FLOGI) 1043 if (vport->port_state != LPFC_FLOGI)
1045 lpfc_initial_flogi(vport); 1044 lpfc_initial_flogi(vport);
1046 1045
@@ -1240,14 +1239,13 @@ lpfc_register_fcf(struct lpfc_hba *phba)
1240{ 1239{
1241 LPFC_MBOXQ_t *fcf_mbxq; 1240 LPFC_MBOXQ_t *fcf_mbxq;
1242 int rc; 1241 int rc;
1243 unsigned long flags;
1244 1242
1245 spin_lock_irqsave(&phba->hbalock, flags); 1243 spin_lock_irq(&phba->hbalock);
1246 1244
1247 /* If the FCF is not availabe do nothing. */ 1245 /* If the FCF is not availabe do nothing. */
1248 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) { 1246 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
1249 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1247 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1250 spin_unlock_irqrestore(&phba->hbalock, flags); 1248 spin_unlock_irq(&phba->hbalock);
1251 return; 1249 return;
1252 } 1250 }
1253 1251
@@ -1255,19 +1253,19 @@ lpfc_register_fcf(struct lpfc_hba *phba)
1255 if (phba->fcf.fcf_flag & FCF_REGISTERED) { 1253 if (phba->fcf.fcf_flag & FCF_REGISTERED) {
1256 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); 1254 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
1257 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1255 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1258 spin_unlock_irqrestore(&phba->hbalock, flags); 1256 spin_unlock_irq(&phba->hbalock);
1259 if (phba->pport->port_state != LPFC_FLOGI) 1257 if (phba->pport->port_state != LPFC_FLOGI)
1260 lpfc_initial_flogi(phba->pport); 1258 lpfc_initial_flogi(phba->pport);
1261 return; 1259 return;
1262 } 1260 }
1263 spin_unlock_irqrestore(&phba->hbalock, flags); 1261 spin_unlock_irq(&phba->hbalock);
1264 1262
1265 fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, 1263 fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
1266 GFP_KERNEL); 1264 GFP_KERNEL);
1267 if (!fcf_mbxq) { 1265 if (!fcf_mbxq) {
1268 spin_lock_irqsave(&phba->hbalock, flags); 1266 spin_lock_irq(&phba->hbalock);
1269 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1267 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1270 spin_unlock_irqrestore(&phba->hbalock, flags); 1268 spin_unlock_irq(&phba->hbalock);
1271 return; 1269 return;
1272 } 1270 }
1273 1271
@@ -1276,9 +1274,9 @@ lpfc_register_fcf(struct lpfc_hba *phba)
1276 fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi; 1274 fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
1277 rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT); 1275 rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
1278 if (rc == MBX_NOT_FINISHED) { 1276 if (rc == MBX_NOT_FINISHED) {
1279 spin_lock_irqsave(&phba->hbalock, flags); 1277 spin_lock_irq(&phba->hbalock);
1280 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1278 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1281 spin_unlock_irqrestore(&phba->hbalock, flags); 1279 spin_unlock_irq(&phba->hbalock);
1282 mempool_free(fcf_mbxq, phba->mbox_mem_pool); 1280 mempool_free(fcf_mbxq, phba->mbox_mem_pool);
1283 } 1281 }
1284 1282
@@ -2851,6 +2849,7 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2851 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2849 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2852 2850
2853 pmb->context1 = NULL; 2851 pmb->context1 = NULL;
2852 pmb->context2 = NULL;
2854 2853
2855 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) 2854 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
2856 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 2855 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
@@ -3149,6 +3148,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3149 ndlp = (struct lpfc_nodelist *) pmb->context2; 3148 ndlp = (struct lpfc_nodelist *) pmb->context2;
3150 pmb->context1 = NULL; 3149 pmb->context1 = NULL;
3151 pmb->context2 = NULL; 3150 pmb->context2 = NULL;
3151
3152 if (mb->mbxStatus) { 3152 if (mb->mbxStatus) {
3153 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 3153 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
3154 "0258 Register Fabric login error: 0x%x\n", 3154 "0258 Register Fabric login error: 0x%x\n",
@@ -3218,6 +3218,9 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3218 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 3218 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
3219 struct lpfc_vport *vport = pmb->vport; 3219 struct lpfc_vport *vport = pmb->vport;
3220 3220
3221 pmb->context1 = NULL;
3222 pmb->context2 = NULL;
3223
3221 if (mb->mbxStatus) { 3224 if (mb->mbxStatus) {
3222out: 3225out:
3223 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3226 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
@@ -3249,8 +3252,6 @@ out:
3249 return; 3252 return;
3250 } 3253 }
3251 3254
3252 pmb->context1 = NULL;
3253
3254 ndlp->nlp_rpi = mb->un.varWords[0]; 3255 ndlp->nlp_rpi = mb->un.varWords[0];
3255 ndlp->nlp_flag |= NLP_RPI_VALID; 3256 ndlp->nlp_flag |= NLP_RPI_VALID;
3256 ndlp->nlp_type |= NLP_FABRIC; 3257 ndlp->nlp_type |= NLP_FABRIC;
@@ -4784,6 +4785,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4784 struct lpfc_vport *vport = pmb->vport; 4785 struct lpfc_vport *vport = pmb->vport;
4785 4786
4786 pmb->context1 = NULL; 4787 pmb->context1 = NULL;
4788 pmb->context2 = NULL;
4787 4789
4788 ndlp->nlp_rpi = mb->un.varWords[0]; 4790 ndlp->nlp_rpi = mb->un.varWords[0];
4789 ndlp->nlp_flag |= NLP_RPI_VALID; 4791 ndlp->nlp_flag |= NLP_RPI_VALID;
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 1676f61291e7..a631647051d9 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1380,6 +1380,9 @@ typedef struct { /* FireFly BIU registers */
1380#define MBX_INIT_VFI 0xA3 1380#define MBX_INIT_VFI 0xA3
1381#define MBX_INIT_VPI 0xA4 1381#define MBX_INIT_VPI 0xA4
1382 1382
1383#define MBX_AUTH_PORT 0xF8
1384#define MBX_SECURITY_MGMT 0xF9
1385
1383/* IOCB Commands */ 1386/* IOCB Commands */
1384 1387
1385#define CMD_RCV_SEQUENCE_CX 0x01 1388#define CMD_RCV_SEQUENCE_CX 0x01
@@ -1502,7 +1505,8 @@ typedef struct { /* FireFly BIU registers */
1502#define MBXERR_DMA_ERROR 15 1505#define MBXERR_DMA_ERROR 15
1503#define MBXERR_ERROR 16 1506#define MBXERR_ERROR 16
1504#define MBXERR_LINK_DOWN 0x33 1507#define MBXERR_LINK_DOWN 0x33
1505#define MBX_NOT_FINISHED 255 1508#define MBXERR_SEC_NO_PERMISSION 0xF02
1509#define MBX_NOT_FINISHED 255
1506 1510
1507#define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */ 1511#define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */
1508#define MBX_TIMEOUT 0xfffffe /* time-out expired waiting for */ 1512#define MBX_TIMEOUT 0xfffffe /* time-out expired waiting for */
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index da9ba06ad583..295c7ddb36c1 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1076,21 +1076,16 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1076 } else { 1076 } else {
1077 /* 1077 /*
1078 * If heart beat timeout called with hb_outstanding set 1078 * If heart beat timeout called with hb_outstanding set
1079 * we need to take the HBA offline. 1079 * we need to give the hb mailbox cmd a chance to
1080 * complete or TMO.
1080 */ 1081 */
1081 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1082 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1082 "0459 Adapter heartbeat failure, " 1083 "0459 Adapter heartbeat still out"
1083 "taking this port offline.\n"); 1084 "standing:last compl time was %d ms.\n",
1084 1085 jiffies_to_msecs(jiffies
1085 spin_lock_irq(&phba->hbalock); 1086 - phba->last_completion_time));
1086 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1087 mod_timer(&phba->hb_tmofunc,
1087 spin_unlock_irq(&phba->hbalock); 1088 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
1088
1089 lpfc_offline_prep(phba);
1090 lpfc_offline(phba);
1091 lpfc_unblock_mgmt_io(phba);
1092 phba->link_state = LPFC_HBA_ERROR;
1093 lpfc_hba_down_post(phba);
1094 } 1089 }
1095 } 1090 }
1096} 1091}
@@ -1277,13 +1272,21 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1277 if (phba->hba_flag & DEFER_ERATT) 1272 if (phba->hba_flag & DEFER_ERATT)
1278 lpfc_handle_deferred_eratt(phba); 1273 lpfc_handle_deferred_eratt(phba);
1279 1274
1280 if (phba->work_hs & HS_FFER6) { 1275 if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
1281 /* Re-establishing Link */ 1276 if (phba->work_hs & HS_FFER6)
1282 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1277 /* Re-establishing Link */
1283 "1301 Re-establishing Link " 1278 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1284 "Data: x%x x%x x%x\n", 1279 "1301 Re-establishing Link "
1285 phba->work_hs, 1280 "Data: x%x x%x x%x\n",
1286 phba->work_status[0], phba->work_status[1]); 1281 phba->work_hs, phba->work_status[0],
1282 phba->work_status[1]);
1283 if (phba->work_hs & HS_FFER8)
1284 /* Device Zeroization */
1285 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1286 "2861 Host Authentication device "
1287 "zeroization Data:x%x x%x x%x\n",
1288 phba->work_hs, phba->work_status[0],
1289 phba->work_status[1]);
1287 1290
1288 spin_lock_irq(&phba->hbalock); 1291 spin_lock_irq(&phba->hbalock);
1289 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1292 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
@@ -2817,6 +2820,8 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
2817 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 2820 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
2818 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 2821 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
2819 2822
2823 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
2824
2820 /* This value is also unchanging */ 2825 /* This value is also unchanging */
2821 memset(fc_host_active_fc4s(shost), 0, 2826 memset(fc_host_active_fc4s(shost), 0,
2822 sizeof(fc_host_active_fc4s(shost))); 2827 sizeof(fc_host_active_fc4s(shost)));
@@ -2883,65 +2888,6 @@ lpfc_stop_port(struct lpfc_hba *phba)
2883} 2888}
2884 2889
2885/** 2890/**
2886 * lpfc_sli4_remove_dflt_fcf - Remove the driver default fcf record from the port.
2887 * @phba: pointer to lpfc hba data structure.
2888 *
2889 * This routine is invoked to remove the driver default fcf record from
2890 * the port. This routine currently acts on FCF Index 0.
2891 *
2892 **/
2893void
2894lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
2895{
2896 int rc = 0;
2897 LPFC_MBOXQ_t *mboxq;
2898 struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record;
2899 uint32_t mbox_tmo, req_len;
2900 uint32_t shdr_status, shdr_add_status;
2901
2902 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2903 if (!mboxq) {
2904 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2905 "2020 Failed to allocate mbox for ADD_FCF cmd\n");
2906 return;
2907 }
2908
2909 req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) -
2910 sizeof(struct lpfc_sli4_cfg_mhdr);
2911 rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2912 LPFC_MBOX_OPCODE_FCOE_DELETE_FCF,
2913 req_len, LPFC_SLI4_MBX_EMBED);
2914 /*
2915 * In phase 1, there is a single FCF index, 0. In phase2, the driver
2916 * supports multiple FCF indices.
2917 */
2918 del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
2919 bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
2920 bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
2921 phba->fcf.current_rec.fcf_indx);
2922
2923 if (!phba->sli4_hba.intr_enable)
2924 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2925 else {
2926 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
2927 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
2928 }
2929 /* The IOCTL status is embedded in the mailbox subheader. */
2930 shdr_status = bf_get(lpfc_mbox_hdr_status,
2931 &del_fcf_record->header.cfg_shdr.response);
2932 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2933 &del_fcf_record->header.cfg_shdr.response);
2934 if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2935 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2936 "2516 DEL FCF of default FCF Index failed "
2937 "mbx status x%x, status x%x add_status x%x\n",
2938 rc, shdr_status, shdr_add_status);
2939 }
2940 if (rc != MBX_TIMEOUT)
2941 mempool_free(mboxq, phba->mbox_mem_pool);
2942}
2943
2944/**
2945 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer 2891 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
2946 * @phba: Pointer to hba for which this call is being executed. 2892 * @phba: Pointer to hba for which this call is being executed.
2947 * 2893 *
@@ -4283,12 +4229,6 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4283{ 4229{
4284 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 4230 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
4285 4231
4286 /* unregister default FCFI from the HBA */
4287 lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
4288
4289 /* Free the default FCR table */
4290 lpfc_sli_remove_dflt_fcf(phba);
4291
4292 /* Free memory allocated for msi-x interrupt vector entries */ 4232 /* Free memory allocated for msi-x interrupt vector entries */
4293 kfree(phba->sli4_hba.msix_entries); 4233 kfree(phba->sli4_hba.msix_entries);
4294 4234
@@ -4316,9 +4256,6 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4316 lpfc_sli4_cq_event_release_all(phba); 4256 lpfc_sli4_cq_event_release_all(phba);
4317 lpfc_sli4_cq_event_pool_destroy(phba); 4257 lpfc_sli4_cq_event_pool_destroy(phba);
4318 4258
4319 /* Reset SLI4 HBA FCoE function */
4320 lpfc_pci_function_reset(phba);
4321
4322 /* Free the bsmbx region. */ 4259 /* Free the bsmbx region. */
4323 lpfc_destroy_bootstrap_mbox(phba); 4260 lpfc_destroy_bootstrap_mbox(phba);
4324 4261
@@ -4545,7 +4482,6 @@ lpfc_free_sgl_list(struct lpfc_hba *phba)
4545{ 4482{
4546 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 4483 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
4547 LIST_HEAD(sglq_list); 4484 LIST_HEAD(sglq_list);
4548 int rc = 0;
4549 4485
4550 spin_lock_irq(&phba->hbalock); 4486 spin_lock_irq(&phba->hbalock);
4551 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list); 4487 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
@@ -4558,11 +4494,6 @@ lpfc_free_sgl_list(struct lpfc_hba *phba)
4558 kfree(sglq_entry); 4494 kfree(sglq_entry);
4559 phba->sli4_hba.total_sglq_bufs--; 4495 phba->sli4_hba.total_sglq_bufs--;
4560 } 4496 }
4561 rc = lpfc_sli4_remove_all_sgl_pages(phba);
4562 if (rc) {
4563 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4564 "2005 Unable to deregister pages from HBA: %x\n", rc);
4565 }
4566 kfree(phba->sli4_hba.lpfc_els_sgl_array); 4497 kfree(phba->sli4_hba.lpfc_els_sgl_array);
4567} 4498}
4568 4499
@@ -4725,8 +4656,8 @@ out_free_mem:
4725 * 4656 *
4726 * Return codes 4657 * Return codes
4727 * 0 - successful 4658 * 0 - successful
4728 * ENOMEM - No availble memory 4659 * -ENOMEM - No availble memory
4729 * EIO - The mailbox failed to complete successfully. 4660 * -EIO - The mailbox failed to complete successfully.
4730 **/ 4661 **/
4731int 4662int
4732lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 4663lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
@@ -5419,7 +5350,7 @@ lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
5419 * 5350 *
5420 * Return codes 5351 * Return codes
5421 * 0 - successful 5352 * 0 - successful
5422 * ENOMEM - could not allocated memory. 5353 * -ENOMEM - could not allocated memory.
5423 **/ 5354 **/
5424static int 5355static int
5425lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) 5356lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
@@ -5518,8 +5449,8 @@ lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
5518 * 5449 *
5519 * Return codes 5450 * Return codes
5520 * 0 - successful 5451 * 0 - successful
5521 * ENOMEM - No availble memory 5452 * -ENOMEM - No availble memory
5522 * EIO - The mailbox failed to complete successfully. 5453 * -EIO - The mailbox failed to complete successfully.
5523 **/ 5454 **/
5524static int 5455static int
5525lpfc_sli4_read_config(struct lpfc_hba *phba) 5456lpfc_sli4_read_config(struct lpfc_hba *phba)
@@ -5622,8 +5553,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
5622 * 5553 *
5623 * Return codes 5554 * Return codes
5624 * 0 - successful 5555 * 0 - successful
5625 * ENOMEM - No availble memory 5556 * -ENOMEM - No availble memory
5626 * EIO - The mailbox failed to complete successfully. 5557 * -EIO - The mailbox failed to complete successfully.
5627 **/ 5558 **/
5628static int 5559static int
5629lpfc_setup_endian_order(struct lpfc_hba *phba) 5560lpfc_setup_endian_order(struct lpfc_hba *phba)
@@ -5671,8 +5602,8 @@ lpfc_setup_endian_order(struct lpfc_hba *phba)
5671 * 5602 *
5672 * Return codes 5603 * Return codes
5673 * 0 - successful 5604 * 0 - successful
5674 * ENOMEM - No availble memory 5605 * -ENOMEM - No availble memory
5675 * EIO - The mailbox failed to complete successfully. 5606 * -EIO - The mailbox failed to complete successfully.
5676 **/ 5607 **/
5677static int 5608static int
5678lpfc_sli4_queue_create(struct lpfc_hba *phba) 5609lpfc_sli4_queue_create(struct lpfc_hba *phba)
@@ -5966,8 +5897,8 @@ out_error:
5966 * 5897 *
5967 * Return codes 5898 * Return codes
5968 * 0 - successful 5899 * 0 - successful
5969 * ENOMEM - No availble memory 5900 * -ENOMEM - No availble memory
5970 * EIO - The mailbox failed to complete successfully. 5901 * -EIO - The mailbox failed to complete successfully.
5971 **/ 5902 **/
5972static void 5903static void
5973lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 5904lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
@@ -6030,8 +5961,8 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
6030 * 5961 *
6031 * Return codes 5962 * Return codes
6032 * 0 - successful 5963 * 0 - successful
6033 * ENOMEM - No availble memory 5964 * -ENOMEM - No availble memory
6034 * EIO - The mailbox failed to complete successfully. 5965 * -EIO - The mailbox failed to complete successfully.
6035 **/ 5966 **/
6036int 5967int
6037lpfc_sli4_queue_setup(struct lpfc_hba *phba) 5968lpfc_sli4_queue_setup(struct lpfc_hba *phba)
@@ -6275,8 +6206,8 @@ out_error:
6275 * 6206 *
6276 * Return codes 6207 * Return codes
6277 * 0 - successful 6208 * 0 - successful
6278 * ENOMEM - No availble memory 6209 * -ENOMEM - No availble memory
6279 * EIO - The mailbox failed to complete successfully. 6210 * -EIO - The mailbox failed to complete successfully.
6280 **/ 6211 **/
6281void 6212void
6282lpfc_sli4_queue_unset(struct lpfc_hba *phba) 6213lpfc_sli4_queue_unset(struct lpfc_hba *phba)
@@ -6481,8 +6412,8 @@ lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
6481 * 6412 *
6482 * Return codes 6413 * Return codes
6483 * 0 - successful 6414 * 0 - successful
6484 * ENOMEM - No availble memory 6415 * -ENOMEM - No availble memory
6485 * EIO - The mailbox failed to complete successfully. 6416 * -EIO - The mailbox failed to complete successfully.
6486 **/ 6417 **/
6487int 6418int
6488lpfc_pci_function_reset(struct lpfc_hba *phba) 6419lpfc_pci_function_reset(struct lpfc_hba *phba)
@@ -6592,50 +6523,6 @@ lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
6592} 6523}
6593 6524
6594/** 6525/**
6595 * lpfc_sli4_fcfi_unreg - Unregister fcfi to device
6596 * @phba: pointer to lpfc hba data structure.
6597 * @fcfi: fcf index.
6598 *
6599 * This routine is invoked to unregister a FCFI from device.
6600 **/
6601void
6602lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
6603{
6604 LPFC_MBOXQ_t *mbox;
6605 uint32_t mbox_tmo;
6606 int rc;
6607 unsigned long flags;
6608
6609 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6610
6611 if (!mbox)
6612 return;
6613
6614 lpfc_unreg_fcfi(mbox, fcfi);
6615
6616 if (!phba->sli4_hba.intr_enable)
6617 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6618 else {
6619 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
6620 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6621 }
6622 if (rc != MBX_TIMEOUT)
6623 mempool_free(mbox, phba->mbox_mem_pool);
6624 if (rc != MBX_SUCCESS)
6625 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6626 "2517 Unregister FCFI command failed "
6627 "status %d, mbxStatus x%x\n", rc,
6628 bf_get(lpfc_mqe_status, &mbox->u.mqe));
6629 else {
6630 spin_lock_irqsave(&phba->hbalock, flags);
6631 /* Mark the FCFI is no longer registered */
6632 phba->fcf.fcf_flag &=
6633 ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_SCAN_DONE);
6634 spin_unlock_irqrestore(&phba->hbalock, flags);
6635 }
6636}
6637
6638/**
6639 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 6526 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
6640 * @phba: pointer to lpfc hba data structure. 6527 * @phba: pointer to lpfc hba data structure.
6641 * 6528 *
@@ -7372,10 +7259,14 @@ lpfc_sli4_unset_hba(struct lpfc_hba *phba)
7372 7259
7373 phba->pport->work_port_events = 0; 7260 phba->pport->work_port_events = 0;
7374 7261
7375 lpfc_sli4_hba_down(phba); 7262 /* Stop the SLI4 device port */
7263 lpfc_stop_port(phba);
7376 7264
7377 lpfc_sli4_disable_intr(phba); 7265 lpfc_sli4_disable_intr(phba);
7378 7266
7267 /* Reset SLI4 HBA FCoE function */
7268 lpfc_pci_function_reset(phba);
7269
7379 return; 7270 return;
7380} 7271}
7381 7272
@@ -7424,15 +7315,15 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
7424 spin_unlock_irq(&phba->hbalock); 7315 spin_unlock_irq(&phba->hbalock);
7425 } 7316 }
7426 7317
7427 /* Tear down the queues in the HBA */
7428 lpfc_sli4_queue_unset(phba);
7429
7430 /* Disable PCI subsystem interrupt */ 7318 /* Disable PCI subsystem interrupt */
7431 lpfc_sli4_disable_intr(phba); 7319 lpfc_sli4_disable_intr(phba);
7432 7320
7433 /* Stop kthread signal shall trigger work_done one more time */ 7321 /* Stop kthread signal shall trigger work_done one more time */
7434 kthread_stop(phba->worker_thread); 7322 kthread_stop(phba->worker_thread);
7435 7323
7324 /* Reset SLI4 HBA FCoE function */
7325 lpfc_pci_function_reset(phba);
7326
7436 /* Stop the SLI4 device port */ 7327 /* Stop the SLI4 device port */
7437 phba->pport->work_port_events = 0; 7328 phba->pport->work_port_events = 0;
7438} 7329}
@@ -8368,7 +8259,7 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
8368 list_del_init(&vport->listentry); 8259 list_del_init(&vport->listentry);
8369 spin_unlock_irq(&phba->hbalock); 8260 spin_unlock_irq(&phba->hbalock);
8370 8261
8371 /* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi 8262 /* Perform scsi free before driver resource_unset since scsi
8372 * buffers are released to their corresponding pools here. 8263 * buffers are released to their corresponding pools here.
8373 */ 8264 */
8374 lpfc_scsi_free(phba); 8265 lpfc_scsi_free(phba);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 2e51aa6b45b3..3a658953486c 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -599,6 +599,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
599 iocb->ulpClass = CLASS3; 599 iocb->ulpClass = CLASS3;
600 psb->status = IOSTAT_SUCCESS; 600 psb->status = IOSTAT_SUCCESS;
601 /* Put it back into the SCSI buffer list */ 601 /* Put it back into the SCSI buffer list */
602 psb->cur_iocbq.context1 = psb;
602 lpfc_release_scsi_buf_s3(phba, psb); 603 lpfc_release_scsi_buf_s3(phba, psb);
603 604
604 } 605 }
@@ -849,6 +850,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
849 iocb->ulpBdeCount = 1; 850 iocb->ulpBdeCount = 1;
850 iocb->ulpLe = 1; 851 iocb->ulpLe = 1;
851 iocb->ulpClass = CLASS3; 852 iocb->ulpClass = CLASS3;
853 psb->cur_iocbq.context1 = psb;
852 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 854 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
853 pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE; 855 pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
854 else 856 else
@@ -2276,15 +2278,24 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2276 * Check SLI validation that all the transfer was actually done 2278 * Check SLI validation that all the transfer was actually done
2277 * (fcpi_parm should be zero). Apply check only to reads. 2279 * (fcpi_parm should be zero). Apply check only to reads.
2278 */ 2280 */
2279 } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm && 2281 } else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
2280 (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
2281 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR, 2282 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
2282 "9029 FCP Read Check Error Data: " 2283 "9029 FCP Read Check Error Data: "
2283 "x%x x%x x%x x%x\n", 2284 "x%x x%x x%x x%x x%x\n",
2284 be32_to_cpu(fcpcmd->fcpDl), 2285 be32_to_cpu(fcpcmd->fcpDl),
2285 be32_to_cpu(fcprsp->rspResId), 2286 be32_to_cpu(fcprsp->rspResId),
2286 fcpi_parm, cmnd->cmnd[0]); 2287 fcpi_parm, cmnd->cmnd[0], scsi_status);
2287 host_status = DID_ERROR; 2288 switch (scsi_status) {
2289 case SAM_STAT_GOOD:
2290 case SAM_STAT_CHECK_CONDITION:
2291 /* Fabric dropped a data frame. Fail any successful
2292 * command in which we detected dropped frames.
2293 * A status of good or some check conditions could
2294 * be considered a successful command.
2295 */
2296 host_status = DID_ERROR;
2297 break;
2298 }
2288 scsi_set_resid(cmnd, scsi_bufflen(cmnd)); 2299 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
2289 } 2300 }
2290 2301
@@ -3072,7 +3083,14 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
3072 if (ret) 3083 if (ret)
3073 return ret; 3084 return ret;
3074 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; 3085 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
3075 BUG_ON(!lpfc_cmd); 3086 if (!lpfc_cmd) {
3087 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3088 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
3089 "x%x ID %d "
3090 "LUN %d snum %#lx\n", ret, cmnd->device->id,
3091 cmnd->device->lun, cmnd->serial_number);
3092 return SUCCESS;
3093 }
3076 3094
3077 /* 3095 /*
3078 * If pCmd field of the corresponding lpfc_scsi_buf structure 3096 * If pCmd field of the corresponding lpfc_scsi_buf structure
@@ -3656,7 +3674,6 @@ lpfc_slave_alloc(struct scsi_device *sdev)
3656 * 3674 *
3657 * This routine configures following items 3675 * This routine configures following items
3658 * - Tag command queuing support for @sdev if supported. 3676 * - Tag command queuing support for @sdev if supported.
3659 * - Dev loss time out value of fc_rport.
3660 * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set. 3677 * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
3661 * 3678 *
3662 * Return codes: 3679 * Return codes:
@@ -3667,21 +3684,12 @@ lpfc_slave_configure(struct scsi_device *sdev)
3667{ 3684{
3668 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; 3685 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
3669 struct lpfc_hba *phba = vport->phba; 3686 struct lpfc_hba *phba = vport->phba;
3670 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
3671 3687
3672 if (sdev->tagged_supported) 3688 if (sdev->tagged_supported)
3673 scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth); 3689 scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
3674 else 3690 else
3675 scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth); 3691 scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);
3676 3692
3677 /*
3678 * Initialize the fc transport attributes for the target
3679 * containing this scsi device. Also note that the driver's
3680 * target pointer is stored in the starget_data for the
3681 * driver's sysfs entry point functions.
3682 */
3683 rport->dev_loss_tmo = vport->cfg_devloss_tmo;
3684
3685 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 3693 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
3686 lpfc_sli_handle_fast_ring_event(phba, 3694 lpfc_sli_handle_fast_ring_event(phba,
3687 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); 3695 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index fb8905f893f5..0d1e187b005d 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1677,6 +1677,8 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
1677 case MBX_RESUME_RPI: 1677 case MBX_RESUME_RPI:
1678 case MBX_READ_EVENT_LOG_STATUS: 1678 case MBX_READ_EVENT_LOG_STATUS:
1679 case MBX_READ_EVENT_LOG: 1679 case MBX_READ_EVENT_LOG:
1680 case MBX_SECURITY_MGMT:
1681 case MBX_AUTH_PORT:
1680 ret = mbxCommand; 1682 ret = mbxCommand;
1681 break; 1683 break;
1682 default: 1684 default:
@@ -1730,10 +1732,11 @@ lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
1730void 1732void
1731lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1733lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1732{ 1734{
1735 struct lpfc_vport *vport = pmb->vport;
1733 struct lpfc_dmabuf *mp; 1736 struct lpfc_dmabuf *mp;
1737 struct lpfc_nodelist *ndlp;
1734 uint16_t rpi, vpi; 1738 uint16_t rpi, vpi;
1735 int rc; 1739 int rc;
1736 struct lpfc_vport *vport = pmb->vport;
1737 1740
1738 mp = (struct lpfc_dmabuf *) (pmb->context1); 1741 mp = (struct lpfc_dmabuf *) (pmb->context1);
1739 1742
@@ -1774,6 +1777,19 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1774 return; 1777 return;
1775 } 1778 }
1776 1779
1780 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
1781 ndlp = (struct lpfc_nodelist *)pmb->context2;
1782 lpfc_nlp_put(ndlp);
1783 pmb->context2 = NULL;
1784 }
1785
1786 /* Check security permission status on INIT_LINK mailbox command */
1787 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
1788 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
1789 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
1790 "2860 SLI authentication is required "
1791 "for INIT_LINK but has not done yet\n");
1792
1777 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG) 1793 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
1778 lpfc_sli4_mbox_cmd_free(phba, pmb); 1794 lpfc_sli4_mbox_cmd_free(phba, pmb);
1779 else 1795 else
@@ -3651,11 +3667,15 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
3651 i = 0; 3667 i = 0;
3652 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) { 3668 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
3653 3669
3654 /* Check every 100ms for 5 retries, then every 500ms for 5, then 3670 /* Check every 10ms for 10 retries, then every 100ms for 90
3655 * every 2.5 sec for 5, then reset board and every 2.5 sec for 3671 * retries, then every 1 sec for 50 retires for a total of
3656 * 4. 3672 * ~60 seconds before reset the board again and check every
3673 * 1 sec for 50 retries. The up to 60 seconds before the
3674 * board ready is required by the Falcon FIPS zeroization
3675 * complete, and any reset the board in between shall cause
3676 * restart of zeroization, further delay the board ready.
3657 */ 3677 */
3658 if (i++ >= 20) { 3678 if (i++ >= 200) {
3659 /* Adapter failed to init, timeout, status reg 3679 /* Adapter failed to init, timeout, status reg
3660 <status> */ 3680 <status> */
3661 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3681 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -3683,16 +3703,15 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
3683 return -EIO; 3703 return -EIO;
3684 } 3704 }
3685 3705
3686 if (i <= 5) { 3706 if (i <= 10)
3687 msleep(10); 3707 msleep(10);
3688 } else if (i <= 10) { 3708 else if (i <= 100)
3689 msleep(500); 3709 msleep(100);
3690 } else { 3710 else
3691 msleep(2500); 3711 msleep(1000);
3692 }
3693 3712
3694 if (i == 15) { 3713 if (i == 150) {
3695 /* Do post */ 3714 /* Do post */
3696 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 3715 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3697 lpfc_sli_brdrestart(phba); 3716 lpfc_sli_brdrestart(phba);
3698 } 3717 }
@@ -4186,7 +4205,7 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
4186 * 4205 *
4187 * Return codes 4206 * Return codes
4188 * 0 - successful 4207 * 0 - successful
4189 * ENOMEM - could not allocated memory. 4208 * -ENOMEM - could not allocated memory.
4190 **/ 4209 **/
4191static int 4210static int
4192lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 4211lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
@@ -5943,6 +5962,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5943 uint8_t command_type = ELS_COMMAND_NON_FIP; 5962 uint8_t command_type = ELS_COMMAND_NON_FIP;
5944 uint8_t cmnd; 5963 uint8_t cmnd;
5945 uint16_t xritag; 5964 uint16_t xritag;
5965 uint16_t abrt_iotag;
5966 struct lpfc_iocbq *abrtiocbq;
5946 struct ulp_bde64 *bpl = NULL; 5967 struct ulp_bde64 *bpl = NULL;
5947 uint32_t els_id = ELS_ID_DEFAULT; 5968 uint32_t els_id = ELS_ID_DEFAULT;
5948 int numBdes, i; 5969 int numBdes, i;
@@ -6155,9 +6176,17 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6155 case CMD_ABORT_XRI_CX: 6176 case CMD_ABORT_XRI_CX:
6156 /* words 0-2 memcpy should be 0 rserved */ 6177 /* words 0-2 memcpy should be 0 rserved */
6157 /* port will send abts */ 6178 /* port will send abts */
6158 if (iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 6179 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
6180 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
6181 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
6182 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
6183 } else
6184 fip = 0;
6185
6186 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
6159 /* 6187 /*
6160 * The link is down so the fw does not need to send abts 6188 * The link is down, or the command was ELS_FIP
6189 * so the fw does not need to send abts
6161 * on the wire. 6190 * on the wire.
6162 */ 6191 */
6163 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); 6192 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
@@ -6901,37 +6930,6 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
6901} 6930}
6902 6931
6903/** 6932/**
6904 * lpfc_sli4_hba_down - PCI function resource cleanup for the SLI4 HBA
6905 * @phba: Pointer to HBA context object.
6906 *
6907 * This function cleans up all queues, iocb, buffers, mailbox commands while
6908 * shutting down the SLI4 HBA FCoE function. This function is called with no
6909 * lock held and always returns 1.
6910 *
6911 * This function does the following to cleanup driver FCoE function resources:
6912 * - Free discovery resources for each virtual port
6913 * - Cleanup any pending fabric iocbs
6914 * - Iterate through the iocb txq and free each entry in the list.
6915 * - Free up any buffer posted to the HBA.
6916 * - Clean up all the queue entries: WQ, RQ, MQ, EQ, CQ, etc.
6917 * - Free mailbox commands in the mailbox queue.
6918 **/
6919int
6920lpfc_sli4_hba_down(struct lpfc_hba *phba)
6921{
6922 /* Stop the SLI4 device port */
6923 lpfc_stop_port(phba);
6924
6925 /* Tear down the queues in the HBA */
6926 lpfc_sli4_queue_unset(phba);
6927
6928 /* unregister default FCFI from the HBA */
6929 lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
6930
6931 return 1;
6932}
6933
6934/**
6935 * lpfc_sli_pcimem_bcopy - SLI memory copy function 6933 * lpfc_sli_pcimem_bcopy - SLI memory copy function
6936 * @srcp: Source memory pointer. 6934 * @srcp: Source memory pointer.
6937 * @destp: Destination memory pointer. 6935 * @destp: Destination memory pointer.
@@ -7888,7 +7886,7 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba)
7888 /* Check if there is a deferred error condition is active */ 7886 /* Check if there is a deferred error condition is active */
7889 if ((HS_FFER1 & phba->work_hs) && 7887 if ((HS_FFER1 & phba->work_hs) &&
7890 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 7888 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
7891 HS_FFER6 | HS_FFER7) & phba->work_hs)) { 7889 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
7892 phba->hba_flag |= DEFER_ERATT; 7890 phba->hba_flag |= DEFER_ERATT;
7893 /* Clear all interrupt enable conditions */ 7891 /* Clear all interrupt enable conditions */
7894 writel(0, phba->HCregaddr); 7892 writel(0, phba->HCregaddr);
@@ -8204,7 +8202,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
8204 */ 8202 */
8205 if ((HS_FFER1 & phba->work_hs) && 8203 if ((HS_FFER1 & phba->work_hs) &&
8206 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 8204 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
8207 HS_FFER6 | HS_FFER7) & phba->work_hs)) { 8205 HS_FFER6 | HS_FFER7 | HS_FFER8) &
8206 phba->work_hs)) {
8208 phba->hba_flag |= DEFER_ERATT; 8207 phba->hba_flag |= DEFER_ERATT;
8209 /* Clear all interrupt enable conditions */ 8208 /* Clear all interrupt enable conditions */
8210 writel(0, phba->HCregaddr); 8209 writel(0, phba->HCregaddr);
@@ -8476,7 +8475,7 @@ lpfc_sli_intr_handler(int irq, void *dev_id)
8476 * If there is deferred error attention, do not check for any interrupt. 8475 * If there is deferred error attention, do not check for any interrupt.
8477 */ 8476 */
8478 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 8477 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
8479 spin_unlock_irq(&phba->hbalock); 8478 spin_unlock(&phba->hbalock);
8480 return IRQ_NONE; 8479 return IRQ_NONE;
8481 } 8480 }
8482 8481
@@ -9724,8 +9723,8 @@ out_fail:
9724 * command to finish before continuing. 9723 * command to finish before continuing.
9725 * 9724 *
9726 * On success this function will return a zero. If unable to allocate enough 9725 * On success this function will return a zero. If unable to allocate enough
9727 * memory this function will return ENOMEM. If the queue create mailbox command 9726 * memory this function will return -ENOMEM. If the queue create mailbox command
9728 * fails this function will return ENXIO. 9727 * fails this function will return -ENXIO.
9729 **/ 9728 **/
9730uint32_t 9729uint32_t
9731lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax) 9730lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
@@ -9840,8 +9839,8 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
9840 * command to finish before continuing. 9839 * command to finish before continuing.
9841 * 9840 *
9842 * On success this function will return a zero. If unable to allocate enough 9841 * On success this function will return a zero. If unable to allocate enough
9843 * memory this function will return ENOMEM. If the queue create mailbox command 9842 * memory this function will return -ENOMEM. If the queue create mailbox command
9844 * fails this function will return ENXIO. 9843 * fails this function will return -ENXIO.
9845 **/ 9844 **/
9846uint32_t 9845uint32_t
9847lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, 9846lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
@@ -10011,8 +10010,8 @@ lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
10011 * command to finish before continuing. 10010 * command to finish before continuing.
10012 * 10011 *
10013 * On success this function will return a zero. If unable to allocate enough 10012 * On success this function will return a zero. If unable to allocate enough
10014 * memory this function will return ENOMEM. If the queue create mailbox command 10013 * memory this function will return -ENOMEM. If the queue create mailbox command
10015 * fails this function will return ENXIO. 10014 * fails this function will return -ENXIO.
10016 **/ 10015 **/
10017int32_t 10016int32_t
10018lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, 10017lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
@@ -10146,8 +10145,8 @@ out:
10146 * command to finish before continuing. 10145 * command to finish before continuing.
10147 * 10146 *
10148 * On success this function will return a zero. If unable to allocate enough 10147 * On success this function will return a zero. If unable to allocate enough
10149 * memory this function will return ENOMEM. If the queue create mailbox command 10148 * memory this function will return -ENOMEM. If the queue create mailbox command
10150 * fails this function will return ENXIO. 10149 * fails this function will return -ENXIO.
10151 **/ 10150 **/
10152uint32_t 10151uint32_t
10153lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, 10152lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
@@ -10234,8 +10233,8 @@ out:
10234 * mailbox command to finish before continuing. 10233 * mailbox command to finish before continuing.
10235 * 10234 *
10236 * On success this function will return a zero. If unable to allocate enough 10235 * On success this function will return a zero. If unable to allocate enough
10237 * memory this function will return ENOMEM. If the queue create mailbox command 10236 * memory this function will return -ENOMEM. If the queue create mailbox command
10238 * fails this function will return ENXIO. 10237 * fails this function will return -ENXIO.
10239 **/ 10238 **/
10240uint32_t 10239uint32_t
10241lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, 10240lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
@@ -10403,7 +10402,7 @@ out:
10403 * The @eq struct is used to get the queue ID of the queue to destroy. 10402 * The @eq struct is used to get the queue ID of the queue to destroy.
10404 * 10403 *
10405 * On success this function will return a zero. If the queue destroy mailbox 10404 * On success this function will return a zero. If the queue destroy mailbox
10406 * command fails this function will return ENXIO. 10405 * command fails this function will return -ENXIO.
10407 **/ 10406 **/
10408uint32_t 10407uint32_t
10409lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) 10408lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
@@ -10458,7 +10457,7 @@ lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
10458 * The @cq struct is used to get the queue ID of the queue to destroy. 10457 * The @cq struct is used to get the queue ID of the queue to destroy.
10459 * 10458 *
10460 * On success this function will return a zero. If the queue destroy mailbox 10459 * On success this function will return a zero. If the queue destroy mailbox
10461 * command fails this function will return ENXIO. 10460 * command fails this function will return -ENXIO.
10462 **/ 10461 **/
10463uint32_t 10462uint32_t
10464lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) 10463lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
@@ -10511,7 +10510,7 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
10511 * The @mq struct is used to get the queue ID of the queue to destroy. 10510 * The @mq struct is used to get the queue ID of the queue to destroy.
10512 * 10511 *
10513 * On success this function will return a zero. If the queue destroy mailbox 10512 * On success this function will return a zero. If the queue destroy mailbox
10514 * command fails this function will return ENXIO. 10513 * command fails this function will return -ENXIO.
10515 **/ 10514 **/
10516uint32_t 10515uint32_t
10517lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) 10516lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
@@ -10564,7 +10563,7 @@ lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
10564 * The @wq struct is used to get the queue ID of the queue to destroy. 10563 * The @wq struct is used to get the queue ID of the queue to destroy.
10565 * 10564 *
10566 * On success this function will return a zero. If the queue destroy mailbox 10565 * On success this function will return a zero. If the queue destroy mailbox
10567 * command fails this function will return ENXIO. 10566 * command fails this function will return -ENXIO.
10568 **/ 10567 **/
10569uint32_t 10568uint32_t
10570lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) 10569lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
@@ -10616,7 +10615,7 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
10616 * The @rq struct is used to get the queue ID of the queue to destroy. 10615 * The @rq struct is used to get the queue ID of the queue to destroy.
10617 * 10616 *
10618 * On success this function will return a zero. If the queue destroy mailbox 10617 * On success this function will return a zero. If the queue destroy mailbox
10619 * command fails this function will return ENXIO. 10618 * command fails this function will return -ENXIO.
10620 **/ 10619 **/
10621uint32_t 10620uint32_t
10622lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, 10621lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
@@ -10758,51 +10757,6 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
10758 } 10757 }
10759 return 0; 10758 return 0;
10760} 10759}
10761/**
10762 * lpfc_sli4_remove_all_sgl_pages - Post scatter gather list for an XRI to HBA
10763 * @phba: The virtual port for which this call being executed.
10764 *
10765 * This routine will remove all of the sgl pages registered with the hba.
10766 *
10767 * Return codes:
10768 * 0 - Success
10769 * -ENXIO, -ENOMEM - Failure
10770 **/
10771int
10772lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *phba)
10773{
10774 LPFC_MBOXQ_t *mbox;
10775 int rc;
10776 uint32_t shdr_status, shdr_add_status;
10777 union lpfc_sli4_cfg_shdr *shdr;
10778
10779 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10780 if (!mbox)
10781 return -ENOMEM;
10782
10783 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10784 LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES, 0,
10785 LPFC_SLI4_MBX_EMBED);
10786 if (!phba->sli4_hba.intr_enable)
10787 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10788 else
10789 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
10790 /* The IOCTL status is embedded in the mailbox subheader. */
10791 shdr = (union lpfc_sli4_cfg_shdr *)
10792 &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
10793 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10794 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10795 if (rc != MBX_TIMEOUT)
10796 mempool_free(mbox, phba->mbox_mem_pool);
10797 if (shdr_status || shdr_add_status || rc) {
10798 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10799 "2512 REMOVE_ALL_SGL_PAGES mailbox failed with "
10800 "status x%x add_status x%x, mbx status x%x\n",
10801 shdr_status, shdr_add_status, rc);
10802 rc = -ENXIO;
10803 }
10804 return rc;
10805}
10806 10760
10807/** 10761/**
10808 * lpfc_sli4_next_xritag - Get an xritag for the io 10762 * lpfc_sli4_next_xritag - Get an xritag for the io
@@ -11819,7 +11773,7 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
11819 * 11773 *
11820 * Return codes 11774 * Return codes
11821 * 0 - successful 11775 * 0 - successful
11822 * EIO - The mailbox failed to complete successfully. 11776 * -EIO - The mailbox failed to complete successfully.
11823 * When this error occurs, the driver is not guaranteed 11777 * When this error occurs, the driver is not guaranteed
11824 * to have any rpi regions posted to the device and 11778 * to have any rpi regions posted to the device and
11825 * must either attempt to repost the regions or take a 11779 * must either attempt to repost the regions or take a
@@ -11857,8 +11811,8 @@ lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
11857 * 11811 *
11858 * Return codes 11812 * Return codes
11859 * 0 - successful 11813 * 0 - successful
11860 * ENOMEM - No available memory 11814 * -ENOMEM - No available memory
11861 * EIO - The mailbox failed to complete successfully. 11815 * -EIO - The mailbox failed to complete successfully.
11862 **/ 11816 **/
11863int 11817int
11864lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) 11818lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
@@ -12805,8 +12759,11 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
12805 LPFC_MBOXQ_t *mb, *nextmb; 12759 LPFC_MBOXQ_t *mb, *nextmb;
12806 struct lpfc_dmabuf *mp; 12760 struct lpfc_dmabuf *mp;
12807 struct lpfc_nodelist *ndlp; 12761 struct lpfc_nodelist *ndlp;
12762 struct lpfc_nodelist *act_mbx_ndlp = NULL;
12808 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 12763 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
12764 LIST_HEAD(mbox_cmd_list);
12809 12765
12766 /* Clean up internally queued mailbox commands with the vport */
12810 spin_lock_irq(&phba->hbalock); 12767 spin_lock_irq(&phba->hbalock);
12811 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 12768 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
12812 if (mb->vport != vport) 12769 if (mb->vport != vport)
@@ -12816,6 +12773,28 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
12816 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 12773 (mb->u.mb.mbxCommand != MBX_REG_VPI))
12817 continue; 12774 continue;
12818 12775
12776 list_del(&mb->list);
12777 list_add_tail(&mb->list, &mbox_cmd_list);
12778 }
12779 /* Clean up active mailbox command with the vport */
12780 mb = phba->sli.mbox_active;
12781 if (mb && (mb->vport == vport)) {
12782 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
12783 (mb->u.mb.mbxCommand == MBX_REG_VPI))
12784 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12785 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
12786 act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
12787 /* Put reference count for delayed processing */
12788 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
12789 /* Unregister the RPI when mailbox complete */
12790 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
12791 }
12792 }
12793 spin_unlock_irq(&phba->hbalock);
12794
12795 /* Release the cleaned-up mailbox commands */
12796 while (!list_empty(&mbox_cmd_list)) {
12797 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
12819 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 12798 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
12820 if (phba->sli_rev == LPFC_SLI_REV4) 12799 if (phba->sli_rev == LPFC_SLI_REV4)
12821 __lpfc_sli4_free_rpi(phba, 12800 __lpfc_sli4_free_rpi(phba,
@@ -12826,36 +12805,24 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
12826 kfree(mp); 12805 kfree(mp);
12827 } 12806 }
12828 ndlp = (struct lpfc_nodelist *) mb->context2; 12807 ndlp = (struct lpfc_nodelist *) mb->context2;
12808 mb->context2 = NULL;
12829 if (ndlp) { 12809 if (ndlp) {
12830 spin_lock_irq(shost->host_lock); 12810 spin_lock(shost->host_lock);
12831 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 12811 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
12832 spin_unlock_irq(shost->host_lock); 12812 spin_unlock(shost->host_lock);
12833 lpfc_nlp_put(ndlp); 12813 lpfc_nlp_put(ndlp);
12834 mb->context2 = NULL;
12835 } 12814 }
12836 } 12815 }
12837 list_del(&mb->list);
12838 mempool_free(mb, phba->mbox_mem_pool); 12816 mempool_free(mb, phba->mbox_mem_pool);
12839 } 12817 }
12840 mb = phba->sli.mbox_active; 12818
12841 if (mb && (mb->vport == vport)) { 12819 /* Release the ndlp with the cleaned-up active mailbox command */
12842 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) || 12820 if (act_mbx_ndlp) {
12843 (mb->u.mb.mbxCommand == MBX_REG_VPI)) 12821 spin_lock(shost->host_lock);
12844 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 12822 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
12845 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 12823 spin_unlock(shost->host_lock);
12846 ndlp = (struct lpfc_nodelist *) mb->context2; 12824 lpfc_nlp_put(act_mbx_ndlp);
12847 if (ndlp) {
12848 spin_lock_irq(shost->host_lock);
12849 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
12850 spin_unlock_irq(shost->host_lock);
12851 lpfc_nlp_put(ndlp);
12852 mb->context2 = NULL;
12853 }
12854 /* Unregister the RPI when mailbox complete */
12855 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
12856 }
12857 } 12825 }
12858 spin_unlock_irq(&phba->hbalock);
12859} 12826}
12860 12827
12861/** 12828/**
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index a3b24d99a2a7..a0ca572ec28b 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -481,7 +481,6 @@ struct lpfc_rpi_hdr {
481 */ 481 */
482int lpfc_pci_function_reset(struct lpfc_hba *); 482int lpfc_pci_function_reset(struct lpfc_hba *);
483int lpfc_sli4_hba_setup(struct lpfc_hba *); 483int lpfc_sli4_hba_setup(struct lpfc_hba *);
484int lpfc_sli4_hba_down(struct lpfc_hba *);
485int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t, 484int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t,
486 uint8_t, uint32_t, bool); 485 uint8_t, uint32_t, bool);
487void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *); 486void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *);
@@ -514,7 +513,6 @@ int lpfc_sli4_queue_setup(struct lpfc_hba *);
514void lpfc_sli4_queue_unset(struct lpfc_hba *); 513void lpfc_sli4_queue_unset(struct lpfc_hba *);
515int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t); 514int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
516int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *); 515int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
517int lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *);
518uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *); 516uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
519int lpfc_sli4_post_async_mbox(struct lpfc_hba *); 517int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
520int lpfc_sli4_post_sgl_list(struct lpfc_hba *phba); 518int lpfc_sli4_post_sgl_list(struct lpfc_hba *phba);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 61afb3420a96..f93120e4c796 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.16" 21#define LPFC_DRIVER_VERSION "8.3.17"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" 24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 1655507a682c..a5281ce893d0 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -580,7 +580,9 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
580 "static vport.\n"); 580 "static vport.\n");
581 return VPORT_ERROR; 581 return VPORT_ERROR;
582 } 582 }
583 583 spin_lock_irq(&phba->hbalock);
584 vport->load_flag |= FC_UNLOADING;
585 spin_unlock_irq(&phba->hbalock);
584 /* 586 /*
585 * If we are not unloading the driver then prevent the vport_delete 587 * If we are not unloading the driver then prevent the vport_delete
586 * from happening until after this vport's discovery is finished. 588 * from happening until after this vport's discovery is finished.
@@ -618,10 +620,6 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
618 scsi_host_put(shost); 620 scsi_host_put(shost);
619 return VPORT_INVAL; 621 return VPORT_INVAL;
620 } 622 }
621 spin_lock_irq(&phba->hbalock);
622 vport->load_flag |= FC_UNLOADING;
623 spin_unlock_irq(&phba->hbalock);
624
625 lpfc_free_sysfs_attr(vport); 623 lpfc_free_sysfs_attr(vport);
626 624
627 lpfc_debugfs_terminate(vport); 625 lpfc_debugfs_terminate(vport);
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 51e2579a743a..d3c9cdee292b 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -61,6 +61,11 @@ MODULE_VERSION(MEGASAS_VERSION);
61MODULE_AUTHOR("megaraidlinux@lsi.com"); 61MODULE_AUTHOR("megaraidlinux@lsi.com");
62MODULE_DESCRIPTION("LSI MegaRAID SAS Driver"); 62MODULE_DESCRIPTION("LSI MegaRAID SAS Driver");
63 63
64static int megasas_transition_to_ready(struct megasas_instance *instance);
65static int megasas_get_pd_list(struct megasas_instance *instance);
66static int megasas_issue_init_mfi(struct megasas_instance *instance);
67static int megasas_register_aen(struct megasas_instance *instance,
68 u32 seq_num, u32 class_locale_word);
64/* 69/*
65 * PCI ID table for all supported controllers 70 * PCI ID table for all supported controllers
66 */ 71 */
@@ -163,7 +168,7 @@ megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
163static inline void 168static inline void
164megasas_enable_intr_xscale(struct megasas_register_set __iomem * regs) 169megasas_enable_intr_xscale(struct megasas_register_set __iomem * regs)
165{ 170{
166 writel(1, &(regs)->outbound_intr_mask); 171 writel(0, &(regs)->outbound_intr_mask);
167 172
168 /* Dummy readl to force pci flush */ 173 /* Dummy readl to force pci flush */
169 readl(&regs->outbound_intr_mask); 174 readl(&regs->outbound_intr_mask);
@@ -199,24 +204,27 @@ static int
199megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs) 204megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs)
200{ 205{
201 u32 status; 206 u32 status;
207 u32 mfiStatus = 0;
202 /* 208 /*
203 * Check if it is our interrupt 209 * Check if it is our interrupt
204 */ 210 */
205 status = readl(&regs->outbound_intr_status); 211 status = readl(&regs->outbound_intr_status);
206 212
207 if (!(status & MFI_OB_INTR_STATUS_MASK)) { 213 if (status & MFI_OB_INTR_STATUS_MASK)
208 return 1; 214 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
209 } 215 if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
216 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
210 217
211 /* 218 /*
212 * Clear the interrupt by writing back the same value 219 * Clear the interrupt by writing back the same value
213 */ 220 */
214 writel(status, &regs->outbound_intr_status); 221 if (mfiStatus)
222 writel(status, &regs->outbound_intr_status);
215 223
216 /* Dummy readl to force pci flush */ 224 /* Dummy readl to force pci flush */
217 readl(&regs->outbound_intr_status); 225 readl(&regs->outbound_intr_status);
218 226
219 return 0; 227 return mfiStatus;
220} 228}
221 229
222/** 230/**
@@ -231,8 +239,69 @@ megasas_fire_cmd_xscale(struct megasas_instance *instance,
231 u32 frame_count, 239 u32 frame_count,
232 struct megasas_register_set __iomem *regs) 240 struct megasas_register_set __iomem *regs)
233{ 241{
242 unsigned long flags;
243 spin_lock_irqsave(&instance->hba_lock, flags);
234 writel((frame_phys_addr >> 3)|(frame_count), 244 writel((frame_phys_addr >> 3)|(frame_count),
235 &(regs)->inbound_queue_port); 245 &(regs)->inbound_queue_port);
246 spin_unlock_irqrestore(&instance->hba_lock, flags);
247}
248
249/**
250 * megasas_adp_reset_xscale - For controller reset
251 * @regs: MFI register set
252 */
253static int
254megasas_adp_reset_xscale(struct megasas_instance *instance,
255 struct megasas_register_set __iomem *regs)
256{
257 u32 i;
258 u32 pcidata;
259 writel(MFI_ADP_RESET, &regs->inbound_doorbell);
260
261 for (i = 0; i < 3; i++)
262 msleep(1000); /* sleep for 3 secs */
263 pcidata = 0;
264 pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
265 printk(KERN_NOTICE "pcidata = %x\n", pcidata);
266 if (pcidata & 0x2) {
267 printk(KERN_NOTICE "mfi 1068 offset read=%x\n", pcidata);
268 pcidata &= ~0x2;
269 pci_write_config_dword(instance->pdev,
270 MFI_1068_PCSR_OFFSET, pcidata);
271
272 for (i = 0; i < 2; i++)
273 msleep(1000); /* need to wait 2 secs again */
274
275 pcidata = 0;
276 pci_read_config_dword(instance->pdev,
277 MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
278 printk(KERN_NOTICE "1068 offset handshake read=%x\n", pcidata);
279 if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
280 printk(KERN_NOTICE "1068 offset pcidt=%x\n", pcidata);
281 pcidata = 0;
282 pci_write_config_dword(instance->pdev,
283 MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
284 }
285 }
286 return 0;
287}
288
289/**
290 * megasas_check_reset_xscale - For controller reset check
291 * @regs: MFI register set
292 */
293static int
294megasas_check_reset_xscale(struct megasas_instance *instance,
295 struct megasas_register_set __iomem *regs)
296{
297 u32 consumer;
298 consumer = *instance->consumer;
299
300 if ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) &&
301 (*instance->consumer == MEGASAS_ADPRESET_INPROG_SIGN)) {
302 return 1;
303 }
304 return 0;
236} 305}
237 306
238static struct megasas_instance_template megasas_instance_template_xscale = { 307static struct megasas_instance_template megasas_instance_template_xscale = {
@@ -242,6 +311,8 @@ static struct megasas_instance_template megasas_instance_template_xscale = {
242 .disable_intr = megasas_disable_intr_xscale, 311 .disable_intr = megasas_disable_intr_xscale,
243 .clear_intr = megasas_clear_intr_xscale, 312 .clear_intr = megasas_clear_intr_xscale,
244 .read_fw_status_reg = megasas_read_fw_status_reg_xscale, 313 .read_fw_status_reg = megasas_read_fw_status_reg_xscale,
314 .adp_reset = megasas_adp_reset_xscale,
315 .check_reset = megasas_check_reset_xscale,
245}; 316};
246 317
247/** 318/**
@@ -263,7 +334,7 @@ megasas_enable_intr_ppc(struct megasas_register_set __iomem * regs)
263{ 334{
264 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear); 335 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
265 336
266 writel(~0x80000004, &(regs)->outbound_intr_mask); 337 writel(~0x80000000, &(regs)->outbound_intr_mask);
267 338
268 /* Dummy readl to force pci flush */ 339 /* Dummy readl to force pci flush */
269 readl(&regs->outbound_intr_mask); 340 readl(&regs->outbound_intr_mask);
@@ -306,7 +377,7 @@ megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
306 status = readl(&regs->outbound_intr_status); 377 status = readl(&regs->outbound_intr_status);
307 378
308 if (!(status & MFI_REPLY_1078_MESSAGE_INTERRUPT)) { 379 if (!(status & MFI_REPLY_1078_MESSAGE_INTERRUPT)) {
309 return 1; 380 return 0;
310 } 381 }
311 382
312 /* 383 /*
@@ -317,7 +388,7 @@ megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
317 /* Dummy readl to force pci flush */ 388 /* Dummy readl to force pci flush */
318 readl(&regs->outbound_doorbell_clear); 389 readl(&regs->outbound_doorbell_clear);
319 390
320 return 0; 391 return 1;
321} 392}
322/** 393/**
323 * megasas_fire_cmd_ppc - Sends command to the FW 394 * megasas_fire_cmd_ppc - Sends command to the FW
@@ -331,10 +402,34 @@ megasas_fire_cmd_ppc(struct megasas_instance *instance,
331 u32 frame_count, 402 u32 frame_count,
332 struct megasas_register_set __iomem *regs) 403 struct megasas_register_set __iomem *regs)
333{ 404{
405 unsigned long flags;
406 spin_lock_irqsave(&instance->hba_lock, flags);
334 writel((frame_phys_addr | (frame_count<<1))|1, 407 writel((frame_phys_addr | (frame_count<<1))|1,
335 &(regs)->inbound_queue_port); 408 &(regs)->inbound_queue_port);
409 spin_unlock_irqrestore(&instance->hba_lock, flags);
410}
411
412/**
413 * megasas_adp_reset_ppc - For controller reset
414 * @regs: MFI register set
415 */
416static int
417megasas_adp_reset_ppc(struct megasas_instance *instance,
418 struct megasas_register_set __iomem *regs)
419{
420 return 0;
336} 421}
337 422
423/**
424 * megasas_check_reset_ppc - For controller reset check
425 * @regs: MFI register set
426 */
427static int
428megasas_check_reset_ppc(struct megasas_instance *instance,
429 struct megasas_register_set __iomem *regs)
430{
431 return 0;
432}
338static struct megasas_instance_template megasas_instance_template_ppc = { 433static struct megasas_instance_template megasas_instance_template_ppc = {
339 434
340 .fire_cmd = megasas_fire_cmd_ppc, 435 .fire_cmd = megasas_fire_cmd_ppc,
@@ -342,6 +437,8 @@ static struct megasas_instance_template megasas_instance_template_ppc = {
342 .disable_intr = megasas_disable_intr_ppc, 437 .disable_intr = megasas_disable_intr_ppc,
343 .clear_intr = megasas_clear_intr_ppc, 438 .clear_intr = megasas_clear_intr_ppc,
344 .read_fw_status_reg = megasas_read_fw_status_reg_ppc, 439 .read_fw_status_reg = megasas_read_fw_status_reg_ppc,
440 .adp_reset = megasas_adp_reset_ppc,
441 .check_reset = megasas_check_reset_ppc,
345}; 442};
346 443
347/** 444/**
@@ -396,7 +493,7 @@ megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
396 status = readl(&regs->outbound_intr_status); 493 status = readl(&regs->outbound_intr_status);
397 494
398 if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) { 495 if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
399 return 1; 496 return 0;
400 } 497 }
401 498
402 /* 499 /*
@@ -409,7 +506,7 @@ megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
409 */ 506 */
410 readl(&regs->outbound_intr_status); 507 readl(&regs->outbound_intr_status);
411 508
412 return 0; 509 return 1;
413} 510}
414 511
415/** 512/**
@@ -425,11 +522,33 @@ megasas_fire_cmd_skinny(struct megasas_instance *instance,
425 struct megasas_register_set __iomem *regs) 522 struct megasas_register_set __iomem *regs)
426{ 523{
427 unsigned long flags; 524 unsigned long flags;
428 spin_lock_irqsave(&instance->fire_lock, flags); 525 spin_lock_irqsave(&instance->hba_lock, flags);
429 writel(0, &(regs)->inbound_high_queue_port); 526 writel(0, &(regs)->inbound_high_queue_port);
430 writel((frame_phys_addr | (frame_count<<1))|1, 527 writel((frame_phys_addr | (frame_count<<1))|1,
431 &(regs)->inbound_low_queue_port); 528 &(regs)->inbound_low_queue_port);
432 spin_unlock_irqrestore(&instance->fire_lock, flags); 529 spin_unlock_irqrestore(&instance->hba_lock, flags);
530}
531
532/**
533 * megasas_adp_reset_skinny - For controller reset
534 * @regs: MFI register set
535 */
536static int
537megasas_adp_reset_skinny(struct megasas_instance *instance,
538 struct megasas_register_set __iomem *regs)
539{
540 return 0;
541}
542
543/**
544 * megasas_check_reset_skinny - For controller reset check
545 * @regs: MFI register set
546 */
547static int
548megasas_check_reset_skinny(struct megasas_instance *instance,
549 struct megasas_register_set __iomem *regs)
550{
551 return 0;
433} 552}
434 553
435static struct megasas_instance_template megasas_instance_template_skinny = { 554static struct megasas_instance_template megasas_instance_template_skinny = {
@@ -439,6 +558,8 @@ static struct megasas_instance_template megasas_instance_template_skinny = {
439 .disable_intr = megasas_disable_intr_skinny, 558 .disable_intr = megasas_disable_intr_skinny,
440 .clear_intr = megasas_clear_intr_skinny, 559 .clear_intr = megasas_clear_intr_skinny,
441 .read_fw_status_reg = megasas_read_fw_status_reg_skinny, 560 .read_fw_status_reg = megasas_read_fw_status_reg_skinny,
561 .adp_reset = megasas_adp_reset_skinny,
562 .check_reset = megasas_check_reset_skinny,
442}; 563};
443 564
444 565
@@ -494,23 +615,29 @@ static int
494megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs) 615megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
495{ 616{
496 u32 status; 617 u32 status;
618 u32 mfiStatus = 0;
497 /* 619 /*
498 * Check if it is our interrupt 620 * Check if it is our interrupt
499 */ 621 */
500 status = readl(&regs->outbound_intr_status); 622 status = readl(&regs->outbound_intr_status);
501 623
502 if (!(status & MFI_GEN2_ENABLE_INTERRUPT_MASK)) 624 if (status & MFI_GEN2_ENABLE_INTERRUPT_MASK) {
503 return 1; 625 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
626 }
627 if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
628 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
629 }
504 630
505 /* 631 /*
506 * Clear the interrupt by writing back the same value 632 * Clear the interrupt by writing back the same value
507 */ 633 */
508 writel(status, &regs->outbound_doorbell_clear); 634 if (mfiStatus)
635 writel(status, &regs->outbound_doorbell_clear);
509 636
510 /* Dummy readl to force pci flush */ 637 /* Dummy readl to force pci flush */
511 readl(&regs->outbound_intr_status); 638 readl(&regs->outbound_intr_status);
512 639
513 return 0; 640 return mfiStatus;
514} 641}
515/** 642/**
516 * megasas_fire_cmd_gen2 - Sends command to the FW 643 * megasas_fire_cmd_gen2 - Sends command to the FW
@@ -524,8 +651,74 @@ megasas_fire_cmd_gen2(struct megasas_instance *instance,
524 u32 frame_count, 651 u32 frame_count,
525 struct megasas_register_set __iomem *regs) 652 struct megasas_register_set __iomem *regs)
526{ 653{
654 unsigned long flags;
655 spin_lock_irqsave(&instance->hba_lock, flags);
527 writel((frame_phys_addr | (frame_count<<1))|1, 656 writel((frame_phys_addr | (frame_count<<1))|1,
528 &(regs)->inbound_queue_port); 657 &(regs)->inbound_queue_port);
658 spin_unlock_irqrestore(&instance->hba_lock, flags);
659}
660
661/**
662 * megasas_adp_reset_gen2 - For controller reset
663 * @regs: MFI register set
664 */
665static int
666megasas_adp_reset_gen2(struct megasas_instance *instance,
667 struct megasas_register_set __iomem *reg_set)
668{
669 u32 retry = 0 ;
670 u32 HostDiag;
671
672 writel(0, &reg_set->seq_offset);
673 writel(4, &reg_set->seq_offset);
674 writel(0xb, &reg_set->seq_offset);
675 writel(2, &reg_set->seq_offset);
676 writel(7, &reg_set->seq_offset);
677 writel(0xd, &reg_set->seq_offset);
678 msleep(1000);
679
680 HostDiag = (u32)readl(&reg_set->host_diag);
681
682 while ( !( HostDiag & DIAG_WRITE_ENABLE) ) {
683 msleep(100);
684 HostDiag = (u32)readl(&reg_set->host_diag);
685 printk(KERN_NOTICE "RESETGEN2: retry=%x, hostdiag=%x\n",
686 retry, HostDiag);
687
688 if (retry++ >= 100)
689 return 1;
690
691 }
692
693 printk(KERN_NOTICE "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
694
695 writel((HostDiag | DIAG_RESET_ADAPTER), &reg_set->host_diag);
696
697 ssleep(10);
698
699 HostDiag = (u32)readl(&reg_set->host_diag);
700 while ( ( HostDiag & DIAG_RESET_ADAPTER) ) {
701 msleep(100);
702 HostDiag = (u32)readl(&reg_set->host_diag);
703 printk(KERN_NOTICE "RESET_GEN2: retry=%x, hostdiag=%x\n",
704 retry, HostDiag);
705
706 if (retry++ >= 1000)
707 return 1;
708
709 }
710 return 0;
711}
712
713/**
714 * megasas_check_reset_gen2 - For controller reset check
715 * @regs: MFI register set
716 */
717static int
718megasas_check_reset_gen2(struct megasas_instance *instance,
719 struct megasas_register_set __iomem *regs)
720{
721 return 0;
529} 722}
530 723
531static struct megasas_instance_template megasas_instance_template_gen2 = { 724static struct megasas_instance_template megasas_instance_template_gen2 = {
@@ -535,11 +728,13 @@ static struct megasas_instance_template megasas_instance_template_gen2 = {
535 .disable_intr = megasas_disable_intr_gen2, 728 .disable_intr = megasas_disable_intr_gen2,
536 .clear_intr = megasas_clear_intr_gen2, 729 .clear_intr = megasas_clear_intr_gen2,
537 .read_fw_status_reg = megasas_read_fw_status_reg_gen2, 730 .read_fw_status_reg = megasas_read_fw_status_reg_gen2,
731 .adp_reset = megasas_adp_reset_gen2,
732 .check_reset = megasas_check_reset_gen2,
538}; 733};
539 734
540/** 735/**
541* This is the end of set of functions & definitions 736* This is the end of set of functions & definitions
542* specific to ppc (deviceid : 0x60) controllers 737* specific to gen2 (deviceid : 0x78, 0x79) controllers
543*/ 738*/
544 739
545/** 740/**
@@ -598,8 +793,7 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,
598 instance->instancet->fire_cmd(instance, 793 instance->instancet->fire_cmd(instance,
599 cmd->frame_phys_addr, 0, instance->reg_set); 794 cmd->frame_phys_addr, 0, instance->reg_set);
600 795
601 wait_event_timeout(instance->int_cmd_wait_q, (cmd->cmd_status != ENODATA), 796 wait_event(instance->int_cmd_wait_q, cmd->cmd_status != ENODATA);
602 MEGASAS_INTERNAL_CMD_WAIT_TIME*HZ);
603 797
604 return 0; 798 return 0;
605} 799}
@@ -647,8 +841,8 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
647 /* 841 /*
648 * Wait for this cmd to complete 842 * Wait for this cmd to complete
649 */ 843 */
650 wait_event_timeout(instance->abort_cmd_wait_q, (cmd->cmd_status != 0xFF), 844 wait_event(instance->abort_cmd_wait_q, cmd->cmd_status != 0xFF);
651 MEGASAS_INTERNAL_CMD_WAIT_TIME*HZ); 845 cmd->sync_cmd = 0;
652 846
653 megasas_return_cmd(instance, cmd); 847 megasas_return_cmd(instance, cmd);
654 return 0; 848 return 0;
@@ -1130,14 +1324,22 @@ megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *))
1130 u32 frame_count; 1324 u32 frame_count;
1131 struct megasas_cmd *cmd; 1325 struct megasas_cmd *cmd;
1132 struct megasas_instance *instance; 1326 struct megasas_instance *instance;
1327 unsigned long flags;
1133 1328
1134 instance = (struct megasas_instance *) 1329 instance = (struct megasas_instance *)
1135 scmd->device->host->hostdata; 1330 scmd->device->host->hostdata;
1136 1331
1137 /* Don't process if we have already declared adapter dead */ 1332 if (instance->issuepend_done == 0)
1138 if (instance->hw_crit_error)
1139 return SCSI_MLQUEUE_HOST_BUSY; 1333 return SCSI_MLQUEUE_HOST_BUSY;
1140 1334
1335 spin_lock_irqsave(&instance->hba_lock, flags);
1336 if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
1337 spin_unlock_irqrestore(&instance->hba_lock, flags);
1338 return SCSI_MLQUEUE_HOST_BUSY;
1339 }
1340
1341 spin_unlock_irqrestore(&instance->hba_lock, flags);
1342
1141 scmd->scsi_done = done; 1343 scmd->scsi_done = done;
1142 scmd->result = 0; 1344 scmd->result = 0;
1143 1345
@@ -1273,6 +1475,18 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
1273 return 0; 1475 return 0;
1274} 1476}
1275 1477
1478static void megaraid_sas_kill_hba(struct megasas_instance *instance)
1479{
1480 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
1481 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
1482 writel(MFI_STOP_ADP,
1483 &instance->reg_set->reserved_0[0]);
1484 } else {
1485 writel(MFI_STOP_ADP,
1486 &instance->reg_set->inbound_doorbell);
1487 }
1488}
1489
1276/** 1490/**
1277 * megasas_complete_cmd_dpc - Returns FW's controller structure 1491 * megasas_complete_cmd_dpc - Returns FW's controller structure
1278 * @instance_addr: Address of adapter soft state 1492 * @instance_addr: Address of adapter soft state
@@ -1290,7 +1504,7 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
1290 unsigned long flags; 1504 unsigned long flags;
1291 1505
1292 /* If we have already declared adapter dead, donot complete cmds */ 1506 /* If we have already declared adapter dead, donot complete cmds */
1293 if (instance->hw_crit_error) 1507 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR )
1294 return; 1508 return;
1295 1509
1296 spin_lock_irqsave(&instance->completion_lock, flags); 1510 spin_lock_irqsave(&instance->completion_lock, flags);
@@ -1300,6 +1514,11 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
1300 1514
1301 while (consumer != producer) { 1515 while (consumer != producer) {
1302 context = instance->reply_queue[consumer]; 1516 context = instance->reply_queue[consumer];
1517 if (context >= instance->max_fw_cmds) {
1518 printk(KERN_ERR "Unexpected context value %x\n",
1519 context);
1520 BUG();
1521 }
1303 1522
1304 cmd = instance->cmd_list[context]; 1523 cmd = instance->cmd_list[context];
1305 1524
@@ -1349,7 +1568,76 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
1349static int megasas_wait_for_outstanding(struct megasas_instance *instance) 1568static int megasas_wait_for_outstanding(struct megasas_instance *instance)
1350{ 1569{
1351 int i; 1570 int i;
1571 u32 reset_index;
1352 u32 wait_time = MEGASAS_RESET_WAIT_TIME; 1572 u32 wait_time = MEGASAS_RESET_WAIT_TIME;
1573 u8 adprecovery;
1574 unsigned long flags;
1575 struct list_head clist_local;
1576 struct megasas_cmd *reset_cmd;
1577
1578 spin_lock_irqsave(&instance->hba_lock, flags);
1579 adprecovery = instance->adprecovery;
1580 spin_unlock_irqrestore(&instance->hba_lock, flags);
1581
1582 if (adprecovery != MEGASAS_HBA_OPERATIONAL) {
1583
1584 INIT_LIST_HEAD(&clist_local);
1585 spin_lock_irqsave(&instance->hba_lock, flags);
1586 list_splice_init(&instance->internal_reset_pending_q,
1587 &clist_local);
1588 spin_unlock_irqrestore(&instance->hba_lock, flags);
1589
1590 printk(KERN_NOTICE "megasas: HBA reset wait ...\n");
1591 for (i = 0; i < wait_time; i++) {
1592 msleep(1000);
1593 spin_lock_irqsave(&instance->hba_lock, flags);
1594 adprecovery = instance->adprecovery;
1595 spin_unlock_irqrestore(&instance->hba_lock, flags);
1596 if (adprecovery == MEGASAS_HBA_OPERATIONAL)
1597 break;
1598 }
1599
1600 if (adprecovery != MEGASAS_HBA_OPERATIONAL) {
1601 printk(KERN_NOTICE "megasas: reset: Stopping HBA.\n");
1602 spin_lock_irqsave(&instance->hba_lock, flags);
1603 instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
1604 spin_unlock_irqrestore(&instance->hba_lock, flags);
1605 return FAILED;
1606 }
1607
1608 reset_index = 0;
1609 while (!list_empty(&clist_local)) {
1610 reset_cmd = list_entry((&clist_local)->next,
1611 struct megasas_cmd, list);
1612 list_del_init(&reset_cmd->list);
1613 if (reset_cmd->scmd) {
1614 reset_cmd->scmd->result = DID_RESET << 16;
1615 printk(KERN_NOTICE "%d:%p reset [%02x], %#lx\n",
1616 reset_index, reset_cmd,
1617 reset_cmd->scmd->cmnd[0],
1618 reset_cmd->scmd->serial_number);
1619
1620 reset_cmd->scmd->scsi_done(reset_cmd->scmd);
1621 megasas_return_cmd(instance, reset_cmd);
1622 } else if (reset_cmd->sync_cmd) {
1623 printk(KERN_NOTICE "megasas:%p synch cmds"
1624 "reset queue\n",
1625 reset_cmd);
1626
1627 reset_cmd->cmd_status = ENODATA;
1628 instance->instancet->fire_cmd(instance,
1629 reset_cmd->frame_phys_addr,
1630 0, instance->reg_set);
1631 } else {
1632 printk(KERN_NOTICE "megasas: %p unexpected"
1633 "cmds lst\n",
1634 reset_cmd);
1635 }
1636 reset_index++;
1637 }
1638
1639 return SUCCESS;
1640 }
1353 1641
1354 for (i = 0; i < wait_time; i++) { 1642 for (i = 0; i < wait_time; i++) {
1355 1643
@@ -1372,6 +1660,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
1372 } 1660 }
1373 1661
1374 if (atomic_read(&instance->fw_outstanding)) { 1662 if (atomic_read(&instance->fw_outstanding)) {
1663 printk(KERN_NOTICE "megaraid_sas: pending cmds after reset\n");
1375 /* 1664 /*
1376 * Send signal to FW to stop processing any pending cmds. 1665 * Send signal to FW to stop processing any pending cmds.
1377 * The controller will be taken offline by the OS now. 1666 * The controller will be taken offline by the OS now.
@@ -1387,10 +1676,14 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
1387 &instance->reg_set->inbound_doorbell); 1676 &instance->reg_set->inbound_doorbell);
1388 } 1677 }
1389 megasas_dump_pending_frames(instance); 1678 megasas_dump_pending_frames(instance);
1390 instance->hw_crit_error = 1; 1679 spin_lock_irqsave(&instance->hba_lock, flags);
1680 instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
1681 spin_unlock_irqrestore(&instance->hba_lock, flags);
1391 return FAILED; 1682 return FAILED;
1392 } 1683 }
1393 1684
1685 printk(KERN_NOTICE "megaraid_sas: no pending cmds after reset\n");
1686
1394 return SUCCESS; 1687 return SUCCESS;
1395} 1688}
1396 1689
@@ -1412,7 +1705,7 @@ static int megasas_generic_reset(struct scsi_cmnd *scmd)
1412 scmd_printk(KERN_NOTICE, scmd, "megasas: RESET -%ld cmd=%x retries=%x\n", 1705 scmd_printk(KERN_NOTICE, scmd, "megasas: RESET -%ld cmd=%x retries=%x\n",
1413 scmd->serial_number, scmd->cmnd[0], scmd->retries); 1706 scmd->serial_number, scmd->cmnd[0], scmd->retries);
1414 1707
1415 if (instance->hw_crit_error) { 1708 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
1416 printk(KERN_ERR "megasas: cannot recover from previous reset " 1709 printk(KERN_ERR "megasas: cannot recover from previous reset "
1417 "failures\n"); 1710 "failures\n");
1418 return FAILED; 1711 return FAILED;
@@ -1567,7 +1860,8 @@ megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
1567 instance->aen_cmd = NULL; 1860 instance->aen_cmd = NULL;
1568 megasas_return_cmd(instance, cmd); 1861 megasas_return_cmd(instance, cmd);
1569 1862
1570 if (instance->unload == 0) { 1863 if ((instance->unload == 0) &&
1864 ((instance->issuepend_done == 1))) {
1571 struct megasas_aen_event *ev; 1865 struct megasas_aen_event *ev;
1572 ev = kzalloc(sizeof(*ev), GFP_ATOMIC); 1866 ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
1573 if (!ev) { 1867 if (!ev) {
@@ -1662,6 +1956,9 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
1662 struct megasas_header *hdr = &cmd->frame->hdr; 1956 struct megasas_header *hdr = &cmd->frame->hdr;
1663 unsigned long flags; 1957 unsigned long flags;
1664 1958
1959 /* flag for the retry reset */
1960 cmd->retry_for_fw_reset = 0;
1961
1665 if (cmd->scmd) 1962 if (cmd->scmd)
1666 cmd->scmd->SCp.ptr = NULL; 1963 cmd->scmd->SCp.ptr = NULL;
1667 1964
@@ -1782,39 +2079,301 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
1782} 2079}
1783 2080
1784/** 2081/**
2082 * megasas_issue_pending_cmds_again - issue all pending cmds
2083 * in FW again because of the fw reset
2084 * @instance: Adapter soft state
2085 */
2086static inline void
2087megasas_issue_pending_cmds_again(struct megasas_instance *instance)
2088{
2089 struct megasas_cmd *cmd;
2090 struct list_head clist_local;
2091 union megasas_evt_class_locale class_locale;
2092 unsigned long flags;
2093 u32 seq_num;
2094
2095 INIT_LIST_HEAD(&clist_local);
2096 spin_lock_irqsave(&instance->hba_lock, flags);
2097 list_splice_init(&instance->internal_reset_pending_q, &clist_local);
2098 spin_unlock_irqrestore(&instance->hba_lock, flags);
2099
2100 while (!list_empty(&clist_local)) {
2101 cmd = list_entry((&clist_local)->next,
2102 struct megasas_cmd, list);
2103 list_del_init(&cmd->list);
2104
2105 if (cmd->sync_cmd || cmd->scmd) {
2106 printk(KERN_NOTICE "megaraid_sas: command %p, %p:%d"
2107 "detected to be pending while HBA reset.\n",
2108 cmd, cmd->scmd, cmd->sync_cmd);
2109
2110 cmd->retry_for_fw_reset++;
2111
2112 if (cmd->retry_for_fw_reset == 3) {
2113 printk(KERN_NOTICE "megaraid_sas: cmd %p, %p:%d"
2114 "was tried multiple times during reset."
2115 "Shutting down the HBA\n",
2116 cmd, cmd->scmd, cmd->sync_cmd);
2117 megaraid_sas_kill_hba(instance);
2118
2119 instance->adprecovery =
2120 MEGASAS_HW_CRITICAL_ERROR;
2121 return;
2122 }
2123 }
2124
2125 if (cmd->sync_cmd == 1) {
2126 if (cmd->scmd) {
2127 printk(KERN_NOTICE "megaraid_sas: unexpected"
2128 "cmd attached to internal command!\n");
2129 }
2130 printk(KERN_NOTICE "megasas: %p synchronous cmd"
2131 "on the internal reset queue,"
2132 "issue it again.\n", cmd);
2133 cmd->cmd_status = ENODATA;
2134 instance->instancet->fire_cmd(instance,
2135 cmd->frame_phys_addr ,
2136 0, instance->reg_set);
2137 } else if (cmd->scmd) {
2138 printk(KERN_NOTICE "megasas: %p scsi cmd [%02x],%#lx"
2139 "detected on the internal queue, issue again.\n",
2140 cmd, cmd->scmd->cmnd[0], cmd->scmd->serial_number);
2141
2142 atomic_inc(&instance->fw_outstanding);
2143 instance->instancet->fire_cmd(instance,
2144 cmd->frame_phys_addr,
2145 cmd->frame_count-1, instance->reg_set);
2146 } else {
2147 printk(KERN_NOTICE "megasas: %p unexpected cmd on the"
2148 "internal reset defer list while re-issue!!\n",
2149 cmd);
2150 }
2151 }
2152
2153 if (instance->aen_cmd) {
2154 printk(KERN_NOTICE "megaraid_sas: aen_cmd in def process\n");
2155 megasas_return_cmd(instance, instance->aen_cmd);
2156
2157 instance->aen_cmd = NULL;
2158 }
2159
2160 /*
2161 * Initiate AEN (Asynchronous Event Notification)
2162 */
2163 seq_num = instance->last_seq_num;
2164 class_locale.members.reserved = 0;
2165 class_locale.members.locale = MR_EVT_LOCALE_ALL;
2166 class_locale.members.class = MR_EVT_CLASS_DEBUG;
2167
2168 megasas_register_aen(instance, seq_num, class_locale.word);
2169}
2170
2171/**
2172 * Move the internal reset pending commands to a deferred queue.
2173 *
2174 * We move the commands pending at internal reset time to a
2175 * pending queue. This queue would be flushed after successful
2176 * completion of the internal reset sequence. if the internal reset
2177 * did not complete in time, the kernel reset handler would flush
2178 * these commands.
2179 **/
2180static void
2181megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
2182{
2183 struct megasas_cmd *cmd;
2184 int i;
2185 u32 max_cmd = instance->max_fw_cmds;
2186 u32 defer_index;
2187 unsigned long flags;
2188
2189 defer_index = 0;
2190 spin_lock_irqsave(&instance->cmd_pool_lock, flags);
2191 for (i = 0; i < max_cmd; i++) {
2192 cmd = instance->cmd_list[i];
2193 if (cmd->sync_cmd == 1 || cmd->scmd) {
2194 printk(KERN_NOTICE "megasas: moving cmd[%d]:%p:%d:%p"
2195 "on the defer queue as internal\n",
2196 defer_index, cmd, cmd->sync_cmd, cmd->scmd);
2197
2198 if (!list_empty(&cmd->list)) {
2199 printk(KERN_NOTICE "megaraid_sas: ERROR while"
2200 " moving this cmd:%p, %d %p, it was"
2201 "discovered on some list?\n",
2202 cmd, cmd->sync_cmd, cmd->scmd);
2203
2204 list_del_init(&cmd->list);
2205 }
2206 defer_index++;
2207 list_add_tail(&cmd->list,
2208 &instance->internal_reset_pending_q);
2209 }
2210 }
2211 spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
2212}
2213
2214
2215static void
2216process_fw_state_change_wq(struct work_struct *work)
2217{
2218 struct megasas_instance *instance =
2219 container_of(work, struct megasas_instance, work_init);
2220 u32 wait;
2221 unsigned long flags;
2222
2223 if (instance->adprecovery != MEGASAS_ADPRESET_SM_INFAULT) {
2224 printk(KERN_NOTICE "megaraid_sas: error, recovery st %x \n",
2225 instance->adprecovery);
2226 return ;
2227 }
2228
2229 if (instance->adprecovery == MEGASAS_ADPRESET_SM_INFAULT) {
2230 printk(KERN_NOTICE "megaraid_sas: FW detected to be in fault"
2231 "state, restarting it...\n");
2232
2233 instance->instancet->disable_intr(instance->reg_set);
2234 atomic_set(&instance->fw_outstanding, 0);
2235
2236 atomic_set(&instance->fw_reset_no_pci_access, 1);
2237 instance->instancet->adp_reset(instance, instance->reg_set);
2238 atomic_set(&instance->fw_reset_no_pci_access, 0 );
2239
2240 printk(KERN_NOTICE "megaraid_sas: FW restarted successfully,"
2241 "initiating next stage...\n");
2242
2243 printk(KERN_NOTICE "megaraid_sas: HBA recovery state machine,"
2244 "state 2 starting...\n");
2245
2246 /*waitting for about 20 second before start the second init*/
2247 for (wait = 0; wait < 30; wait++) {
2248 msleep(1000);
2249 }
2250
2251 if (megasas_transition_to_ready(instance)) {
2252 printk(KERN_NOTICE "megaraid_sas:adapter not ready\n");
2253
2254 megaraid_sas_kill_hba(instance);
2255 instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
2256 return ;
2257 }
2258
2259 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
2260 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
2261 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
2262 ) {
2263 *instance->consumer = *instance->producer;
2264 } else {
2265 *instance->consumer = 0;
2266 *instance->producer = 0;
2267 }
2268
2269 megasas_issue_init_mfi(instance);
2270
2271 spin_lock_irqsave(&instance->hba_lock, flags);
2272 instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
2273 spin_unlock_irqrestore(&instance->hba_lock, flags);
2274 instance->instancet->enable_intr(instance->reg_set);
2275
2276 megasas_issue_pending_cmds_again(instance);
2277 instance->issuepend_done = 1;
2278 }
2279 return ;
2280}
2281
2282/**
1785 * megasas_deplete_reply_queue - Processes all completed commands 2283 * megasas_deplete_reply_queue - Processes all completed commands
1786 * @instance: Adapter soft state 2284 * @instance: Adapter soft state
1787 * @alt_status: Alternate status to be returned to 2285 * @alt_status: Alternate status to be returned to
1788 * SCSI mid-layer instead of the status 2286 * SCSI mid-layer instead of the status
1789 * returned by the FW 2287 * returned by the FW
2288 * Note: this must be called with hba lock held
1790 */ 2289 */
1791static int 2290static int
1792megasas_deplete_reply_queue(struct megasas_instance *instance, u8 alt_status) 2291megasas_deplete_reply_queue(struct megasas_instance *instance,
2292 u8 alt_status)
1793{ 2293{
1794 /* 2294 u32 mfiStatus;
1795 * Check if it is our interrupt 2295 u32 fw_state;
1796 * Clear the interrupt 2296
1797 */ 2297 if ((mfiStatus = instance->instancet->check_reset(instance,
1798 if(instance->instancet->clear_intr(instance->reg_set)) 2298 instance->reg_set)) == 1) {
2299 return IRQ_HANDLED;
2300 }
2301
2302 if ((mfiStatus = instance->instancet->clear_intr(
2303 instance->reg_set)
2304 ) == 0) {
1799 return IRQ_NONE; 2305 return IRQ_NONE;
2306 }
2307
2308 instance->mfiStatus = mfiStatus;
2309
2310 if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
2311 fw_state = instance->instancet->read_fw_status_reg(
2312 instance->reg_set) & MFI_STATE_MASK;
2313
2314 if (fw_state != MFI_STATE_FAULT) {
2315 printk(KERN_NOTICE "megaraid_sas: fw state:%x\n",
2316 fw_state);
2317 }
2318
2319 if ((fw_state == MFI_STATE_FAULT) &&
2320 (instance->disableOnlineCtrlReset == 0)) {
2321 printk(KERN_NOTICE "megaraid_sas: wait adp restart\n");
2322
2323 if ((instance->pdev->device ==
2324 PCI_DEVICE_ID_LSI_SAS1064R) ||
2325 (instance->pdev->device ==
2326 PCI_DEVICE_ID_DELL_PERC5) ||
2327 (instance->pdev->device ==
2328 PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
2329
2330 *instance->consumer =
2331 MEGASAS_ADPRESET_INPROG_SIGN;
2332 }
2333
2334
2335 instance->instancet->disable_intr(instance->reg_set);
2336 instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
2337 instance->issuepend_done = 0;
2338
2339 atomic_set(&instance->fw_outstanding, 0);
2340 megasas_internal_reset_defer_cmds(instance);
2341
2342 printk(KERN_NOTICE "megasas: fwState=%x, stage:%d\n",
2343 fw_state, instance->adprecovery);
2344
2345 schedule_work(&instance->work_init);
2346 return IRQ_HANDLED;
2347
2348 } else {
2349 printk(KERN_NOTICE "megasas: fwstate:%x, dis_OCR=%x\n",
2350 fw_state, instance->disableOnlineCtrlReset);
2351 }
2352 }
1800 2353
1801 if (instance->hw_crit_error)
1802 goto out_done;
1803 /*
1804 * Schedule the tasklet for cmd completion
1805 */
1806 tasklet_schedule(&instance->isr_tasklet); 2354 tasklet_schedule(&instance->isr_tasklet);
1807out_done:
1808 return IRQ_HANDLED; 2355 return IRQ_HANDLED;
1809} 2356}
1810
1811/** 2357/**
1812 * megasas_isr - isr entry point 2358 * megasas_isr - isr entry point
1813 */ 2359 */
1814static irqreturn_t megasas_isr(int irq, void *devp) 2360static irqreturn_t megasas_isr(int irq, void *devp)
1815{ 2361{
1816 return megasas_deplete_reply_queue((struct megasas_instance *)devp, 2362 struct megasas_instance *instance;
1817 DID_OK); 2363 unsigned long flags;
2364 irqreturn_t rc;
2365
2366 if (atomic_read(
2367 &(((struct megasas_instance *)devp)->fw_reset_no_pci_access)))
2368 return IRQ_HANDLED;
2369
2370 instance = (struct megasas_instance *)devp;
2371
2372 spin_lock_irqsave(&instance->hba_lock, flags);
2373 rc = megasas_deplete_reply_queue(instance, DID_OK);
2374 spin_unlock_irqrestore(&instance->hba_lock, flags);
2375
2376 return rc;
1818} 2377}
1819 2378
1820/** 2379/**
@@ -1971,7 +2530,7 @@ megasas_transition_to_ready(struct megasas_instance* instance)
1971 "in %d secs\n", fw_state, max_wait); 2530 "in %d secs\n", fw_state, max_wait);
1972 return -ENODEV; 2531 return -ENODEV;
1973 } 2532 }
1974 }; 2533 }
1975 printk(KERN_INFO "megasas: FW now in Ready state\n"); 2534 printk(KERN_INFO "megasas: FW now in Ready state\n");
1976 2535
1977 return 0; 2536 return 0;
@@ -2053,6 +2612,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
2053 */ 2612 */
2054 sgl_sz = sge_sz * instance->max_num_sge; 2613 sgl_sz = sge_sz * instance->max_num_sge;
2055 frame_count = (sgl_sz + MEGAMFI_FRAME_SIZE - 1) / MEGAMFI_FRAME_SIZE; 2614 frame_count = (sgl_sz + MEGAMFI_FRAME_SIZE - 1) / MEGAMFI_FRAME_SIZE;
2615 frame_count = 15;
2056 2616
2057 /* 2617 /*
2058 * We need one extra frame for the MFI command 2618 * We need one extra frame for the MFI command
@@ -2200,6 +2760,7 @@ static int megasas_alloc_cmds(struct megasas_instance *instance)
2200 cmd = instance->cmd_list[i]; 2760 cmd = instance->cmd_list[i];
2201 memset(cmd, 0, sizeof(struct megasas_cmd)); 2761 memset(cmd, 0, sizeof(struct megasas_cmd));
2202 cmd->index = i; 2762 cmd->index = i;
2763 cmd->scmd = NULL;
2203 cmd->instance = instance; 2764 cmd->instance = instance;
2204 2765
2205 list_add_tail(&cmd->list, &instance->cmd_pool); 2766 list_add_tail(&cmd->list, &instance->cmd_pool);
@@ -2367,7 +2928,7 @@ megasas_get_ld_list(struct megasas_instance *instance)
2367 2928
2368 /* the following function will get the instance PD LIST */ 2929 /* the following function will get the instance PD LIST */
2369 2930
2370 if ((ret == 0) && (ci->ldCount < MAX_LOGICAL_DRIVES)) { 2931 if ((ret == 0) && (ci->ldCount <= MAX_LOGICAL_DRIVES)) {
2371 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 2932 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
2372 2933
2373 for (ld_index = 0; ld_index < ci->ldCount; ld_index++) { 2934 for (ld_index = 0; ld_index < ci->ldCount; ld_index++) {
@@ -2681,6 +3242,21 @@ static int megasas_init_mfi(struct megasas_instance *instance)
2681 if (megasas_issue_init_mfi(instance)) 3242 if (megasas_issue_init_mfi(instance))
2682 goto fail_fw_init; 3243 goto fail_fw_init;
2683 3244
3245 instance->fw_support_ieee = 0;
3246 instance->fw_support_ieee =
3247 (instance->instancet->read_fw_status_reg(reg_set) &
3248 0x04000000);
3249
3250 printk(KERN_NOTICE "megasas_init_mfi: fw_support_ieee=%d",
3251 instance->fw_support_ieee);
3252
3253 if (instance->fw_support_ieee)
3254 instance->flag_ieee = 1;
3255
3256 /** for passthrough
3257 * the following function will get the PD LIST.
3258 */
3259
2684 memset(instance->pd_list, 0 , 3260 memset(instance->pd_list, 0 ,
2685 (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list))); 3261 (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
2686 megasas_get_pd_list(instance); 3262 megasas_get_pd_list(instance);
@@ -2707,6 +3283,8 @@ static int megasas_init_mfi(struct megasas_instance *instance)
2707 max_sectors_2 = ctrl_info->max_request_size; 3283 max_sectors_2 = ctrl_info->max_request_size;
2708 3284
2709 tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2); 3285 tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2);
3286 instance->disableOnlineCtrlReset =
3287 ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
2710 } 3288 }
2711 3289
2712 instance->max_sectors_per_req = instance->max_num_sge * 3290 instance->max_sectors_per_req = instance->max_num_sge *
@@ -2928,6 +3506,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
2928 dcmd->flags = MFI_FRAME_DIR_READ; 3506 dcmd->flags = MFI_FRAME_DIR_READ;
2929 dcmd->timeout = 0; 3507 dcmd->timeout = 0;
2930 dcmd->pad_0 = 0; 3508 dcmd->pad_0 = 0;
3509 instance->last_seq_num = seq_num;
2931 dcmd->data_xfer_len = sizeof(struct megasas_evt_detail); 3510 dcmd->data_xfer_len = sizeof(struct megasas_evt_detail);
2932 dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT; 3511 dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
2933 dcmd->mbox.w[0] = seq_num; 3512 dcmd->mbox.w[0] = seq_num;
@@ -3096,6 +3675,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3096 3675
3097 instance = (struct megasas_instance *)host->hostdata; 3676 instance = (struct megasas_instance *)host->hostdata;
3098 memset(instance, 0, sizeof(*instance)); 3677 memset(instance, 0, sizeof(*instance));
3678 atomic_set( &instance->fw_reset_no_pci_access, 0 );
3099 3679
3100 instance->producer = pci_alloc_consistent(pdev, sizeof(u32), 3680 instance->producer = pci_alloc_consistent(pdev, sizeof(u32),
3101 &instance->producer_h); 3681 &instance->producer_h);
@@ -3113,6 +3693,9 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3113 megasas_poll_wait_aen = 0; 3693 megasas_poll_wait_aen = 0;
3114 instance->flag_ieee = 0; 3694 instance->flag_ieee = 0;
3115 instance->ev = NULL; 3695 instance->ev = NULL;
3696 instance->issuepend_done = 1;
3697 instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
3698 megasas_poll_wait_aen = 0;
3116 3699
3117 instance->evt_detail = pci_alloc_consistent(pdev, 3700 instance->evt_detail = pci_alloc_consistent(pdev,
3118 sizeof(struct 3701 sizeof(struct
@@ -3129,6 +3712,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3129 * Initialize locks and queues 3712 * Initialize locks and queues
3130 */ 3713 */
3131 INIT_LIST_HEAD(&instance->cmd_pool); 3714 INIT_LIST_HEAD(&instance->cmd_pool);
3715 INIT_LIST_HEAD(&instance->internal_reset_pending_q);
3132 3716
3133 atomic_set(&instance->fw_outstanding,0); 3717 atomic_set(&instance->fw_outstanding,0);
3134 3718
@@ -3136,7 +3720,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3136 init_waitqueue_head(&instance->abort_cmd_wait_q); 3720 init_waitqueue_head(&instance->abort_cmd_wait_q);
3137 3721
3138 spin_lock_init(&instance->cmd_pool_lock); 3722 spin_lock_init(&instance->cmd_pool_lock);
3139 spin_lock_init(&instance->fire_lock); 3723 spin_lock_init(&instance->hba_lock);
3140 spin_lock_init(&instance->completion_lock); 3724 spin_lock_init(&instance->completion_lock);
3141 spin_lock_init(&poll_aen_lock); 3725 spin_lock_init(&poll_aen_lock);
3142 3726
@@ -3161,6 +3745,9 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3161 instance->flag = 0; 3745 instance->flag = 0;
3162 instance->unload = 1; 3746 instance->unload = 1;
3163 instance->last_time = 0; 3747 instance->last_time = 0;
3748 instance->disableOnlineCtrlReset = 1;
3749
3750 INIT_WORK(&instance->work_init, process_fw_state_change_wq);
3164 3751
3165 /* 3752 /*
3166 * Initialize MFI Firmware 3753 * Initialize MFI Firmware
@@ -3252,6 +3839,9 @@ static void megasas_flush_cache(struct megasas_instance *instance)
3252 struct megasas_cmd *cmd; 3839 struct megasas_cmd *cmd;
3253 struct megasas_dcmd_frame *dcmd; 3840 struct megasas_dcmd_frame *dcmd;
3254 3841
3842 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
3843 return;
3844
3255 cmd = megasas_get_cmd(instance); 3845 cmd = megasas_get_cmd(instance);
3256 3846
3257 if (!cmd) 3847 if (!cmd)
@@ -3289,6 +3879,9 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
3289 struct megasas_cmd *cmd; 3879 struct megasas_cmd *cmd;
3290 struct megasas_dcmd_frame *dcmd; 3880 struct megasas_dcmd_frame *dcmd;
3291 3881
3882 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
3883 return;
3884
3292 cmd = megasas_get_cmd(instance); 3885 cmd = megasas_get_cmd(instance);
3293 3886
3294 if (!cmd) 3887 if (!cmd)
@@ -3779,6 +4372,9 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
3779 struct megasas_iocpacket *ioc; 4372 struct megasas_iocpacket *ioc;
3780 struct megasas_instance *instance; 4373 struct megasas_instance *instance;
3781 int error; 4374 int error;
4375 int i;
4376 unsigned long flags;
4377 u32 wait_time = MEGASAS_RESET_WAIT_TIME;
3782 4378
3783 ioc = kmalloc(sizeof(*ioc), GFP_KERNEL); 4379 ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
3784 if (!ioc) 4380 if (!ioc)
@@ -3795,8 +4391,8 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
3795 goto out_kfree_ioc; 4391 goto out_kfree_ioc;
3796 } 4392 }
3797 4393
3798 if (instance->hw_crit_error == 1) { 4394 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
3799 printk(KERN_DEBUG "Controller in Crit ERROR\n"); 4395 printk(KERN_ERR "Controller in crit error\n");
3800 error = -ENODEV; 4396 error = -ENODEV;
3801 goto out_kfree_ioc; 4397 goto out_kfree_ioc;
3802 } 4398 }
@@ -3813,6 +4409,35 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
3813 error = -ERESTARTSYS; 4409 error = -ERESTARTSYS;
3814 goto out_kfree_ioc; 4410 goto out_kfree_ioc;
3815 } 4411 }
4412
4413 for (i = 0; i < wait_time; i++) {
4414
4415 spin_lock_irqsave(&instance->hba_lock, flags);
4416 if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
4417 spin_unlock_irqrestore(&instance->hba_lock, flags);
4418 break;
4419 }
4420 spin_unlock_irqrestore(&instance->hba_lock, flags);
4421
4422 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
4423 printk(KERN_NOTICE "megasas: waiting"
4424 "for controller reset to finish\n");
4425 }
4426
4427 msleep(1000);
4428 }
4429
4430 spin_lock_irqsave(&instance->hba_lock, flags);
4431 if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
4432 spin_unlock_irqrestore(&instance->hba_lock, flags);
4433
4434 printk(KERN_ERR "megaraid_sas: timed out while"
4435 "waiting for HBA to recover\n");
4436 error = -ENODEV;
4437 goto out_kfree_ioc;
4438 }
4439 spin_unlock_irqrestore(&instance->hba_lock, flags);
4440
3816 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc); 4441 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
3817 up(&instance->ioctl_sem); 4442 up(&instance->ioctl_sem);
3818 4443
@@ -3826,6 +4451,9 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
3826 struct megasas_instance *instance; 4451 struct megasas_instance *instance;
3827 struct megasas_aen aen; 4452 struct megasas_aen aen;
3828 int error; 4453 int error;
4454 int i;
4455 unsigned long flags;
4456 u32 wait_time = MEGASAS_RESET_WAIT_TIME;
3829 4457
3830 if (file->private_data != file) { 4458 if (file->private_data != file) {
3831 printk(KERN_DEBUG "megasas: fasync_helper was not " 4459 printk(KERN_DEBUG "megasas: fasync_helper was not "
@@ -3841,14 +4469,42 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
3841 if (!instance) 4469 if (!instance)
3842 return -ENODEV; 4470 return -ENODEV;
3843 4471
3844 if (instance->hw_crit_error == 1) { 4472 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
3845 error = -ENODEV; 4473 return -ENODEV;
3846 } 4474 }
3847 4475
3848 if (instance->unload == 1) { 4476 if (instance->unload == 1) {
3849 return -ENODEV; 4477 return -ENODEV;
3850 } 4478 }
3851 4479
4480 for (i = 0; i < wait_time; i++) {
4481
4482 spin_lock_irqsave(&instance->hba_lock, flags);
4483 if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
4484 spin_unlock_irqrestore(&instance->hba_lock,
4485 flags);
4486 break;
4487 }
4488
4489 spin_unlock_irqrestore(&instance->hba_lock, flags);
4490
4491 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
4492 printk(KERN_NOTICE "megasas: waiting for"
4493 "controller reset to finish\n");
4494 }
4495
4496 msleep(1000);
4497 }
4498
4499 spin_lock_irqsave(&instance->hba_lock, flags);
4500 if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
4501 spin_unlock_irqrestore(&instance->hba_lock, flags);
4502 printk(KERN_ERR "megaraid_sas: timed out while waiting"
4503 "for HBA to recover.\n");
4504 return -ENODEV;
4505 }
4506 spin_unlock_irqrestore(&instance->hba_lock, flags);
4507
3852 mutex_lock(&instance->aen_mutex); 4508 mutex_lock(&instance->aen_mutex);
3853 error = megasas_register_aen(instance, aen.seq_num, 4509 error = megasas_register_aen(instance, aen.seq_num,
3854 aen.class_locale_word); 4510 aen.class_locale_word);
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 9d8b6bf605aa..16a4f68a34b0 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -60,6 +60,7 @@
60#define MFI_STATE_READY 0xB0000000 60#define MFI_STATE_READY 0xB0000000
61#define MFI_STATE_OPERATIONAL 0xC0000000 61#define MFI_STATE_OPERATIONAL 0xC0000000
62#define MFI_STATE_FAULT 0xF0000000 62#define MFI_STATE_FAULT 0xF0000000
63#define MFI_RESET_REQUIRED 0x00000001
63 64
64#define MEGAMFI_FRAME_SIZE 64 65#define MEGAMFI_FRAME_SIZE 64
65 66
@@ -73,6 +74,12 @@
73 * HOTPLUG : Resume from Hotplug 74 * HOTPLUG : Resume from Hotplug
74 * MFI_STOP_ADP : Send signal to FW to stop processing 75 * MFI_STOP_ADP : Send signal to FW to stop processing
75 */ 76 */
77#define WRITE_SEQUENCE_OFFSET (0x0000000FC) /* I20 */
78#define HOST_DIAGNOSTIC_OFFSET (0x000000F8) /* I20 */
79#define DIAG_WRITE_ENABLE (0x00000080)
80#define DIAG_RESET_ADAPTER (0x00000004)
81
82#define MFI_ADP_RESET 0x00000040
76#define MFI_INIT_ABORT 0x00000001 83#define MFI_INIT_ABORT 0x00000001
77#define MFI_INIT_READY 0x00000002 84#define MFI_INIT_READY 0x00000002
78#define MFI_INIT_MFIMODE 0x00000004 85#define MFI_INIT_MFIMODE 0x00000004
@@ -402,8 +409,40 @@ struct megasas_ctrl_prop {
402 u16 ecc_bucket_leak_rate; 409 u16 ecc_bucket_leak_rate;
403 u8 restore_hotspare_on_insertion; 410 u8 restore_hotspare_on_insertion;
404 u8 expose_encl_devices; 411 u8 expose_encl_devices;
405 u8 reserved[38]; 412 u8 maintainPdFailHistory;
413 u8 disallowHostRequestReordering;
414 u8 abortCCOnError;
415 u8 loadBalanceMode;
416 u8 disableAutoDetectBackplane;
417
418 u8 snapVDSpace;
419
420 /*
421 * Add properties that can be controlled by
422 * a bit in the following structure.
423 */
406 424
425 struct {
426 u32 copyBackDisabled : 1;
427 u32 SMARTerEnabled : 1;
428 u32 prCorrectUnconfiguredAreas : 1;
429 u32 useFdeOnly : 1;
430 u32 disableNCQ : 1;
431 u32 SSDSMARTerEnabled : 1;
432 u32 SSDPatrolReadEnabled : 1;
433 u32 enableSpinDownUnconfigured : 1;
434 u32 autoEnhancedImport : 1;
435 u32 enableSecretKeyControl : 1;
436 u32 disableOnlineCtrlReset : 1;
437 u32 allowBootWithPinnedCache : 1;
438 u32 disableSpinDownHS : 1;
439 u32 enableJBOD : 1;
440 u32 reserved :18;
441 } OnOffProperties;
442 u8 autoSnapVDSpace;
443 u8 viewSpace;
444 u16 spinDownTime;
445 u8 reserved[24];
407} __packed; 446} __packed;
408 447
409/* 448/*
@@ -704,6 +743,12 @@ struct megasas_ctrl_info {
704 */ 743 */
705#define IS_DMA64 (sizeof(dma_addr_t) == 8) 744#define IS_DMA64 (sizeof(dma_addr_t) == 8)
706 745
746#define MFI_XSCALE_OMR0_CHANGE_INTERRUPT 0x00000001
747
748#define MFI_INTR_FLAG_REPLY_MESSAGE 0x00000001
749#define MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE 0x00000002
750#define MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT 0x00000004
751
707#define MFI_OB_INTR_STATUS_MASK 0x00000002 752#define MFI_OB_INTR_STATUS_MASK 0x00000002
708#define MFI_POLL_TIMEOUT_SECS 60 753#define MFI_POLL_TIMEOUT_SECS 60
709#define MEGASAS_COMPLETION_TIMER_INTERVAL (HZ/10) 754#define MEGASAS_COMPLETION_TIMER_INTERVAL (HZ/10)
@@ -714,6 +759,9 @@ struct megasas_ctrl_info {
714#define MFI_REPLY_SKINNY_MESSAGE_INTERRUPT 0x40000000 759#define MFI_REPLY_SKINNY_MESSAGE_INTERRUPT 0x40000000
715#define MFI_SKINNY_ENABLE_INTERRUPT_MASK (0x00000001) 760#define MFI_SKINNY_ENABLE_INTERRUPT_MASK (0x00000001)
716 761
762#define MFI_1068_PCSR_OFFSET 0x84
763#define MFI_1068_FW_HANDSHAKE_OFFSET 0x64
764#define MFI_1068_FW_READY 0xDDDD0000
717/* 765/*
718* register set for both 1068 and 1078 controllers 766* register set for both 1068 and 1078 controllers
719* structure extended for 1078 registers 767* structure extended for 1078 registers
@@ -755,8 +803,10 @@ struct megasas_register_set {
755 u32 inbound_high_queue_port ; /*00C4h*/ 803 u32 inbound_high_queue_port ; /*00C4h*/
756 804
757 u32 reserved_5; /*00C8h*/ 805 u32 reserved_5; /*00C8h*/
758 u32 index_registers[820]; /*00CCh*/ 806 u32 res_6[11]; /*CCh*/
759 807 u32 host_diag;
808 u32 seq_offset;
809 u32 index_registers[807]; /*00CCh*/
760} __attribute__ ((packed)); 810} __attribute__ ((packed));
761 811
762struct megasas_sge32 { 812struct megasas_sge32 {
@@ -1226,11 +1276,12 @@ struct megasas_instance {
1226 1276
1227 struct megasas_cmd **cmd_list; 1277 struct megasas_cmd **cmd_list;
1228 struct list_head cmd_pool; 1278 struct list_head cmd_pool;
1279 /* used to sync fire the cmd to fw */
1229 spinlock_t cmd_pool_lock; 1280 spinlock_t cmd_pool_lock;
1281 /* used to sync fire the cmd to fw */
1282 spinlock_t hba_lock;
1230 /* used to synch producer, consumer ptrs in dpc */ 1283 /* used to synch producer, consumer ptrs in dpc */
1231 spinlock_t completion_lock; 1284 spinlock_t completion_lock;
1232 /* used to sync fire the cmd to fw */
1233 spinlock_t fire_lock;
1234 struct dma_pool *frame_dma_pool; 1285 struct dma_pool *frame_dma_pool;
1235 struct dma_pool *sense_dma_pool; 1286 struct dma_pool *sense_dma_pool;
1236 1287
@@ -1247,19 +1298,36 @@ struct megasas_instance {
1247 1298
1248 struct pci_dev *pdev; 1299 struct pci_dev *pdev;
1249 u32 unique_id; 1300 u32 unique_id;
1301 u32 fw_support_ieee;
1250 1302
1251 atomic_t fw_outstanding; 1303 atomic_t fw_outstanding;
1252 u32 hw_crit_error; 1304 atomic_t fw_reset_no_pci_access;
1253 1305
1254 struct megasas_instance_template *instancet; 1306 struct megasas_instance_template *instancet;
1255 struct tasklet_struct isr_tasklet; 1307 struct tasklet_struct isr_tasklet;
1308 struct work_struct work_init;
1256 1309
1257 u8 flag; 1310 u8 flag;
1258 u8 unload; 1311 u8 unload;
1259 u8 flag_ieee; 1312 u8 flag_ieee;
1313 u8 issuepend_done;
1314 u8 disableOnlineCtrlReset;
1315 u8 adprecovery;
1260 unsigned long last_time; 1316 unsigned long last_time;
1317 u32 mfiStatus;
1318 u32 last_seq_num;
1261 1319
1262 struct timer_list io_completion_timer; 1320 struct timer_list io_completion_timer;
1321 struct list_head internal_reset_pending_q;
1322};
1323
1324enum {
1325 MEGASAS_HBA_OPERATIONAL = 0,
1326 MEGASAS_ADPRESET_SM_INFAULT = 1,
1327 MEGASAS_ADPRESET_SM_FW_RESET_SUCCESS = 2,
1328 MEGASAS_ADPRESET_SM_OPERATIONAL = 3,
1329 MEGASAS_HW_CRITICAL_ERROR = 4,
1330 MEGASAS_ADPRESET_INPROG_SIGN = 0xDEADDEAD,
1263}; 1331};
1264 1332
1265struct megasas_instance_template { 1333struct megasas_instance_template {
@@ -1272,6 +1340,10 @@ struct megasas_instance_template {
1272 int (*clear_intr)(struct megasas_register_set __iomem *); 1340 int (*clear_intr)(struct megasas_register_set __iomem *);
1273 1341
1274 u32 (*read_fw_status_reg)(struct megasas_register_set __iomem *); 1342 u32 (*read_fw_status_reg)(struct megasas_register_set __iomem *);
1343 int (*adp_reset)(struct megasas_instance *, \
1344 struct megasas_register_set __iomem *);
1345 int (*check_reset)(struct megasas_instance *, \
1346 struct megasas_register_set __iomem *);
1275}; 1347};
1276 1348
1277#define MEGASAS_IS_LOGICAL(scp) \ 1349#define MEGASAS_IS_LOGICAL(scp) \
@@ -1291,7 +1363,9 @@ struct megasas_cmd {
1291 u32 index; 1363 u32 index;
1292 u8 sync_cmd; 1364 u8 sync_cmd;
1293 u8 cmd_status; 1365 u8 cmd_status;
1294 u16 abort_aen; 1366 u8 abort_aen;
1367 u8 retry_for_fw_reset;
1368
1295 1369
1296 struct list_head list; 1370 struct list_head list;
1297 struct scsi_cmnd *scmd; 1371 struct scsi_cmnd *scmd;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 57bcd5c9dcff..12faf64f91b0 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -534,7 +534,7 @@ _base_display_event_data(struct MPT2SAS_ADAPTER *ioc,
534 if (event_data->DiscoveryStatus) 534 if (event_data->DiscoveryStatus)
535 printk("discovery_status(0x%08x)", 535 printk("discovery_status(0x%08x)",
536 le32_to_cpu(event_data->DiscoveryStatus)); 536 le32_to_cpu(event_data->DiscoveryStatus));
537 printk("\n"); 537 printk("\n");
538 return; 538 return;
539 } 539 }
540 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE: 540 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
diff --git a/drivers/scsi/pcmcia/Kconfig b/drivers/scsi/pcmcia/Kconfig
index 53857c6b6d4d..ecc855c550aa 100644
--- a/drivers/scsi/pcmcia/Kconfig
+++ b/drivers/scsi/pcmcia/Kconfig
@@ -11,7 +11,6 @@ if SCSI_LOWLEVEL_PCMCIA && SCSI && PCMCIA && m
11 11
12config PCMCIA_AHA152X 12config PCMCIA_AHA152X
13 tristate "Adaptec AHA152X PCMCIA support" 13 tristate "Adaptec AHA152X PCMCIA support"
14 depends on !64BIT
15 select SCSI_SPI_ATTRS 14 select SCSI_SPI_ATTRS
16 help 15 help
17 Say Y here if you intend to attach this type of PCMCIA SCSI host 16 Say Y here if you intend to attach this type of PCMCIA SCSI host
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 9793aa6afb10..d8db0137c0c7 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -4194,6 +4194,8 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
4194 4194
4195 nvmd_type = ioctl_payload->minor_function; 4195 nvmd_type = ioctl_payload->minor_function;
4196 fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); 4196 fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
4197 if (!fw_control_context)
4198 return -ENOMEM;
4197 fw_control_context->usrAddr = (u8 *)&ioctl_payload->func_specific[0]; 4199 fw_control_context->usrAddr = (u8 *)&ioctl_payload->func_specific[0];
4198 fw_control_context->len = ioctl_payload->length; 4200 fw_control_context->len = ioctl_payload->length;
4199 circularQ = &pm8001_ha->inbnd_q_tbl[0]; 4201 circularQ = &pm8001_ha->inbnd_q_tbl[0];
@@ -4272,6 +4274,8 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
4272 4274
4273 nvmd_type = ioctl_payload->minor_function; 4275 nvmd_type = ioctl_payload->minor_function;
4274 fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); 4276 fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
4277 if (!fw_control_context)
4278 return -ENOMEM;
4275 circularQ = &pm8001_ha->inbnd_q_tbl[0]; 4279 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4276 memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr, 4280 memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr,
4277 ioctl_payload->func_specific, 4281 ioctl_payload->func_specific,
@@ -4381,6 +4385,8 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
4381 struct pm8001_ioctl_payload *ioctl_payload = payload; 4385 struct pm8001_ioctl_payload *ioctl_payload = payload;
4382 4386
4383 fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); 4387 fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
4388 if (!fw_control_context)
4389 return -ENOMEM;
4384 fw_control = (struct fw_control_info *)&ioctl_payload->func_specific[0]; 4390 fw_control = (struct fw_control_info *)&ioctl_payload->func_specific[0];
4385 if (fw_control->len != 0) { 4391 if (fw_control->len != 0) {
4386 if (pm8001_mem_alloc(pm8001_ha->pdev, 4392 if (pm8001_mem_alloc(pm8001_ha->pdev,
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 114bc5a81171..2ff4342ae362 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1538,22 +1538,22 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
1538 if (!fcport) 1538 if (!fcport)
1539 return; 1539 return;
1540 1540
1541 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
1542 return;
1543
1544 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
1545 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
1546 return;
1547 }
1548
1549 /* 1541 /*
1550 * Transport has effectively 'deleted' the rport, clear 1542 * Transport has effectively 'deleted' the rport, clear
1551 * all local references. 1543 * all local references.
1552 */ 1544 */
1553 spin_lock_irq(host->host_lock); 1545 spin_lock_irq(host->host_lock);
1554 fcport->rport = NULL; 1546 fcport->rport = fcport->drport = NULL;
1555 *((fc_port_t **)rport->dd_data) = NULL; 1547 *((fc_port_t **)rport->dd_data) = NULL;
1556 spin_unlock_irq(host->host_lock); 1548 spin_unlock_irq(host->host_lock);
1549
1550 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
1551 return;
1552
1553 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
1554 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
1555 return;
1556 }
1557} 1557}
1558 1558
1559static void 1559static void
@@ -1676,14 +1676,14 @@ static void
1676qla2x00_get_host_fabric_name(struct Scsi_Host *shost) 1676qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
1677{ 1677{
1678 scsi_qla_host_t *vha = shost_priv(shost); 1678 scsi_qla_host_t *vha = shost_priv(shost);
1679 u64 node_name; 1679 uint8_t node_name[WWN_SIZE] = { 0xFF, 0xFF, 0xFF, 0xFF, \
1680 0xFF, 0xFF, 0xFF, 0xFF};
1681 u64 fabric_name = wwn_to_u64(node_name);
1680 1682
1681 if (vha->device_flags & SWITCH_FOUND) 1683 if (vha->device_flags & SWITCH_FOUND)
1682 node_name = wwn_to_u64(vha->fabric_node_name); 1684 fabric_name = wwn_to_u64(vha->fabric_node_name);
1683 else
1684 node_name = wwn_to_u64(vha->node_name);
1685 1685
1686 fc_host_fabric_name(shost) = node_name; 1686 fc_host_fabric_name(shost) = fabric_name;
1687} 1687}
1688 1688
1689static void 1689static void
@@ -1776,6 +1776,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1776 } 1776 }
1777 1777
1778 /* initialize attributes */ 1778 /* initialize attributes */
1779 fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
1779 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name); 1780 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
1780 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name); 1781 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
1781 fc_host_supported_classes(vha->host) = 1782 fc_host_supported_classes(vha->host) =
@@ -1984,6 +1985,7 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
1984 struct qla_hw_data *ha = vha->hw; 1985 struct qla_hw_data *ha = vha->hw;
1985 u32 speed = FC_PORTSPEED_UNKNOWN; 1986 u32 speed = FC_PORTSPEED_UNKNOWN;
1986 1987
1988 fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
1987 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name); 1989 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
1988 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name); 1990 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
1989 fc_host_supported_classes(vha->host) = FC_COS_CLASS3; 1991 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 9067629817ea..fdfbf83a6330 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1254,10 +1254,9 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
1254 return -EINVAL; 1254 return -EINVAL;
1255 } 1255 }
1256 1256
1257 if (fcport->loop_id == FC_NO_LOOP_ID) { 1257 if (atomic_read(&fcport->state) != FCS_ONLINE) {
1258 DEBUG2(printk(KERN_ERR "%s(%ld): Invalid port loop id, " 1258 DEBUG2(printk(KERN_ERR "%s(%ld): Port not online\n",
1259 "loop_id = 0x%x\n", 1259 __func__, vha->host_no));
1260 __func__, vha->host_no, fcport->loop_id));
1261 return -EINVAL; 1260 return -EINVAL;
1262 } 1261 }
1263 1262
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index d2a4e1530708..e1d3ad40a946 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -706,6 +706,11 @@ typedef struct {
706#define MBC_SET_PORT_CONFIG 0x122 /* Set port configuration */ 706#define MBC_SET_PORT_CONFIG 0x122 /* Set port configuration */
707#define MBC_GET_PORT_CONFIG 0x123 /* Get port configuration */ 707#define MBC_GET_PORT_CONFIG 0x123 /* Get port configuration */
708 708
709/*
710 * ISP81xx mailbox commands
711 */
712#define MBC_WRITE_MPI_REGISTER 0x01 /* Write MPI Register. */
713
709/* Firmware return data sizes */ 714/* Firmware return data sizes */
710#define FCAL_MAP_SIZE 128 715#define FCAL_MAP_SIZE 128
711 716
@@ -2860,6 +2865,7 @@ typedef struct scsi_qla_host {
2860#define NPIV_CONFIG_NEEDED 16 2865#define NPIV_CONFIG_NEEDED 16
2861#define ISP_UNRECOVERABLE 17 2866#define ISP_UNRECOVERABLE 17
2862#define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */ 2867#define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */
2868#define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */
2863 2869
2864 uint32_t device_flags; 2870 uint32_t device_flags;
2865#define SWITCH_FOUND BIT_0 2871#define SWITCH_FOUND BIT_0
@@ -3003,6 +3009,8 @@ typedef struct scsi_qla_host {
3003 3009
3004#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr) 3010#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
3005 3011
3012#define QLA_SG_ALL 1024
3013
3006enum nexus_wait_type { 3014enum nexus_wait_type {
3007 WAIT_HOST = 0, 3015 WAIT_HOST = 0,
3008 WAIT_TARGET, 3016 WAIT_TARGET,
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 1a1b281cea33..c33dec827e1e 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -352,6 +352,8 @@ qla2x00_read_ram_word(scsi_qla_host_t *, uint32_t, uint32_t *);
352extern int 352extern int
353qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t); 353qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t);
354 354
355extern int
356qla81xx_write_mpi_register(scsi_qla_host_t *, uint16_t *);
355extern int qla2x00_get_data_rate(scsi_qla_host_t *); 357extern int qla2x00_get_data_rate(scsi_qla_host_t *);
356extern int qla24xx_set_fcp_prio(scsi_qla_host_t *, uint16_t, uint16_t, 358extern int qla24xx_set_fcp_prio(scsi_qla_host_t *, uint16_t, uint16_t,
357 uint16_t *); 359 uint16_t *);
@@ -501,7 +503,6 @@ extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
501/* PCI related functions */ 503/* PCI related functions */
502extern int qla82xx_pci_config(struct scsi_qla_host *); 504extern int qla82xx_pci_config(struct scsi_qla_host *);
503extern int qla82xx_pci_mem_read_2M(struct qla_hw_data *, u64, void *, int); 505extern int qla82xx_pci_mem_read_2M(struct qla_hw_data *, u64, void *, int);
504extern int qla82xx_pci_mem_write_2M(struct qla_hw_data *, u64, void *, int);
505extern char *qla82xx_pci_info_str(struct scsi_qla_host *, char *); 506extern char *qla82xx_pci_info_str(struct scsi_qla_host *, char *);
506extern int qla82xx_pci_region_offset(struct pci_dev *, int); 507extern int qla82xx_pci_region_offset(struct pci_dev *, int);
507extern int qla82xx_iospace_config(struct qla_hw_data *); 508extern int qla82xx_iospace_config(struct qla_hw_data *);
@@ -509,8 +510,8 @@ extern int qla82xx_iospace_config(struct qla_hw_data *);
509/* Initialization related functions */ 510/* Initialization related functions */
510extern void qla82xx_reset_chip(struct scsi_qla_host *); 511extern void qla82xx_reset_chip(struct scsi_qla_host *);
511extern void qla82xx_config_rings(struct scsi_qla_host *); 512extern void qla82xx_config_rings(struct scsi_qla_host *);
512extern int qla82xx_pinit_from_rom(scsi_qla_host_t *);
513extern void qla82xx_watchdog(scsi_qla_host_t *); 513extern void qla82xx_watchdog(scsi_qla_host_t *);
514extern int qla82xx_start_firmware(scsi_qla_host_t *);
514 515
515/* Firmware and flash related functions */ 516/* Firmware and flash related functions */
516extern int qla82xx_load_risc(scsi_qla_host_t *, uint32_t *); 517extern int qla82xx_load_risc(scsi_qla_host_t *, uint32_t *);
@@ -533,25 +534,17 @@ extern irqreturn_t qla82xx_msix_default(int, void *);
533extern irqreturn_t qla82xx_msix_rsp_q(int, void *); 534extern irqreturn_t qla82xx_msix_rsp_q(int, void *);
534extern void qla82xx_enable_intrs(struct qla_hw_data *); 535extern void qla82xx_enable_intrs(struct qla_hw_data *);
535extern void qla82xx_disable_intrs(struct qla_hw_data *); 536extern void qla82xx_disable_intrs(struct qla_hw_data *);
536extern void qla82xx_mbx_completion(scsi_qla_host_t *, uint16_t);
537extern void qla82xx_poll(int, void *); 537extern void qla82xx_poll(int, void *);
538extern void qla82xx_init_flags(struct qla_hw_data *); 538extern void qla82xx_init_flags(struct qla_hw_data *);
539 539
540/* ISP 8021 hardware related */ 540/* ISP 8021 hardware related */
541extern int qla82xx_crb_win_lock(struct qla_hw_data *); 541extern void qla82xx_set_drv_active(scsi_qla_host_t *);
542extern void qla82xx_crb_win_unlock(struct qla_hw_data *); 542extern void qla82xx_crb_win_unlock(struct qla_hw_data *);
543extern int qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *, ulong *);
544extern int qla82xx_wr_32(struct qla_hw_data *, ulong, u32); 543extern int qla82xx_wr_32(struct qla_hw_data *, ulong, u32);
545extern int qla82xx_rd_32(struct qla_hw_data *, ulong); 544extern int qla82xx_rd_32(struct qla_hw_data *, ulong);
546extern int qla82xx_rdmem(struct qla_hw_data *, u64, void *, int); 545extern int qla82xx_rdmem(struct qla_hw_data *, u64, void *, int);
547extern int qla82xx_wrmem(struct qla_hw_data *, u64, void *, int); 546extern int qla82xx_wrmem(struct qla_hw_data *, u64, void *, int);
548extern int qla82xx_check_for_bad_spd(struct qla_hw_data *);
549extern int qla82xx_load_fw(scsi_qla_host_t *);
550extern int qla82xx_rom_lock(struct qla_hw_data *);
551extern void qla82xx_rom_unlock(struct qla_hw_data *); 547extern void qla82xx_rom_unlock(struct qla_hw_data *);
552extern int qla82xx_rom_fast_read(struct qla_hw_data *, int , int *);
553extern int qla82xx_do_rom_fast_read(struct qla_hw_data *, int, int *);
554extern unsigned long qla82xx_decode_crb_addr(unsigned long);
555 548
556/* ISP 8021 IDC */ 549/* ISP 8021 IDC */
557extern void qla82xx_clear_drv_active(struct qla_hw_data *); 550extern void qla82xx_clear_drv_active(struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 9c383baebe27..3cafbef40737 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -954,6 +954,19 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
954} 954}
955 955
956/** 956/**
957 * qla81xx_reset_mpi() - Reset's MPI FW via Write MPI Register MBC.
958 *
959 * Returns 0 on success.
960 */
961int
962qla81xx_reset_mpi(scsi_qla_host_t *vha)
963{
964 uint16_t mb[4] = {0x1010, 0, 1, 0};
965
966 return qla81xx_write_mpi_register(vha, mb);
967}
968
969/**
957 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC. 970 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
958 * @ha: HA context 971 * @ha: HA context
959 * 972 *
@@ -967,6 +980,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
967 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 980 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
968 uint32_t cnt, d2; 981 uint32_t cnt, d2;
969 uint16_t wd; 982 uint16_t wd;
983 static int abts_cnt; /* ISP abort retry counts */
970 984
971 spin_lock_irqsave(&ha->hardware_lock, flags); 985 spin_lock_irqsave(&ha->hardware_lock, flags);
972 986
@@ -1000,6 +1014,23 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
1000 barrier(); 1014 barrier();
1001 } 1015 }
1002 1016
1017 /* If required, do an MPI FW reset now */
1018 if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
1019 if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
1020 if (++abts_cnt < 5) {
1021 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1022 set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
1023 } else {
1024 /*
1025 * We exhausted the ISP abort retries. We have to
1026 * set the board offline.
1027 */
1028 abts_cnt = 0;
1029 vha->flags.online = 0;
1030 }
1031 }
1032 }
1033
1003 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET); 1034 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
1004 RD_REG_DWORD(&reg->hccr); 1035 RD_REG_DWORD(&reg->hccr);
1005 1036
@@ -2799,6 +2830,9 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2799 if (!IS_IIDMA_CAPABLE(ha)) 2830 if (!IS_IIDMA_CAPABLE(ha))
2800 return; 2831 return;
2801 2832
2833 if (atomic_read(&fcport->state) != FCS_ONLINE)
2834 return;
2835
2802 if (fcport->fp_speed == PORT_SPEED_UNKNOWN || 2836 if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
2803 fcport->fp_speed > ha->link_data_rate) 2837 fcport->fp_speed > ha->link_data_rate)
2804 return; 2838 return;
@@ -3878,17 +3912,19 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
3878 LOOP_DOWN_TIME); 3912 LOOP_DOWN_TIME);
3879 } 3913 }
3880 3914
3881 /* Make sure for ISP 82XX IO DMA is complete */ 3915 if (!ha->flags.eeh_busy) {
3882 if (IS_QLA82XX(ha)) { 3916 /* Make sure for ISP 82XX IO DMA is complete */
3883 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, 3917 if (IS_QLA82XX(ha)) {
3884 WAIT_HOST) == QLA_SUCCESS) { 3918 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
3885 DEBUG2(qla_printk(KERN_INFO, ha, 3919 WAIT_HOST) == QLA_SUCCESS) {
3886 "Done wait for pending commands\n")); 3920 DEBUG2(qla_printk(KERN_INFO, ha,
3921 "Done wait for pending commands\n"));
3922 }
3887 } 3923 }
3888 }
3889 3924
3890 /* Requeue all commands in outstanding command list. */ 3925 /* Requeue all commands in outstanding command list. */
3891 qla2x00_abort_all_cmds(vha, DID_RESET << 16); 3926 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
3927 }
3892} 3928}
3893 3929
3894/* 3930/*
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 28f65be19dad..e0e43d9e7ed1 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -412,8 +412,14 @@ skip_rio:
412 "Unrecoverable Hardware Error: adapter " 412 "Unrecoverable Hardware Error: adapter "
413 "marked OFFLINE!\n"); 413 "marked OFFLINE!\n");
414 vha->flags.online = 0; 414 vha->flags.online = 0;
415 } else 415 } else {
416 /* Check to see if MPI timeout occured */
417 if ((mbx & MBX_3) && (ha->flags.port0))
418 set_bit(MPI_RESET_NEEDED,
419 &vha->dpc_flags);
420
416 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 421 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
422 }
417 } else if (mb[1] == 0) { 423 } else if (mb[1] == 0) {
418 qla_printk(KERN_INFO, ha, 424 qla_printk(KERN_INFO, ha,
419 "Unrecoverable Hardware Error: adapter marked " 425 "Unrecoverable Hardware Error: adapter marked "
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index a595ec8264f8..effd8a1403d9 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -3828,8 +3828,6 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3828 3828
3829 /* Copy mailbox information */ 3829 /* Copy mailbox information */
3830 memcpy( mresp, mcp->mb, 64); 3830 memcpy( mresp, mcp->mb, 64);
3831 mresp[3] = mcp->mb[18];
3832 mresp[4] = mcp->mb[19];
3833 return rval; 3831 return rval;
3834} 3832}
3835 3833
@@ -3890,9 +3888,10 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3890 } 3888 }
3891 3889
3892 /* Copy mailbox information */ 3890 /* Copy mailbox information */
3893 memcpy( mresp, mcp->mb, 32); 3891 memcpy(mresp, mcp->mb, 64);
3894 return rval; 3892 return rval;
3895} 3893}
3894
3896int 3895int
3897qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic) 3896qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic)
3898{ 3897{
@@ -3953,6 +3952,67 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
3953} 3952}
3954 3953
3955int 3954int
3955qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
3956{
3957 int rval;
3958 uint32_t stat, timer;
3959 uint16_t mb0 = 0;
3960 struct qla_hw_data *ha = vha->hw;
3961 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3962
3963 rval = QLA_SUCCESS;
3964
3965 DEBUG11(qla_printk(KERN_INFO, ha,
3966 "%s(%ld): entered.\n", __func__, vha->host_no));
3967
3968 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
3969
3970 /* Write the MBC data to the registers */
3971 WRT_REG_WORD(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
3972 WRT_REG_WORD(&reg->mailbox1, mb[0]);
3973 WRT_REG_WORD(&reg->mailbox2, mb[1]);
3974 WRT_REG_WORD(&reg->mailbox3, mb[2]);
3975 WRT_REG_WORD(&reg->mailbox4, mb[3]);
3976
3977 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
3978
3979 /* Poll for MBC interrupt */
3980 for (timer = 6000000; timer; timer--) {
3981 /* Check for pending interrupts. */
3982 stat = RD_REG_DWORD(&reg->host_status);
3983 if (stat & HSRX_RISC_INT) {
3984 stat &= 0xff;
3985
3986 if (stat == 0x1 || stat == 0x2 ||
3987 stat == 0x10 || stat == 0x11) {
3988 set_bit(MBX_INTERRUPT,
3989 &ha->mbx_cmd_flags);
3990 mb0 = RD_REG_WORD(&reg->mailbox0);
3991 WRT_REG_DWORD(&reg->hccr,
3992 HCCRX_CLR_RISC_INT);
3993 RD_REG_DWORD(&reg->hccr);
3994 break;
3995 }
3996 }
3997 udelay(5);
3998 }
3999
4000 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
4001 rval = mb0 & MBS_MASK;
4002 else
4003 rval = QLA_FUNCTION_FAILED;
4004
4005 if (rval != QLA_SUCCESS) {
4006 DEBUG2_3_11(printk(KERN_INFO "%s(%ld): failed=%x mb[0]=%x.\n",
4007 __func__, vha->host_no, rval, mb[0]));
4008 } else {
4009 DEBUG11(printk(KERN_INFO
4010 "%s(%ld): done.\n", __func__, vha->host_no));
4011 }
4012
4013 return rval;
4014}
4015int
3956qla2x00_get_data_rate(scsi_qla_host_t *vha) 4016qla2x00_get_data_rate(scsi_qla_host_t *vha)
3957{ 4017{
3958 int rval; 4018 int rval;
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 0a71cc71eab2..8d9edfb39803 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -403,6 +403,54 @@ qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
403 return off; 403 return off;
404} 404}
405 405
406static int
407qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong *off)
408{
409 struct crb_128M_2M_sub_block_map *m;
410
411 if (*off >= QLA82XX_CRB_MAX)
412 return -1;
413
414 if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) {
415 *off = (*off - QLA82XX_PCI_CAMQM) +
416 QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase;
417 return 0;
418 }
419
420 if (*off < QLA82XX_PCI_CRBSPACE)
421 return -1;
422
423 *off -= QLA82XX_PCI_CRBSPACE;
424
425 /* Try direct map */
426 m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];
427
428 if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) {
429 *off = *off + m->start_2M - m->start_128M + ha->nx_pcibase;
430 return 0;
431 }
432 /* Not in direct map, use crb window */
433 return 1;
434}
435
436#define CRB_WIN_LOCK_TIMEOUT 100000000
437static int qla82xx_crb_win_lock(struct qla_hw_data *ha)
438{
439 int done = 0, timeout = 0;
440
441 while (!done) {
442 /* acquire semaphore3 from PCI HW block */
443 done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
444 if (done == 1)
445 break;
446 if (timeout >= CRB_WIN_LOCK_TIMEOUT)
447 return -1;
448 timeout++;
449 }
450 qla82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->portnum);
451 return 0;
452}
453
406int 454int
407qla82xx_wr_32(struct qla_hw_data *ha, ulong off, u32 data) 455qla82xx_wr_32(struct qla_hw_data *ha, ulong off, u32 data)
408{ 456{
@@ -453,24 +501,6 @@ qla82xx_rd_32(struct qla_hw_data *ha, ulong off)
453 return data; 501 return data;
454} 502}
455 503
456#define CRB_WIN_LOCK_TIMEOUT 100000000
457int qla82xx_crb_win_lock(struct qla_hw_data *ha)
458{
459 int done = 0, timeout = 0;
460
461 while (!done) {
462 /* acquire semaphore3 from PCI HW block */
463 done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
464 if (done == 1)
465 break;
466 if (timeout >= CRB_WIN_LOCK_TIMEOUT)
467 return -1;
468 timeout++;
469 }
470 qla82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->portnum);
471 return 0;
472}
473
474#define IDC_LOCK_TIMEOUT 100000000 504#define IDC_LOCK_TIMEOUT 100000000
475int qla82xx_idc_lock(struct qla_hw_data *ha) 505int qla82xx_idc_lock(struct qla_hw_data *ha)
476{ 506{
@@ -504,36 +534,6 @@ void qla82xx_idc_unlock(struct qla_hw_data *ha)
504 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK)); 534 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
505} 535}
506 536
507int
508qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong *off)
509{
510 struct crb_128M_2M_sub_block_map *m;
511
512 if (*off >= QLA82XX_CRB_MAX)
513 return -1;
514
515 if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) {
516 *off = (*off - QLA82XX_PCI_CAMQM) +
517 QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase;
518 return 0;
519 }
520
521 if (*off < QLA82XX_PCI_CRBSPACE)
522 return -1;
523
524 *off -= QLA82XX_PCI_CRBSPACE;
525
526 /* Try direct map */
527 m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];
528
529 if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) {
530 *off = *off + m->start_2M - m->start_128M + ha->nx_pcibase;
531 return 0;
532 }
533 /* Not in direct map, use crb window */
534 return 1;
535}
536
537/* PCI Windowing for DDR regions. */ 537/* PCI Windowing for DDR regions. */
538#define QLA82XX_ADDR_IN_RANGE(addr, low, high) \ 538#define QLA82XX_ADDR_IN_RANGE(addr, low, high) \
539 (((addr) <= (high)) && ((addr) >= (low))) 539 (((addr) <= (high)) && ((addr) >= (low)))
@@ -557,7 +557,7 @@ qla82xx_pci_mem_bound_check(struct qla_hw_data *ha,
557 557
558int qla82xx_pci_set_window_warning_count; 558int qla82xx_pci_set_window_warning_count;
559 559
560unsigned long 560static unsigned long
561qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr) 561qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
562{ 562{
563 int window; 563 int window;
@@ -798,7 +798,8 @@ qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
798} 798}
799 799
800#define MTU_FUDGE_FACTOR 100 800#define MTU_FUDGE_FACTOR 100
801unsigned long qla82xx_decode_crb_addr(unsigned long addr) 801static unsigned long
802qla82xx_decode_crb_addr(unsigned long addr)
802{ 803{
803 int i; 804 int i;
804 unsigned long base_addr, offset, pci_base; 805 unsigned long base_addr, offset, pci_base;
@@ -824,7 +825,7 @@ unsigned long qla82xx_decode_crb_addr(unsigned long addr)
824static long rom_max_timeout = 100; 825static long rom_max_timeout = 100;
825static long qla82xx_rom_lock_timeout = 100; 826static long qla82xx_rom_lock_timeout = 100;
826 827
827int 828static int
828qla82xx_rom_lock(struct qla_hw_data *ha) 829qla82xx_rom_lock(struct qla_hw_data *ha)
829{ 830{
830 int done = 0, timeout = 0; 831 int done = 0, timeout = 0;
@@ -842,7 +843,7 @@ qla82xx_rom_lock(struct qla_hw_data *ha)
842 return 0; 843 return 0;
843} 844}
844 845
845int 846static int
846qla82xx_wait_rom_busy(struct qla_hw_data *ha) 847qla82xx_wait_rom_busy(struct qla_hw_data *ha)
847{ 848{
848 long timeout = 0; 849 long timeout = 0;
@@ -862,7 +863,7 @@ qla82xx_wait_rom_busy(struct qla_hw_data *ha)
862 return 0; 863 return 0;
863} 864}
864 865
865int 866static int
866qla82xx_wait_rom_done(struct qla_hw_data *ha) 867qla82xx_wait_rom_done(struct qla_hw_data *ha)
867{ 868{
868 long timeout = 0; 869 long timeout = 0;
@@ -882,7 +883,7 @@ qla82xx_wait_rom_done(struct qla_hw_data *ha)
882 return 0; 883 return 0;
883} 884}
884 885
885int 886static int
886qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) 887qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
887{ 888{
888 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr); 889 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
@@ -905,7 +906,7 @@ qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
905 return 0; 906 return 0;
906} 907}
907 908
908int 909static int
909qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) 910qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
910{ 911{
911 int ret, loops = 0; 912 int ret, loops = 0;
@@ -926,7 +927,7 @@ qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
926 return ret; 927 return ret;
927} 928}
928 929
929int 930static int
930qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val) 931qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
931{ 932{
932 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR); 933 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR);
@@ -940,7 +941,7 @@ qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
940 return 0; 941 return 0;
941} 942}
942 943
943int 944static int
944qla82xx_flash_wait_write_finish(struct qla_hw_data *ha) 945qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
945{ 946{
946 long timeout = 0; 947 long timeout = 0;
@@ -964,7 +965,7 @@ qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
964 return ret; 965 return ret;
965} 966}
966 967
967int 968static int
968qla82xx_flash_set_write_enable(struct qla_hw_data *ha) 969qla82xx_flash_set_write_enable(struct qla_hw_data *ha)
969{ 970{
970 uint32_t val; 971 uint32_t val;
@@ -981,7 +982,7 @@ qla82xx_flash_set_write_enable(struct qla_hw_data *ha)
981 return 0; 982 return 0;
982} 983}
983 984
984int 985static int
985qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val) 986qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
986{ 987{
987 if (qla82xx_flash_set_write_enable(ha)) 988 if (qla82xx_flash_set_write_enable(ha))
@@ -996,7 +997,7 @@ qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
996 return qla82xx_flash_wait_write_finish(ha); 997 return qla82xx_flash_wait_write_finish(ha);
997} 998}
998 999
999int 1000static int
1000qla82xx_write_disable_flash(struct qla_hw_data *ha) 1001qla82xx_write_disable_flash(struct qla_hw_data *ha)
1001{ 1002{
1002 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI); 1003 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI);
@@ -1008,7 +1009,7 @@ qla82xx_write_disable_flash(struct qla_hw_data *ha)
1008 return 0; 1009 return 0;
1009} 1010}
1010 1011
1011int 1012static int
1012ql82xx_rom_lock_d(struct qla_hw_data *ha) 1013ql82xx_rom_lock_d(struct qla_hw_data *ha)
1013{ 1014{
1014 int loops = 0; 1015 int loops = 0;
@@ -1024,7 +1025,7 @@ ql82xx_rom_lock_d(struct qla_hw_data *ha)
1024 return 0;; 1025 return 0;;
1025} 1026}
1026 1027
1027int 1028static int
1028qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr, 1029qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
1029 uint32_t data) 1030 uint32_t data)
1030{ 1031{
@@ -1061,7 +1062,8 @@ done_write:
1061/* This routine does CRB initialize sequence 1062/* This routine does CRB initialize sequence
1062 * to put the ISP into operational state 1063 * to put the ISP into operational state
1063 */ 1064 */
1064int qla82xx_pinit_from_rom(scsi_qla_host_t *vha) 1065static int
1066qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1065{ 1067{
1066 int addr, val; 1068 int addr, val;
1067 int i ; 1069 int i ;
@@ -1207,7 +1209,8 @@ int qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1207 return 0; 1209 return 0;
1208} 1210}
1209 1211
1210int qla82xx_check_for_bad_spd(struct qla_hw_data *ha) 1212static int
1213qla82xx_check_for_bad_spd(struct qla_hw_data *ha)
1211{ 1214{
1212 u32 val = 0; 1215 u32 val = 0;
1213 val = qla82xx_rd_32(ha, BOOT_LOADER_DIMM_STATUS); 1216 val = qla82xx_rd_32(ha, BOOT_LOADER_DIMM_STATUS);
@@ -1225,7 +1228,116 @@ int qla82xx_check_for_bad_spd(struct qla_hw_data *ha)
1225 return 0; 1228 return 0;
1226} 1229}
1227 1230
1228int 1231static int
1232qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
1233 u64 off, void *data, int size)
1234{
1235 int i, j, ret = 0, loop, sz[2], off0;
1236 int scale, shift_amount, startword;
1237 uint32_t temp;
1238 uint64_t off8, mem_crb, tmpw, word[2] = {0, 0};
1239
1240 /*
1241 * If not MN, go check for MS or invalid.
1242 */
1243 if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
1244 mem_crb = QLA82XX_CRB_QDR_NET;
1245 else {
1246 mem_crb = QLA82XX_CRB_DDR_NET;
1247 if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
1248 return qla82xx_pci_mem_write_direct(ha,
1249 off, data, size);
1250 }
1251
1252 off0 = off & 0x7;
1253 sz[0] = (size < (8 - off0)) ? size : (8 - off0);
1254 sz[1] = size - sz[0];
1255
1256 off8 = off & 0xfffffff0;
1257 loop = (((off & 0xf) + size - 1) >> 4) + 1;
1258 shift_amount = 4;
1259 scale = 2;
1260 startword = (off & 0xf)/8;
1261
1262 for (i = 0; i < loop; i++) {
1263 if (qla82xx_pci_mem_read_2M(ha, off8 +
1264 (i << shift_amount), &word[i * scale], 8))
1265 return -1;
1266 }
1267
1268 switch (size) {
1269 case 1:
1270 tmpw = *((uint8_t *)data);
1271 break;
1272 case 2:
1273 tmpw = *((uint16_t *)data);
1274 break;
1275 case 4:
1276 tmpw = *((uint32_t *)data);
1277 break;
1278 case 8:
1279 default:
1280 tmpw = *((uint64_t *)data);
1281 break;
1282 }
1283
1284 if (sz[0] == 8) {
1285 word[startword] = tmpw;
1286 } else {
1287 word[startword] &=
1288 ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
1289 word[startword] |= tmpw << (off0 * 8);
1290 }
1291 if (sz[1] != 0) {
1292 word[startword+1] &= ~(~0ULL << (sz[1] * 8));
1293 word[startword+1] |= tmpw >> (sz[0] * 8);
1294 }
1295
1296 /*
1297 * don't lock here - write_wx gets the lock if each time
1298 * write_lock_irqsave(&adapter->adapter_lock, flags);
1299 * netxen_nic_pci_change_crbwindow_128M(adapter, 0);
1300 */
1301 for (i = 0; i < loop; i++) {
1302 temp = off8 + (i << shift_amount);
1303 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
1304 temp = 0;
1305 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
1306 temp = word[i * scale] & 0xffffffff;
1307 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
1308 temp = (word[i * scale] >> 32) & 0xffffffff;
1309 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
1310 temp = word[i*scale + 1] & 0xffffffff;
1311 qla82xx_wr_32(ha, mem_crb +
1312 MIU_TEST_AGT_WRDATA_UPPER_LO, temp);
1313 temp = (word[i*scale + 1] >> 32) & 0xffffffff;
1314 qla82xx_wr_32(ha, mem_crb +
1315 MIU_TEST_AGT_WRDATA_UPPER_HI, temp);
1316
1317 temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
1318 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1319 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
1320 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1321
1322 for (j = 0; j < MAX_CTL_CHECK; j++) {
1323 temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
1324 if ((temp & MIU_TA_CTL_BUSY) == 0)
1325 break;
1326 }
1327
1328 if (j >= MAX_CTL_CHECK) {
1329 if (printk_ratelimit())
1330 dev_err(&ha->pdev->dev,
1331 "failed to write through agent\n");
1332 ret = -1;
1333 break;
1334 }
1335 }
1336
1337 return ret;
1338}
1339
1340static int
1229qla82xx_fw_load_from_flash(struct qla_hw_data *ha) 1341qla82xx_fw_load_from_flash(struct qla_hw_data *ha)
1230{ 1342{
1231 int i; 1343 int i;
@@ -1357,114 +1469,6 @@ qla82xx_pci_mem_read_2M(struct qla_hw_data *ha,
1357 return 0; 1469 return 0;
1358} 1470}
1359 1471
1360int
1361qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
1362 u64 off, void *data, int size)
1363{
1364 int i, j, ret = 0, loop, sz[2], off0;
1365 int scale, shift_amount, startword;
1366 uint32_t temp;
1367 uint64_t off8, mem_crb, tmpw, word[2] = {0, 0};
1368
1369 /*
1370 * If not MN, go check for MS or invalid.
1371 */
1372 if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
1373 mem_crb = QLA82XX_CRB_QDR_NET;
1374 else {
1375 mem_crb = QLA82XX_CRB_DDR_NET;
1376 if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
1377 return qla82xx_pci_mem_write_direct(ha,
1378 off, data, size);
1379 }
1380
1381 off0 = off & 0x7;
1382 sz[0] = (size < (8 - off0)) ? size : (8 - off0);
1383 sz[1] = size - sz[0];
1384
1385 off8 = off & 0xfffffff0;
1386 loop = (((off & 0xf) + size - 1) >> 4) + 1;
1387 shift_amount = 4;
1388 scale = 2;
1389 startword = (off & 0xf)/8;
1390
1391 for (i = 0; i < loop; i++) {
1392 if (qla82xx_pci_mem_read_2M(ha, off8 +
1393 (i << shift_amount), &word[i * scale], 8))
1394 return -1;
1395 }
1396
1397 switch (size) {
1398 case 1:
1399 tmpw = *((uint8_t *)data);
1400 break;
1401 case 2:
1402 tmpw = *((uint16_t *)data);
1403 break;
1404 case 4:
1405 tmpw = *((uint32_t *)data);
1406 break;
1407 case 8:
1408 default:
1409 tmpw = *((uint64_t *)data);
1410 break;
1411 }
1412
1413 if (sz[0] == 8) {
1414 word[startword] = tmpw;
1415 } else {
1416 word[startword] &=
1417 ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
1418 word[startword] |= tmpw << (off0 * 8);
1419 }
1420 if (sz[1] != 0) {
1421 word[startword+1] &= ~(~0ULL << (sz[1] * 8));
1422 word[startword+1] |= tmpw >> (sz[0] * 8);
1423 }
1424
1425 /*
1426 * don't lock here - write_wx gets the lock if each time
1427 * write_lock_irqsave(&adapter->adapter_lock, flags);
1428 * netxen_nic_pci_change_crbwindow_128M(adapter, 0);
1429 */
1430 for (i = 0; i < loop; i++) {
1431 temp = off8 + (i << shift_amount);
1432 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
1433 temp = 0;
1434 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
1435 temp = word[i * scale] & 0xffffffff;
1436 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
1437 temp = (word[i * scale] >> 32) & 0xffffffff;
1438 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
1439 temp = word[i*scale + 1] & 0xffffffff;
1440 qla82xx_wr_32(ha, mem_crb +
1441 MIU_TEST_AGT_WRDATA_UPPER_LO, temp);
1442 temp = (word[i*scale + 1] >> 32) & 0xffffffff;
1443 qla82xx_wr_32(ha, mem_crb +
1444 MIU_TEST_AGT_WRDATA_UPPER_HI, temp);
1445
1446 temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
1447 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1448 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
1449 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1450
1451 for (j = 0; j < MAX_CTL_CHECK; j++) {
1452 temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
1453 if ((temp & MIU_TA_CTL_BUSY) == 0)
1454 break;
1455 }
1456
1457 if (j >= MAX_CTL_CHECK) {
1458 if (printk_ratelimit())
1459 dev_err(&ha->pdev->dev,
1460 "failed to write through agent\n");
1461 ret = -1;
1462 break;
1463 }
1464 }
1465
1466 return ret;
1467}
1468 1472
1469static struct qla82xx_uri_table_desc * 1473static struct qla82xx_uri_table_desc *
1470qla82xx_get_table_desc(const u8 *unirom, int section) 1474qla82xx_get_table_desc(const u8 *unirom, int section)
@@ -1725,7 +1729,8 @@ void qla82xx_reset_adapter(struct scsi_qla_host *vha)
1725 ha->isp_ops->disable_intrs(ha); 1729 ha->isp_ops->disable_intrs(ha);
1726} 1730}
1727 1731
1728int qla82xx_fw_load_from_blob(struct qla_hw_data *ha) 1732static int
1733qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
1729{ 1734{
1730 u64 *ptr64; 1735 u64 *ptr64;
1731 u32 i, flashaddr, size; 1736 u32 i, flashaddr, size;
@@ -1836,7 +1841,8 @@ qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type)
1836 return 0; 1841 return 0;
1837} 1842}
1838 1843
1839int qla82xx_check_cmdpeg_state(struct qla_hw_data *ha) 1844static int
1845qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
1840{ 1846{
1841 u32 val = 0; 1847 u32 val = 0;
1842 int retries = 60; 1848 int retries = 60;
@@ -1874,7 +1880,8 @@ int qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
1874 return QLA_FUNCTION_FAILED; 1880 return QLA_FUNCTION_FAILED;
1875} 1881}
1876 1882
1877int qla82xx_check_rcvpeg_state(struct qla_hw_data *ha) 1883static int
1884qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
1878{ 1885{
1879 u32 val = 0; 1886 u32 val = 0;
1880 int retries = 60; 1887 int retries = 60;
@@ -1933,7 +1940,7 @@ static struct qla82xx_legacy_intr_set legacy_intr[] = \
1933 * @ha: SCSI driver HA context 1940 * @ha: SCSI driver HA context
1934 * @mb0: Mailbox0 register 1941 * @mb0: Mailbox0 register
1935 */ 1942 */
1936void 1943static void
1937qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 1944qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1938{ 1945{
1939 uint16_t cnt; 1946 uint16_t cnt;
@@ -2257,7 +2264,7 @@ void qla82xx_init_flags(struct qla_hw_data *ha)
2257 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg; 2264 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
2258} 2265}
2259 2266
2260static inline void 2267inline void
2261qla82xx_set_drv_active(scsi_qla_host_t *vha) 2268qla82xx_set_drv_active(scsi_qla_host_t *vha)
2262{ 2269{
2263 uint32_t drv_active; 2270 uint32_t drv_active;
@@ -2267,10 +2274,11 @@ qla82xx_set_drv_active(scsi_qla_host_t *vha)
2267 2274
2268 /* If reset value is all FF's, initialize DRV_ACTIVE */ 2275 /* If reset value is all FF's, initialize DRV_ACTIVE */
2269 if (drv_active == 0xffffffff) { 2276 if (drv_active == 0xffffffff) {
2270 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, 0); 2277 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE,
2278 QLA82XX_DRV_NOT_ACTIVE);
2271 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 2279 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2272 } 2280 }
2273 drv_active |= (1 << (ha->portnum * 4)); 2281 drv_active |= (QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
2274 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active); 2282 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
2275} 2283}
2276 2284
@@ -2280,7 +2288,7 @@ qla82xx_clear_drv_active(struct qla_hw_data *ha)
2280 uint32_t drv_active; 2288 uint32_t drv_active;
2281 2289
2282 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 2290 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2283 drv_active &= ~(1 << (ha->portnum * 4)); 2291 drv_active &= ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
2284 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active); 2292 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
2285} 2293}
2286 2294
@@ -2291,7 +2299,7 @@ qla82xx_need_reset(struct qla_hw_data *ha)
2291 int rval; 2299 int rval;
2292 2300
2293 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 2301 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2294 rval = drv_state & (1 << (ha->portnum * 4)); 2302 rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2295 return rval; 2303 return rval;
2296} 2304}
2297 2305
@@ -2305,7 +2313,7 @@ qla82xx_set_rst_ready(struct qla_hw_data *ha)
2305 2313
2306 /* If reset value is all FF's, initialize DRV_STATE */ 2314 /* If reset value is all FF's, initialize DRV_STATE */
2307 if (drv_state == 0xffffffff) { 2315 if (drv_state == 0xffffffff) {
2308 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0); 2316 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, QLA82XX_DRVST_NOT_RDY);
2309 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 2317 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2310 } 2318 }
2311 drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); 2319 drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
@@ -2335,7 +2343,8 @@ qla82xx_set_qsnt_ready(struct qla_hw_data *ha)
2335 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state); 2343 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
2336} 2344}
2337 2345
2338int qla82xx_load_fw(scsi_qla_host_t *vha) 2346static int
2347qla82xx_load_fw(scsi_qla_host_t *vha)
2339{ 2348{
2340 int rst; 2349 int rst;
2341 struct fw_blob *blob; 2350 struct fw_blob *blob;
@@ -2411,7 +2420,7 @@ fw_load_failed:
2411 return QLA_FUNCTION_FAILED; 2420 return QLA_FUNCTION_FAILED;
2412} 2421}
2413 2422
2414static int 2423int
2415qla82xx_start_firmware(scsi_qla_host_t *vha) 2424qla82xx_start_firmware(scsi_qla_host_t *vha)
2416{ 2425{
2417 int pcie_cap; 2426 int pcie_cap;
@@ -2419,7 +2428,7 @@ qla82xx_start_firmware(scsi_qla_host_t *vha)
2419 struct qla_hw_data *ha = vha->hw; 2428 struct qla_hw_data *ha = vha->hw;
2420 2429
2421 /* scrub dma mask expansion register */ 2430 /* scrub dma mask expansion register */
2422 qla82xx_wr_32(ha, CRB_DMA_SHIFT, 0x55555555); 2431 qla82xx_wr_32(ha, CRB_DMA_SHIFT, QLA82XX_DMA_SHIFT_VALUE);
2423 2432
2424 /* Put both the PEG CMD and RCV PEG to default state 2433 /* Put both the PEG CMD and RCV PEG to default state
2425 * of 0 before resetting the hardware 2434 * of 0 before resetting the hardware
@@ -2882,7 +2891,7 @@ queuing_error:
2882 return QLA_FUNCTION_FAILED; 2891 return QLA_FUNCTION_FAILED;
2883} 2892}
2884 2893
2885uint32_t * 2894static uint32_t *
2886qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, 2895qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
2887 uint32_t length) 2896 uint32_t length)
2888{ 2897{
@@ -2903,7 +2912,7 @@ done_read:
2903 return dwptr; 2912 return dwptr;
2904} 2913}
2905 2914
2906int 2915static int
2907qla82xx_unprotect_flash(struct qla_hw_data *ha) 2916qla82xx_unprotect_flash(struct qla_hw_data *ha)
2908{ 2917{
2909 int ret; 2918 int ret;
@@ -2934,7 +2943,7 @@ done_unprotect:
2934 return ret; 2943 return ret;
2935} 2944}
2936 2945
2937int 2946static int
2938qla82xx_protect_flash(struct qla_hw_data *ha) 2947qla82xx_protect_flash(struct qla_hw_data *ha)
2939{ 2948{
2940 int ret; 2949 int ret;
@@ -2963,7 +2972,7 @@ done_protect:
2963 return ret; 2972 return ret;
2964} 2973}
2965 2974
2966int 2975static int
2967qla82xx_erase_sector(struct qla_hw_data *ha, int addr) 2976qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
2968{ 2977{
2969 int ret = 0; 2978 int ret = 0;
@@ -3156,6 +3165,20 @@ qla82xx_start_iocbs(srb_t *sp)
3156 } 3165 }
3157} 3166}
3158 3167
3168void qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
3169{
3170 if (qla82xx_rom_lock(ha))
3171 /* Someone else is holding the lock. */
3172 qla_printk(KERN_INFO, ha, "Resetting rom_lock\n");
3173
3174 /*
3175 * Either we got the lock, or someone
3176 * else died while holding it.
3177 * In either case, unlock.
3178 */
3179 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
3180}
3181
3159/* 3182/*
3160 * qla82xx_device_bootstrap 3183 * qla82xx_device_bootstrap
3161 * Initialize device, set DEV_READY, start fw 3184 * Initialize device, set DEV_READY, start fw
@@ -3170,12 +3193,13 @@ qla82xx_start_iocbs(srb_t *sp)
3170static int 3193static int
3171qla82xx_device_bootstrap(scsi_qla_host_t *vha) 3194qla82xx_device_bootstrap(scsi_qla_host_t *vha)
3172{ 3195{
3173 int rval, i, timeout; 3196 int rval = QLA_SUCCESS;
3197 int i, timeout;
3174 uint32_t old_count, count; 3198 uint32_t old_count, count;
3175 struct qla_hw_data *ha = vha->hw; 3199 struct qla_hw_data *ha = vha->hw;
3200 int need_reset = 0, peg_stuck = 1;
3176 3201
3177 if (qla82xx_need_reset(ha)) 3202 need_reset = qla82xx_need_reset(ha);
3178 goto dev_initialize;
3179 3203
3180 old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); 3204 old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
3181 3205
@@ -3189,9 +3213,27 @@ qla82xx_device_bootstrap(scsi_qla_host_t *vha)
3189 3213
3190 count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); 3214 count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
3191 if (count != old_count) 3215 if (count != old_count)
3216 peg_stuck = 0;
3217 }
3218
3219 if (need_reset) {
3220 /* We are trying to perform a recovery here. */
3221 if (peg_stuck)
3222 qla82xx_rom_lock_recovery(ha);
3223 goto dev_initialize;
3224 } else {
3225 /* Start of day for this ha context. */
3226 if (peg_stuck) {
3227 /* Either we are the first or recovery in progress. */
3228 qla82xx_rom_lock_recovery(ha);
3229 goto dev_initialize;
3230 } else
3231 /* Firmware already running. */
3192 goto dev_ready; 3232 goto dev_ready;
3193 } 3233 }
3194 3234
3235 return rval;
3236
3195dev_initialize: 3237dev_initialize:
3196 /* set to DEV_INITIALIZING */ 3238 /* set to DEV_INITIALIZING */
3197 qla_printk(KERN_INFO, ha, "HW State: INITIALIZING\n"); 3239 qla_printk(KERN_INFO, ha, "HW State: INITIALIZING\n");
@@ -3304,6 +3346,9 @@ qla82xx_check_fw_alive(scsi_qla_host_t *vha)
3304 struct qla_hw_data *ha = vha->hw; 3346 struct qla_hw_data *ha = vha->hw;
3305 3347
3306 fw_heartbeat_counter = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); 3348 fw_heartbeat_counter = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
3349 /* all 0xff, assume AER/EEH in progress, ignore */
3350 if (fw_heartbeat_counter == 0xffffffff)
3351 return;
3307 if (vha->fw_heartbeat_counter == fw_heartbeat_counter) { 3352 if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
3308 vha->seconds_since_last_heartbeat++; 3353 vha->seconds_since_last_heartbeat++;
3309 /* FW not alive after 2 seconds */ 3354 /* FW not alive after 2 seconds */
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 15559cab39f8..51ec0c5380e8 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -26,6 +26,7 @@
26#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c) 26#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c)
27#define BOOT_LOADER_DIMM_STATUS QLA82XX_REG(0x54) 27#define BOOT_LOADER_DIMM_STATUS QLA82XX_REG(0x54)
28#define CRB_DMA_SHIFT QLA82XX_REG(0xcc) 28#define CRB_DMA_SHIFT QLA82XX_REG(0xcc)
29#define QLA82XX_DMA_SHIFT_VALUE 0x55555555
29 30
30#define QLA82XX_HW_H0_CH_HUB_ADR 0x05 31#define QLA82XX_HW_H0_CH_HUB_ADR 0x05
31#define QLA82XX_HW_H1_CH_HUB_ADR 0x0E 32#define QLA82XX_HW_H1_CH_HUB_ADR 0x0E
@@ -583,6 +584,10 @@
583#define QLA82XX_DRVST_RST_RDY 1 584#define QLA82XX_DRVST_RST_RDY 1
584#define QLA82XX_DRVST_QSNT_RDY 2 585#define QLA82XX_DRVST_QSNT_RDY 2
585 586
587/* Different drive active state */
588#define QLA82XX_DRV_NOT_ACTIVE 0
589#define QLA82XX_DRV_ACTIVE 1
590
586/* 591/*
587 * The PCI VendorID and DeviceID for our board. 592 * The PCI VendorID and DeviceID for our board.
588 */ 593 */
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 9946fac54255..800ea9269752 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1295,17 +1295,12 @@ static int
1295qla2xxx_slave_configure(struct scsi_device *sdev) 1295qla2xxx_slave_configure(struct scsi_device *sdev)
1296{ 1296{
1297 scsi_qla_host_t *vha = shost_priv(sdev->host); 1297 scsi_qla_host_t *vha = shost_priv(sdev->host);
1298 struct qla_hw_data *ha = vha->hw;
1299 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
1300 struct req_que *req = vha->req; 1298 struct req_que *req = vha->req;
1301 1299
1302 if (sdev->tagged_supported) 1300 if (sdev->tagged_supported)
1303 scsi_activate_tcq(sdev, req->max_q_depth); 1301 scsi_activate_tcq(sdev, req->max_q_depth);
1304 else 1302 else
1305 scsi_deactivate_tcq(sdev, req->max_q_depth); 1303 scsi_deactivate_tcq(sdev, req->max_q_depth);
1306
1307 rport->dev_loss_tmo = ha->port_down_retry_count;
1308
1309 return 0; 1304 return 0;
1310} 1305}
1311 1306
@@ -2141,8 +2136,16 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2141 else 2136 else
2142 base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER + 2137 base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
2143 base_vha->vp_idx; 2138 base_vha->vp_idx;
2144 if (IS_QLA2100(ha)) 2139
2145 host->sg_tablesize = 32; 2140 /* Set the SG table size based on ISP type */
2141 if (!IS_FWI2_CAPABLE(ha)) {
2142 if (IS_QLA2100(ha))
2143 host->sg_tablesize = 32;
2144 } else {
2145 if (!IS_QLA82XX(ha))
2146 host->sg_tablesize = QLA_SG_ALL;
2147 }
2148
2146 host->max_id = max_id; 2149 host->max_id = max_id;
2147 host->this_id = 255; 2150 host->this_id = 255;
2148 host->cmd_per_lun = 3; 2151 host->cmd_per_lun = 3;
@@ -3553,6 +3556,11 @@ qla2x00_timer(scsi_qla_host_t *vha)
3553 struct qla_hw_data *ha = vha->hw; 3556 struct qla_hw_data *ha = vha->hw;
3554 struct req_que *req; 3557 struct req_que *req;
3555 3558
3559 if (ha->flags.eeh_busy) {
3560 qla2x00_restart_timer(vha, WATCH_INTERVAL);
3561 return;
3562 }
3563
3556 if (IS_QLA82XX(ha)) 3564 if (IS_QLA82XX(ha))
3557 qla82xx_watchdog(vha); 3565 qla82xx_watchdog(vha);
3558 3566
@@ -3782,8 +3790,21 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
3782 return PCI_ERS_RESULT_CAN_RECOVER; 3790 return PCI_ERS_RESULT_CAN_RECOVER;
3783 case pci_channel_io_frozen: 3791 case pci_channel_io_frozen:
3784 ha->flags.eeh_busy = 1; 3792 ha->flags.eeh_busy = 1;
3793 /* For ISP82XX complete any pending mailbox cmd */
3794 if (IS_QLA82XX(ha)) {
3795 ha->flags.fw_hung = 1;
3796 if (ha->flags.mbox_busy) {
3797 ha->flags.mbox_int = 1;
3798 DEBUG2(qla_printk(KERN_ERR, ha,
3799 "Due to pci channel io frozen, doing premature "
3800 "completion of mbx command\n"));
3801 complete(&ha->mbx_intr_comp);
3802 }
3803 }
3785 qla2x00_free_irqs(vha); 3804 qla2x00_free_irqs(vha);
3786 pci_disable_device(pdev); 3805 pci_disable_device(pdev);
3806 /* Return back all IOs */
3807 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
3787 return PCI_ERS_RESULT_NEED_RESET; 3808 return PCI_ERS_RESULT_NEED_RESET;
3788 case pci_channel_io_perm_failure: 3809 case pci_channel_io_perm_failure:
3789 ha->flags.pci_channel_io_perm_failure = 1; 3810 ha->flags.pci_channel_io_perm_failure = 1;
@@ -3804,6 +3825,9 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
3804 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 3825 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3805 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 3826 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
3806 3827
3828 if (IS_QLA82XX(ha))
3829 return PCI_ERS_RESULT_RECOVERED;
3830
3807 spin_lock_irqsave(&ha->hardware_lock, flags); 3831 spin_lock_irqsave(&ha->hardware_lock, flags);
3808 if (IS_QLA2100(ha) || IS_QLA2200(ha)){ 3832 if (IS_QLA2100(ha) || IS_QLA2200(ha)){
3809 stat = RD_REG_DWORD(&reg->hccr); 3833 stat = RD_REG_DWORD(&reg->hccr);
@@ -3830,6 +3854,109 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
3830 return PCI_ERS_RESULT_RECOVERED; 3854 return PCI_ERS_RESULT_RECOVERED;
3831} 3855}
3832 3856
3857uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3858{
3859 uint32_t rval = QLA_FUNCTION_FAILED;
3860 uint32_t drv_active = 0;
3861 struct qla_hw_data *ha = base_vha->hw;
3862 int fn;
3863 struct pci_dev *other_pdev = NULL;
3864
3865 DEBUG17(qla_printk(KERN_INFO, ha,
3866 "scsi(%ld): In qla82xx_error_recovery\n", base_vha->host_no));
3867
3868 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
3869
3870 if (base_vha->flags.online) {
3871 /* Abort all outstanding commands,
3872 * so as to be requeued later */
3873 qla2x00_abort_isp_cleanup(base_vha);
3874 }
3875
3876
3877 fn = PCI_FUNC(ha->pdev->devfn);
3878 while (fn > 0) {
3879 fn--;
3880 DEBUG17(qla_printk(KERN_INFO, ha,
3881 "Finding pci device at function = 0x%x\n", fn));
3882 other_pdev =
3883 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
3884 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
3885 fn));
3886
3887 if (!other_pdev)
3888 continue;
3889 if (atomic_read(&other_pdev->enable_cnt)) {
3890 DEBUG17(qla_printk(KERN_INFO, ha,
3891 "Found PCI func availabe and enabled at 0x%x\n",
3892 fn));
3893 pci_dev_put(other_pdev);
3894 break;
3895 }
3896 pci_dev_put(other_pdev);
3897 }
3898
3899 if (!fn) {
3900 /* Reset owner */
3901 DEBUG17(qla_printk(KERN_INFO, ha,
3902 "This devfn is reset owner = 0x%x\n", ha->pdev->devfn));
3903 qla82xx_idc_lock(ha);
3904
3905 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3906 QLA82XX_DEV_INITIALIZING);
3907
3908 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
3909 QLA82XX_IDC_VERSION);
3910
3911 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3912 DEBUG17(qla_printk(KERN_INFO, ha,
3913 "drv_active = 0x%x\n", drv_active));
3914
3915 qla82xx_idc_unlock(ha);
3916 /* Reset if device is not already reset
3917 * drv_active would be 0 if a reset has already been done
3918 */
3919 if (drv_active)
3920 rval = qla82xx_start_firmware(base_vha);
3921 else
3922 rval = QLA_SUCCESS;
3923 qla82xx_idc_lock(ha);
3924
3925 if (rval != QLA_SUCCESS) {
3926 qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
3927 qla82xx_clear_drv_active(ha);
3928 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3929 QLA82XX_DEV_FAILED);
3930 } else {
3931 qla_printk(KERN_INFO, ha, "HW State: READY\n");
3932 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3933 QLA82XX_DEV_READY);
3934 qla82xx_idc_unlock(ha);
3935 ha->flags.fw_hung = 0;
3936 rval = qla82xx_restart_isp(base_vha);
3937 qla82xx_idc_lock(ha);
3938 /* Clear driver state register */
3939 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
3940 qla82xx_set_drv_active(base_vha);
3941 }
3942 qla82xx_idc_unlock(ha);
3943 } else {
3944 DEBUG17(qla_printk(KERN_INFO, ha,
3945 "This devfn is not reset owner = 0x%x\n", ha->pdev->devfn));
3946 if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
3947 QLA82XX_DEV_READY)) {
3948 ha->flags.fw_hung = 0;
3949 rval = qla82xx_restart_isp(base_vha);
3950 qla82xx_idc_lock(ha);
3951 qla82xx_set_drv_active(base_vha);
3952 qla82xx_idc_unlock(ha);
3953 }
3954 }
3955 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
3956
3957 return rval;
3958}
3959
3833static pci_ers_result_t 3960static pci_ers_result_t
3834qla2xxx_pci_slot_reset(struct pci_dev *pdev) 3961qla2xxx_pci_slot_reset(struct pci_dev *pdev)
3835{ 3962{
@@ -3862,15 +3989,23 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
3862 if (rc) { 3989 if (rc) {
3863 qla_printk(KERN_WARNING, ha, 3990 qla_printk(KERN_WARNING, ha,
3864 "Can't re-enable PCI device after reset.\n"); 3991 "Can't re-enable PCI device after reset.\n");
3865 return ret; 3992 goto exit_slot_reset;
3866 } 3993 }
3867 3994
3868 rsp = ha->rsp_q_map[0]; 3995 rsp = ha->rsp_q_map[0];
3869 if (qla2x00_request_irqs(ha, rsp)) 3996 if (qla2x00_request_irqs(ha, rsp))
3870 return ret; 3997 goto exit_slot_reset;
3871 3998
3872 if (ha->isp_ops->pci_config(base_vha)) 3999 if (ha->isp_ops->pci_config(base_vha))
3873 return ret; 4000 goto exit_slot_reset;
4001
4002 if (IS_QLA82XX(ha)) {
4003 if (qla82xx_error_recovery(base_vha) == QLA_SUCCESS) {
4004 ret = PCI_ERS_RESULT_RECOVERED;
4005 goto exit_slot_reset;
4006 } else
4007 goto exit_slot_reset;
4008 }
3874 4009
3875 while (ha->flags.mbox_busy && retries--) 4010 while (ha->flags.mbox_busy && retries--)
3876 msleep(1000); 4011 msleep(1000);
@@ -3881,6 +4016,7 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
3881 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 4016 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
3882 4017
3883 4018
4019exit_slot_reset:
3884 DEBUG17(qla_printk(KERN_WARNING, ha, 4020 DEBUG17(qla_printk(KERN_WARNING, ha,
3885 "slot_reset-return:ret=%x\n", ret)); 4021 "slot_reset-return:ret=%x\n", ret));
3886 4022
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 5d4a3822382d..449256f2c5f8 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -5,6 +5,7 @@
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7#include <linux/delay.h> 7#include <linux/delay.h>
8#include <linux/io.h>
8#include <linux/pci.h> 9#include <linux/pci.h>
9#include "ql4_def.h" 10#include "ql4_def.h"
10#include "ql4_glbl.h" 11#include "ql4_glbl.h"
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index b02bdc6c2cd1..2c36bae3bd4b 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -109,10 +109,12 @@ static const char * scsi_debug_version_date = "20100324";
109#define DEF_PHYSBLK_EXP 0 109#define DEF_PHYSBLK_EXP 0
110#define DEF_LOWEST_ALIGNED 0 110#define DEF_LOWEST_ALIGNED 0
111#define DEF_OPT_BLKS 64 111#define DEF_OPT_BLKS 64
112#define DEF_UNMAP_MAX_BLOCKS 0 112#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
113#define DEF_UNMAP_MAX_DESC 0 113#define DEF_UNMAP_MAX_DESC 256
114#define DEF_UNMAP_GRANULARITY 0 114#define DEF_UNMAP_GRANULARITY 1
115#define DEF_UNMAP_ALIGNMENT 0 115#define DEF_UNMAP_ALIGNMENT 0
116#define DEF_TPWS 0
117#define DEF_TPU 0
116 118
117/* bit mask values for scsi_debug_opts */ 119/* bit mask values for scsi_debug_opts */
118#define SCSI_DEBUG_OPT_NOISE 1 120#define SCSI_DEBUG_OPT_NOISE 1
@@ -177,10 +179,12 @@ static int scsi_debug_ato = DEF_ATO;
177static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP; 179static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
178static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED; 180static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
179static int scsi_debug_opt_blks = DEF_OPT_BLKS; 181static int scsi_debug_opt_blks = DEF_OPT_BLKS;
180static int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC; 182static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
181static int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS; 183static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
182static int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY; 184static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
183static int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT; 185static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
186static unsigned int scsi_debug_tpws = DEF_TPWS;
187static unsigned int scsi_debug_tpu = DEF_TPU;
184 188
185static int scsi_debug_cmnd_count = 0; 189static int scsi_debug_cmnd_count = 0;
186 190
@@ -723,16 +727,9 @@ static int inquiry_evpd_b0(unsigned char * arr)
723 /* Optimal Transfer Length */ 727 /* Optimal Transfer Length */
724 put_unaligned_be32(scsi_debug_opt_blks, &arr[8]); 728 put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
725 729
726 if (scsi_debug_unmap_max_desc) { 730 if (scsi_debug_tpu) {
727 unsigned int blocks;
728
729 if (scsi_debug_unmap_max_blocks)
730 blocks = scsi_debug_unmap_max_blocks;
731 else
732 blocks = 0xffffffff;
733
734 /* Maximum Unmap LBA Count */ 731 /* Maximum Unmap LBA Count */
735 put_unaligned_be32(blocks, &arr[16]); 732 put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
736 733
737 /* Maximum Unmap Block Descriptor Count */ 734 /* Maximum Unmap Block Descriptor Count */
738 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]); 735 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
@@ -745,10 +742,9 @@ static int inquiry_evpd_b0(unsigned char * arr)
745 } 742 }
746 743
747 /* Optimal Unmap Granularity */ 744 /* Optimal Unmap Granularity */
748 if (scsi_debug_unmap_granularity) { 745 put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
749 put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]); 746
750 return 0x3c; /* Mandatory page length for thin provisioning */ 747 return 0x3c; /* Mandatory page length for thin provisioning */
751 }
752 748
753 return sizeof(vpdb0_data); 749 return sizeof(vpdb0_data);
754} 750}
@@ -765,6 +761,21 @@ static int inquiry_evpd_b1(unsigned char *arr)
765 return 0x3c; 761 return 0x3c;
766} 762}
767 763
764/* Thin provisioning VPD page (SBC-3) */
765static int inquiry_evpd_b2(unsigned char *arr)
766{
767 memset(arr, 0, 0x8);
768 arr[0] = 0; /* threshold exponent */
769
770 if (scsi_debug_tpu)
771 arr[1] = 1 << 7;
772
773 if (scsi_debug_tpws)
774 arr[1] |= 1 << 6;
775
776 return 0x8;
777}
778
768#define SDEBUG_LONG_INQ_SZ 96 779#define SDEBUG_LONG_INQ_SZ 96
769#define SDEBUG_MAX_INQ_ARR_SZ 584 780#define SDEBUG_MAX_INQ_ARR_SZ 584
770 781
@@ -820,6 +831,7 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
820 arr[n++] = 0x89; /* ATA information */ 831 arr[n++] = 0x89; /* ATA information */
821 arr[n++] = 0xb0; /* Block limits (SBC) */ 832 arr[n++] = 0xb0; /* Block limits (SBC) */
822 arr[n++] = 0xb1; /* Block characteristics (SBC) */ 833 arr[n++] = 0xb1; /* Block characteristics (SBC) */
834 arr[n++] = 0xb2; /* Thin provisioning (SBC) */
823 arr[3] = n - 4; /* number of supported VPD pages */ 835 arr[3] = n - 4; /* number of supported VPD pages */
824 } else if (0x80 == cmd[2]) { /* unit serial number */ 836 } else if (0x80 == cmd[2]) { /* unit serial number */
825 arr[1] = cmd[2]; /*sanity */ 837 arr[1] = cmd[2]; /*sanity */
@@ -867,6 +879,9 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
867 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */ 879 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
868 arr[1] = cmd[2]; /*sanity */ 880 arr[1] = cmd[2]; /*sanity */
869 arr[3] = inquiry_evpd_b1(&arr[4]); 881 arr[3] = inquiry_evpd_b1(&arr[4]);
882 } else if (0xb2 == cmd[2]) { /* Thin provisioning (SBC) */
883 arr[1] = cmd[2]; /*sanity */
884 arr[3] = inquiry_evpd_b2(&arr[4]);
870 } else { 885 } else {
871 /* Illegal request, invalid field in cdb */ 886 /* Illegal request, invalid field in cdb */
872 mk_sense_buffer(devip, ILLEGAL_REQUEST, 887 mk_sense_buffer(devip, ILLEGAL_REQUEST,
@@ -1038,7 +1053,7 @@ static int resp_readcap16(struct scsi_cmnd * scp,
1038 arr[13] = scsi_debug_physblk_exp & 0xf; 1053 arr[13] = scsi_debug_physblk_exp & 0xf;
1039 arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f; 1054 arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1040 1055
1041 if (scsi_debug_unmap_granularity) 1056 if (scsi_debug_tpu || scsi_debug_tpws)
1042 arr[14] |= 0x80; /* TPE */ 1057 arr[14] |= 0x80; /* TPE */
1043 1058
1044 arr[15] = scsi_debug_lowest_aligned & 0xff; 1059 arr[15] = scsi_debug_lowest_aligned & 0xff;
@@ -2708,6 +2723,8 @@ module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
2708module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO); 2723module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
2709module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO); 2724module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
2710module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO); 2725module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
2726module_param_named(tpu, scsi_debug_tpu, int, S_IRUGO);
2727module_param_named(tpws, scsi_debug_tpws, int, S_IRUGO);
2711 2728
2712MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert"); 2729MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
2713MODULE_DESCRIPTION("SCSI debug adapter driver"); 2730MODULE_DESCRIPTION("SCSI debug adapter driver");
@@ -2739,10 +2756,12 @@ MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
2739MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)"); 2756MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
2740MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)"); 2757MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
2741MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)"); 2758MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
2742MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0)"); 2759MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
2743MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=0)"); 2760MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
2744MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=0)"); 2761MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
2745MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)"); 2762MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
2763MODULE_PARM_DESC(tpu, "enable TP, support UNMAP command (def=0)");
2764MODULE_PARM_DESC(tpws, "enable TP, support WRITE SAME(16) with UNMAP bit (def=0)");
2746 2765
2747static char sdebug_info[256]; 2766static char sdebug_info[256];
2748 2767
@@ -3130,7 +3149,7 @@ static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf)
3130{ 3149{
3131 ssize_t count; 3150 ssize_t count;
3132 3151
3133 if (scsi_debug_unmap_granularity == 0) 3152 if (scsi_debug_tpu == 0 && scsi_debug_tpws == 0)
3134 return scnprintf(buf, PAGE_SIZE, "0-%u\n", 3153 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
3135 sdebug_store_sectors); 3154 sdebug_store_sectors);
3136 3155
@@ -3207,16 +3226,7 @@ static void do_remove_driverfs_files(void)
3207 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_add_host); 3226 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_add_host);
3208} 3227}
3209 3228
3210static void pseudo_0_release(struct device *dev) 3229struct device *pseudo_primary;
3211{
3212 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3213 printk(KERN_INFO "scsi_debug: pseudo_0_release() called\n");
3214}
3215
3216static struct device pseudo_primary = {
3217 .init_name = "pseudo_0",
3218 .release = pseudo_0_release,
3219};
3220 3230
3221static int __init scsi_debug_init(void) 3231static int __init scsi_debug_init(void)
3222{ 3232{
@@ -3322,10 +3332,21 @@ static int __init scsi_debug_init(void)
3322 memset(dif_storep, 0xff, dif_size); 3332 memset(dif_storep, 0xff, dif_size);
3323 } 3333 }
3324 3334
3325 if (scsi_debug_unmap_granularity) { 3335 /* Thin Provisioning */
3336 if (scsi_debug_tpu || scsi_debug_tpws) {
3326 unsigned int map_bytes; 3337 unsigned int map_bytes;
3327 3338
3328 if (scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) { 3339 scsi_debug_unmap_max_blocks =
3340 clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
3341
3342 scsi_debug_unmap_max_desc =
3343 clamp(scsi_debug_unmap_max_desc, 0U, 256U);
3344
3345 scsi_debug_unmap_granularity =
3346 clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
3347
3348 if (scsi_debug_unmap_alignment &&
3349 scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) {
3329 printk(KERN_ERR 3350 printk(KERN_ERR
3330 "%s: ERR: unmap_granularity < unmap_alignment\n", 3351 "%s: ERR: unmap_granularity < unmap_alignment\n",
3331 __func__); 3352 __func__);
@@ -3352,10 +3373,10 @@ static int __init scsi_debug_init(void)
3352 map_region(0, 2); 3373 map_region(0, 2);
3353 } 3374 }
3354 3375
3355 ret = device_register(&pseudo_primary); 3376 pseudo_primary = root_device_register("pseudo_0");
3356 if (ret < 0) { 3377 if (IS_ERR(pseudo_primary)) {
3357 printk(KERN_WARNING "scsi_debug: device_register error: %d\n", 3378 printk(KERN_WARNING "scsi_debug: root_device_register() error\n");
3358 ret); 3379 ret = PTR_ERR(pseudo_primary);
3359 goto free_vm; 3380 goto free_vm;
3360 } 3381 }
3361 ret = bus_register(&pseudo_lld_bus); 3382 ret = bus_register(&pseudo_lld_bus);
@@ -3402,7 +3423,7 @@ del_files:
3402bus_unreg: 3423bus_unreg:
3403 bus_unregister(&pseudo_lld_bus); 3424 bus_unregister(&pseudo_lld_bus);
3404dev_unreg: 3425dev_unreg:
3405 device_unregister(&pseudo_primary); 3426 root_device_unregister(pseudo_primary);
3406free_vm: 3427free_vm:
3407 if (map_storep) 3428 if (map_storep)
3408 vfree(map_storep); 3429 vfree(map_storep);
@@ -3423,7 +3444,7 @@ static void __exit scsi_debug_exit(void)
3423 do_remove_driverfs_files(); 3444 do_remove_driverfs_files();
3424 driver_unregister(&sdebug_driverfs_driver); 3445 driver_unregister(&sdebug_driverfs_driver);
3425 bus_unregister(&pseudo_lld_bus); 3446 bus_unregister(&pseudo_lld_bus);
3426 device_unregister(&pseudo_primary); 3447 root_device_unregister(pseudo_primary);
3427 3448
3428 if (dif_storep) 3449 if (dif_storep)
3429 vfree(dif_storep); 3450 vfree(dif_storep);
@@ -3474,7 +3495,7 @@ static int sdebug_add_adapter(void)
3474 spin_unlock(&sdebug_host_list_lock); 3495 spin_unlock(&sdebug_host_list_lock);
3475 3496
3476 sdbg_host->dev.bus = &pseudo_lld_bus; 3497 sdbg_host->dev.bus = &pseudo_lld_bus;
3477 sdbg_host->dev.parent = &pseudo_primary; 3498 sdbg_host->dev.parent = pseudo_primary;
3478 sdbg_host->dev.release = &sdebug_release_adapter; 3499 sdbg_host->dev.release = &sdebug_release_adapter;
3479 dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host); 3500 dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
3480 3501
@@ -3642,7 +3663,7 @@ int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done)
3642 errsts = resp_readcap16(SCpnt, devip); 3663 errsts = resp_readcap16(SCpnt, devip);
3643 else if (cmd[1] == SAI_GET_LBA_STATUS) { 3664 else if (cmd[1] == SAI_GET_LBA_STATUS) {
3644 3665
3645 if (scsi_debug_unmap_max_desc == 0) { 3666 if (scsi_debug_tpu == 0 && scsi_debug_tpws == 0) {
3646 mk_sense_buffer(devip, ILLEGAL_REQUEST, 3667 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3647 INVALID_COMMAND_OPCODE, 0); 3668 INVALID_COMMAND_OPCODE, 0);
3648 errsts = check_condition_result; 3669 errsts = check_condition_result;
@@ -3753,8 +3774,16 @@ write:
3753 } 3774 }
3754 break; 3775 break;
3755 case WRITE_SAME_16: 3776 case WRITE_SAME_16:
3756 if (cmd[1] & 0x8) 3777 if (cmd[1] & 0x8) {
3757 unmap = 1; 3778 if (scsi_debug_tpws == 0) {
3779 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3780 INVALID_FIELD_IN_CDB, 0);
3781 errsts = check_condition_result;
3782 } else
3783 unmap = 1;
3784 }
3785 if (errsts)
3786 break;
3758 /* fall through */ 3787 /* fall through */
3759 case WRITE_SAME: 3788 case WRITE_SAME:
3760 errsts = check_readiness(SCpnt, 0, devip); 3789 errsts = check_readiness(SCpnt, 0, devip);
@@ -3768,7 +3797,7 @@ write:
3768 if (errsts) 3797 if (errsts)
3769 break; 3798 break;
3770 3799
3771 if (scsi_debug_unmap_max_desc == 0) { 3800 if (scsi_debug_unmap_max_desc == 0 || scsi_debug_tpu == 0) {
3772 mk_sense_buffer(devip, ILLEGAL_REQUEST, 3801 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3773 INVALID_COMMAND_OPCODE, 0); 3802 INVALID_COMMAND_OPCODE, 0);
3774 errsts = check_condition_result; 3803 errsts = check_condition_result;
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index d7e470a06180..998c01be3234 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -53,6 +53,25 @@ static void fc_bsg_remove(struct request_queue *);
53static void fc_bsg_goose_queue(struct fc_rport *); 53static void fc_bsg_goose_queue(struct fc_rport *);
54 54
55/* 55/*
56 * Module Parameters
57 */
58
59/*
60 * dev_loss_tmo: the default number of seconds that the FC transport
61 * should insulate the loss of a remote port.
62 * The maximum will be capped by the value of SCSI_DEVICE_BLOCK_MAX_TIMEOUT.
63 */
64static unsigned int fc_dev_loss_tmo = 60; /* seconds */
65
66module_param_named(dev_loss_tmo, fc_dev_loss_tmo, uint, S_IRUGO|S_IWUSR);
67MODULE_PARM_DESC(dev_loss_tmo,
68 "Maximum number of seconds that the FC transport should"
69 " insulate the loss of a remote port. Once this value is"
70 " exceeded, the scsi target is removed. Value should be"
71 " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT if"
72 " fast_io_fail_tmo is not set.");
73
74/*
56 * Redefine so that we can have same named attributes in the 75 * Redefine so that we can have same named attributes in the
57 * sdev/starget/host objects. 76 * sdev/starget/host objects.
58 */ 77 */
@@ -408,6 +427,7 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
408 if (!fc_host->work_q) 427 if (!fc_host->work_q)
409 return -ENOMEM; 428 return -ENOMEM;
410 429
430 fc_host->dev_loss_tmo = fc_dev_loss_tmo;
411 snprintf(fc_host->devloss_work_q_name, 431 snprintf(fc_host->devloss_work_q_name,
412 sizeof(fc_host->devloss_work_q_name), 432 sizeof(fc_host->devloss_work_q_name),
413 "fc_dl_%d", shost->host_no); 433 "fc_dl_%d", shost->host_no);
@@ -462,25 +482,6 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
462 NULL); 482 NULL);
463 483
464/* 484/*
465 * Module Parameters
466 */
467
468/*
469 * dev_loss_tmo: the default number of seconds that the FC transport
470 * should insulate the loss of a remote port.
471 * The maximum will be capped by the value of SCSI_DEVICE_BLOCK_MAX_TIMEOUT.
472 */
473static unsigned int fc_dev_loss_tmo = 60; /* seconds */
474
475module_param_named(dev_loss_tmo, fc_dev_loss_tmo, uint, S_IRUGO|S_IWUSR);
476MODULE_PARM_DESC(dev_loss_tmo,
477 "Maximum number of seconds that the FC transport should"
478 " insulate the loss of a remote port. Once this value is"
479 " exceeded, the scsi target is removed. Value should be"
480 " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT if"
481 " fast_io_fail_tmo is not set.");
482
483/*
484 * Netlink Infrastructure 485 * Netlink Infrastructure
485 */ 486 */
486 487
@@ -830,24 +831,32 @@ static FC_DEVICE_ATTR(rport, supported_classes, S_IRUGO,
830/* 831/*
831 * dev_loss_tmo attribute 832 * dev_loss_tmo attribute
832 */ 833 */
833fc_rport_show_function(dev_loss_tmo, "%d\n", 20, ) 834static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
834static ssize_t 835{
835store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr, 836 char *cp;
836 const char *buf, size_t count) 837
838 *val = simple_strtoul(buf, &cp, 0);
839 if ((*cp && (*cp != '\n')) || (*val < 0))
840 return -EINVAL;
841 /*
842 * Check for overflow; dev_loss_tmo is u32
843 */
844 if (*val > UINT_MAX)
845 return -EINVAL;
846
847 return 0;
848}
849
850static int fc_rport_set_dev_loss_tmo(struct fc_rport *rport,
851 unsigned long val)
837{ 852{
838 unsigned long val;
839 struct fc_rport *rport = transport_class_to_rport(dev);
840 struct Scsi_Host *shost = rport_to_shost(rport); 853 struct Scsi_Host *shost = rport_to_shost(rport);
841 struct fc_internal *i = to_fc_internal(shost->transportt); 854 struct fc_internal *i = to_fc_internal(shost->transportt);
842 char *cp; 855
843 if ((rport->port_state == FC_PORTSTATE_BLOCKED) || 856 if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||
844 (rport->port_state == FC_PORTSTATE_DELETED) || 857 (rport->port_state == FC_PORTSTATE_DELETED) ||
845 (rport->port_state == FC_PORTSTATE_NOTPRESENT)) 858 (rport->port_state == FC_PORTSTATE_NOTPRESENT))
846 return -EBUSY; 859 return -EBUSY;
847 val = simple_strtoul(buf, &cp, 0);
848 if ((*cp && (*cp != '\n')) || (val < 0))
849 return -EINVAL;
850
851 /* 860 /*
852 * Check for overflow; dev_loss_tmo is u32 861 * Check for overflow; dev_loss_tmo is u32
853 */ 862 */
@@ -863,6 +872,25 @@ store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
863 return -EINVAL; 872 return -EINVAL;
864 873
865 i->f->set_rport_dev_loss_tmo(rport, val); 874 i->f->set_rport_dev_loss_tmo(rport, val);
875 return 0;
876}
877
878fc_rport_show_function(dev_loss_tmo, "%d\n", 20, )
879static ssize_t
880store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
881 const char *buf, size_t count)
882{
883 struct fc_rport *rport = transport_class_to_rport(dev);
884 unsigned long val;
885 int rc;
886
887 rc = fc_str_to_dev_loss(buf, &val);
888 if (rc)
889 return rc;
890
891 rc = fc_rport_set_dev_loss_tmo(rport, val);
892 if (rc)
893 return rc;
866 return count; 894 return count;
867} 895}
868static FC_DEVICE_ATTR(rport, dev_loss_tmo, S_IRUGO | S_IWUSR, 896static FC_DEVICE_ATTR(rport, dev_loss_tmo, S_IRUGO | S_IWUSR,
@@ -1608,8 +1636,35 @@ store_fc_private_host_issue_lip(struct device *dev,
1608static FC_DEVICE_ATTR(host, issue_lip, S_IWUSR, NULL, 1636static FC_DEVICE_ATTR(host, issue_lip, S_IWUSR, NULL,
1609 store_fc_private_host_issue_lip); 1637 store_fc_private_host_issue_lip);
1610 1638
1611fc_private_host_rd_attr(npiv_vports_inuse, "%u\n", 20); 1639static ssize_t
1640store_fc_private_host_dev_loss_tmo(struct device *dev,
1641 struct device_attribute *attr,
1642 const char *buf, size_t count)
1643{
1644 struct Scsi_Host *shost = transport_class_to_shost(dev);
1645 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
1646 struct fc_rport *rport;
1647 unsigned long val, flags;
1648 int rc;
1612 1649
1650 rc = fc_str_to_dev_loss(buf, &val);
1651 if (rc)
1652 return rc;
1653
1654 fc_host_dev_loss_tmo(shost) = val;
1655 spin_lock_irqsave(shost->host_lock, flags);
1656 list_for_each_entry(rport, &fc_host->rports, peers)
1657 fc_rport_set_dev_loss_tmo(rport, val);
1658 spin_unlock_irqrestore(shost->host_lock, flags);
1659 return count;
1660}
1661
1662fc_private_host_show_function(dev_loss_tmo, "%d\n", 20, );
1663static FC_DEVICE_ATTR(host, dev_loss_tmo, S_IRUGO | S_IWUSR,
1664 show_fc_host_dev_loss_tmo,
1665 store_fc_private_host_dev_loss_tmo);
1666
1667fc_private_host_rd_attr(npiv_vports_inuse, "%u\n", 20);
1613 1668
1614/* 1669/*
1615 * Host Statistics Management 1670 * Host Statistics Management
@@ -2165,6 +2220,7 @@ fc_attach_transport(struct fc_function_template *ft)
2165 SETUP_HOST_ATTRIBUTE_RW(system_hostname); 2220 SETUP_HOST_ATTRIBUTE_RW(system_hostname);
2166 2221
2167 /* Transport-managed attributes */ 2222 /* Transport-managed attributes */
2223 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(dev_loss_tmo);
2168 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(tgtid_bind_type); 2224 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(tgtid_bind_type);
2169 if (ft->issue_fc_host_lip) 2225 if (ft->issue_fc_host_lip)
2170 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(issue_lip); 2226 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(issue_lip);
@@ -2525,7 +2581,7 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
2525 2581
2526 rport->maxframe_size = -1; 2582 rport->maxframe_size = -1;
2527 rport->supported_classes = FC_COS_UNSPECIFIED; 2583 rport->supported_classes = FC_COS_UNSPECIFIED;
2528 rport->dev_loss_tmo = fc_dev_loss_tmo; 2584 rport->dev_loss_tmo = fc_host->dev_loss_tmo;
2529 memcpy(&rport->node_name, &ids->node_name, sizeof(rport->node_name)); 2585 memcpy(&rport->node_name, &ids->node_name, sizeof(rport->node_name));
2530 memcpy(&rport->port_name, &ids->port_name, sizeof(rport->port_name)); 2586 memcpy(&rport->port_name, &ids->port_name, sizeof(rport->port_name));
2531 rport->port_id = ids->port_id; 2587 rport->port_id = ids->port_id;
@@ -4044,11 +4100,54 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
4044/** 4100/**
4045 * fc_bsg_remove - Deletes the bsg hooks on fchosts/rports 4101 * fc_bsg_remove - Deletes the bsg hooks on fchosts/rports
4046 * @q: the request_queue that is to be torn down. 4102 * @q: the request_queue that is to be torn down.
4103 *
4104 * Notes:
4105 * Before unregistering the queue empty any requests that are blocked
4106 *
4107 *
4047 */ 4108 */
4048static void 4109static void
4049fc_bsg_remove(struct request_queue *q) 4110fc_bsg_remove(struct request_queue *q)
4050{ 4111{
4112 struct request *req; /* block request */
4113 int counts; /* totals for request_list count and starved */
4114
4051 if (q) { 4115 if (q) {
4116 /* Stop taking in new requests */
4117 spin_lock_irq(q->queue_lock);
4118 blk_stop_queue(q);
4119
4120 /* drain all requests in the queue */
4121 while (1) {
4122 /* need the lock to fetch a request
4123 * this may fetch the same reqeust as the previous pass
4124 */
4125 req = blk_fetch_request(q);
4126 /* save requests in use and starved */
4127 counts = q->rq.count[0] + q->rq.count[1] +
4128 q->rq.starved[0] + q->rq.starved[1];
4129 spin_unlock_irq(q->queue_lock);
4130 /* any requests still outstanding? */
4131 if (counts == 0)
4132 break;
4133
4134 /* This may be the same req as the previous iteration,
4135 * always send the blk_end_request_all after a prefetch.
4136 * It is not okay to not end the request because the
4137 * prefetch started the request.
4138 */
4139 if (req) {
4140 /* return -ENXIO to indicate that this queue is
4141 * going away
4142 */
4143 req->errors = -ENXIO;
4144 blk_end_request_all(req, -ENXIO);
4145 }
4146
4147 msleep(200); /* allow bsg to possibly finish */
4148 spin_lock_irq(q->queue_lock);
4149 }
4150
4052 bsg_unregister_queue(q); 4151 bsg_unregister_queue(q);
4053 blk_cleanup_queue(q); 4152 blk_cleanup_queue(q);
4054 } 4153 }
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index e84026def1f4..332387a6bc25 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -537,7 +537,7 @@ static void iscsi_scan_session(struct work_struct *work)
537 537
538/** 538/**
539 * iscsi_block_scsi_eh - block scsi eh until session state has transistioned 539 * iscsi_block_scsi_eh - block scsi eh until session state has transistioned
540 * cmd: scsi cmd passed to scsi eh handler 540 * @cmd: scsi cmd passed to scsi eh handler
541 * 541 *
542 * If the session is down this function will wait for the recovery 542 * If the session is down this function will wait for the recovery
543 * timer to fire or for the session to be logged back in. If the 543 * timer to fire or for the session to be logged back in. If the
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 20514c47a5aa..20295774bf70 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -477,7 +477,7 @@ static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
477 477
478static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq) 478static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq)
479{ 479{
480 rq->timeout = SD_TIMEOUT; 480 rq->timeout = SD_FLUSH_TIMEOUT;
481 rq->retries = SD_MAX_RETRIES; 481 rq->retries = SD_MAX_RETRIES;
482 rq->cmd[0] = SYNCHRONIZE_CACHE; 482 rq->cmd[0] = SYNCHRONIZE_CACHE;
483 rq->cmd_len = 10; 483 rq->cmd_len = 10;
@@ -1072,7 +1072,7 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
1072 * flush everything. 1072 * flush everything.
1073 */ 1073 */
1074 res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, 1074 res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
1075 SD_TIMEOUT, SD_MAX_RETRIES, NULL); 1075 SD_FLUSH_TIMEOUT, SD_MAX_RETRIES, NULL);
1076 if (res == 0) 1076 if (res == 0)
1077 break; 1077 break;
1078 } 1078 }
@@ -1554,7 +1554,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
1554 } 1554 }
1555 1555
1556 /* Logical blocks per physical block exponent */ 1556 /* Logical blocks per physical block exponent */
1557 sdkp->hw_sector_size = (1 << (buffer[13] & 0xf)) * sector_size; 1557 sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size;
1558 1558
1559 /* Lowest aligned logical block */ 1559 /* Lowest aligned logical block */
1560 alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size; 1560 alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size;
@@ -1567,7 +1567,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
1567 struct request_queue *q = sdp->request_queue; 1567 struct request_queue *q = sdp->request_queue;
1568 1568
1569 sdkp->thin_provisioning = 1; 1569 sdkp->thin_provisioning = 1;
1570 q->limits.discard_granularity = sdkp->hw_sector_size; 1570 q->limits.discard_granularity = sdkp->physical_block_size;
1571 q->limits.max_discard_sectors = 0xffffffff; 1571 q->limits.max_discard_sectors = 0xffffffff;
1572 1572
1573 if (buffer[14] & 0x40) /* TPRZ */ 1573 if (buffer[14] & 0x40) /* TPRZ */
@@ -1635,7 +1635,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
1635 } 1635 }
1636 1636
1637 sdkp->capacity = lba + 1; 1637 sdkp->capacity = lba + 1;
1638 sdkp->hw_sector_size = sector_size; 1638 sdkp->physical_block_size = sector_size;
1639 return sector_size; 1639 return sector_size;
1640} 1640}
1641 1641
@@ -1756,10 +1756,10 @@ got_data:
1756 (unsigned long long)sdkp->capacity, 1756 (unsigned long long)sdkp->capacity,
1757 sector_size, cap_str_10, cap_str_2); 1757 sector_size, cap_str_10, cap_str_2);
1758 1758
1759 if (sdkp->hw_sector_size != sector_size) 1759 if (sdkp->physical_block_size != sector_size)
1760 sd_printk(KERN_NOTICE, sdkp, 1760 sd_printk(KERN_NOTICE, sdkp,
1761 "%u-byte physical blocks\n", 1761 "%u-byte physical blocks\n",
1762 sdkp->hw_sector_size); 1762 sdkp->physical_block_size);
1763 } 1763 }
1764 } 1764 }
1765 1765
@@ -1773,7 +1773,8 @@ got_data:
1773 else if (sector_size == 256) 1773 else if (sector_size == 256)
1774 sdkp->capacity >>= 1; 1774 sdkp->capacity >>= 1;
1775 1775
1776 blk_queue_physical_block_size(sdp->request_queue, sdkp->hw_sector_size); 1776 blk_queue_physical_block_size(sdp->request_queue,
1777 sdkp->physical_block_size);
1777 sdkp->device->sector_size = sector_size; 1778 sdkp->device->sector_size = sector_size;
1778} 1779}
1779 1780
@@ -2039,14 +2040,24 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
2039 lba_count = get_unaligned_be32(&buffer[20]); 2040 lba_count = get_unaligned_be32(&buffer[20]);
2040 desc_count = get_unaligned_be32(&buffer[24]); 2041 desc_count = get_unaligned_be32(&buffer[24]);
2041 2042
2042 if (lba_count) { 2043 if (lba_count && desc_count) {
2043 q->limits.max_discard_sectors = 2044 if (sdkp->tpvpd && !sdkp->tpu)
2044 lba_count * sector_sz >> 9; 2045 sdkp->unmap = 0;
2045 2046 else
2046 if (desc_count)
2047 sdkp->unmap = 1; 2047 sdkp->unmap = 1;
2048 } 2048 }
2049 2049
2050 if (sdkp->tpvpd && !sdkp->tpu && !sdkp->tpws) {
2051 sd_printk(KERN_ERR, sdkp, "Thin provisioning is " \
2052 "enabled but neither TPU, nor TPWS are " \
2053 "set. Disabling discard!\n");
2054 goto out;
2055 }
2056
2057 if (lba_count)
2058 q->limits.max_discard_sectors =
2059 lba_count * sector_sz >> 9;
2060
2050 granularity = get_unaligned_be32(&buffer[28]); 2061 granularity = get_unaligned_be32(&buffer[28]);
2051 2062
2052 if (granularity) 2063 if (granularity)
@@ -2087,6 +2098,31 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
2087 kfree(buffer); 2098 kfree(buffer);
2088} 2099}
2089 2100
2101/**
2102 * sd_read_thin_provisioning - Query thin provisioning VPD page
2103 * @disk: disk to query
2104 */
2105static void sd_read_thin_provisioning(struct scsi_disk *sdkp)
2106{
2107 unsigned char *buffer;
2108 const int vpd_len = 8;
2109
2110 if (sdkp->thin_provisioning == 0)
2111 return;
2112
2113 buffer = kmalloc(vpd_len, GFP_KERNEL);
2114
2115 if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb2, buffer, vpd_len))
2116 goto out;
2117
2118 sdkp->tpvpd = 1;
2119 sdkp->tpu = (buffer[5] >> 7) & 1; /* UNMAP */
2120 sdkp->tpws = (buffer[5] >> 6) & 1; /* WRITE SAME(16) with UNMAP */
2121
2122 out:
2123 kfree(buffer);
2124}
2125
2090static int sd_try_extended_inquiry(struct scsi_device *sdp) 2126static int sd_try_extended_inquiry(struct scsi_device *sdp)
2091{ 2127{
2092 /* 2128 /*
@@ -2138,6 +2174,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
2138 sd_read_capacity(sdkp, buffer); 2174 sd_read_capacity(sdkp, buffer);
2139 2175
2140 if (sd_try_extended_inquiry(sdp)) { 2176 if (sd_try_extended_inquiry(sdp)) {
2177 sd_read_thin_provisioning(sdkp);
2141 sd_read_block_limits(sdkp); 2178 sd_read_block_limits(sdkp);
2142 sd_read_block_characteristics(sdkp); 2179 sd_read_block_characteristics(sdkp);
2143 } 2180 }
@@ -2250,11 +2287,10 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
2250 index = sdkp->index; 2287 index = sdkp->index;
2251 dev = &sdp->sdev_gendev; 2288 dev = &sdp->sdev_gendev;
2252 2289
2253 if (index < SD_MAX_DISKS) { 2290 gd->major = sd_major((index & 0xf0) >> 4);
2254 gd->major = sd_major((index & 0xf0) >> 4); 2291 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
2255 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00); 2292 gd->minors = SD_MINORS;
2256 gd->minors = SD_MINORS; 2293
2257 }
2258 gd->fops = &sd_fops; 2294 gd->fops = &sd_fops;
2259 gd->private_data = &sdkp->driver; 2295 gd->private_data = &sdkp->driver;
2260 gd->queue = sdkp->device->request_queue; 2296 gd->queue = sdkp->device->request_queue;
@@ -2344,6 +2380,12 @@ static int sd_probe(struct device *dev)
2344 if (error) 2380 if (error)
2345 goto out_put; 2381 goto out_put;
2346 2382
2383 if (index >= SD_MAX_DISKS) {
2384 error = -ENODEV;
2385 sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name space exhausted.\n");
2386 goto out_free_index;
2387 }
2388
2347 error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN); 2389 error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
2348 if (error) 2390 if (error)
2349 goto out_free_index; 2391 goto out_free_index;
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index f81a9309e6de..55488faf0815 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -19,6 +19,7 @@
19 */ 19 */
20#define SD_TIMEOUT (30 * HZ) 20#define SD_TIMEOUT (30 * HZ)
21#define SD_MOD_TIMEOUT (75 * HZ) 21#define SD_MOD_TIMEOUT (75 * HZ)
22#define SD_FLUSH_TIMEOUT (60 * HZ)
22 23
23/* 24/*
24 * Number of allowed retries 25 * Number of allowed retries
@@ -50,7 +51,7 @@ struct scsi_disk {
50 atomic_t openers; 51 atomic_t openers;
51 sector_t capacity; /* size in 512-byte sectors */ 52 sector_t capacity; /* size in 512-byte sectors */
52 u32 index; 53 u32 index;
53 unsigned short hw_sector_size; 54 unsigned int physical_block_size;
54 u8 media_present; 55 u8 media_present;
55 u8 write_prot; 56 u8 write_prot;
56 u8 protection_type;/* Data Integrity Field */ 57 u8 protection_type;/* Data Integrity Field */
@@ -62,6 +63,9 @@ struct scsi_disk {
62 unsigned first_scan : 1; 63 unsigned first_scan : 1;
63 unsigned thin_provisioning : 1; 64 unsigned thin_provisioning : 1;
64 unsigned unmap : 1; 65 unsigned unmap : 1;
66 unsigned tpws : 1;
67 unsigned tpu : 1;
68 unsigned tpvpd : 1;
65}; 69};
66#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev) 70#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
67 71
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index afdc3f5d915c..5b7388f1c835 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -9,7 +9,7 @@
9 Steve Hirsch, Andreas Koppenh"ofer, Michael Leodolter, Eyal Lebedinsky, 9 Steve Hirsch, Andreas Koppenh"ofer, Michael Leodolter, Eyal Lebedinsky,
10 Michael Schaefer, J"org Weule, and Eric Youngdale. 10 Michael Schaefer, J"org Weule, and Eric Youngdale.
11 11
12 Copyright 1992 - 2008 Kai Makisara 12 Copyright 1992 - 2010 Kai Makisara
13 email Kai.Makisara@kolumbus.fi 13 email Kai.Makisara@kolumbus.fi
14 14
15 Some small formal changes - aeb, 950809 15 Some small formal changes - aeb, 950809
@@ -17,7 +17,7 @@
17 Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support 17 Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
18 */ 18 */
19 19
20static const char *verstr = "20081215"; 20static const char *verstr = "20100829";
21 21
22#include <linux/module.h> 22#include <linux/module.h>
23 23
@@ -2696,18 +2696,21 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
2696 } 2696 }
2697 break; 2697 break;
2698 case MTWEOF: 2698 case MTWEOF:
2699 case MTWEOFI:
2699 case MTWSM: 2700 case MTWSM:
2700 if (STp->write_prot) 2701 if (STp->write_prot)
2701 return (-EACCES); 2702 return (-EACCES);
2702 cmd[0] = WRITE_FILEMARKS; 2703 cmd[0] = WRITE_FILEMARKS;
2703 if (cmd_in == MTWSM) 2704 if (cmd_in == MTWSM)
2704 cmd[1] = 2; 2705 cmd[1] = 2;
2706 if (cmd_in == MTWEOFI)
2707 cmd[1] |= 1;
2705 cmd[2] = (arg >> 16); 2708 cmd[2] = (arg >> 16);
2706 cmd[3] = (arg >> 8); 2709 cmd[3] = (arg >> 8);
2707 cmd[4] = arg; 2710 cmd[4] = arg;
2708 timeout = STp->device->request_queue->rq_timeout; 2711 timeout = STp->device->request_queue->rq_timeout;
2709 DEBC( 2712 DEBC(
2710 if (cmd_in == MTWEOF) 2713 if (cmd_in != MTWSM)
2711 printk(ST_DEB_MSG "%s: Writing %d filemarks.\n", name, 2714 printk(ST_DEB_MSG "%s: Writing %d filemarks.\n", name,
2712 cmd[2] * 65536 + cmd[3] * 256 + cmd[4]); 2715 cmd[2] * 65536 + cmd[3] * 256 + cmd[4]);
2713 else 2716 else
@@ -2883,8 +2886,8 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
2883 else if (chg_eof) 2886 else if (chg_eof)
2884 STps->eof = ST_NOEOF; 2887 STps->eof = ST_NOEOF;
2885 2888
2886 if (cmd_in == MTWEOF) 2889 if (cmd_in == MTWEOF || cmd_in == MTWEOFI)
2887 STps->rw = ST_IDLE; 2890 STps->rw = ST_IDLE; /* prevent automatic WEOF at close */
2888 } else { /* SCSI command was not completely successful. Don't return 2891 } else { /* SCSI command was not completely successful. Don't return
2889 from this block without releasing the SCSI command block! */ 2892 from this block without releasing the SCSI command block! */
2890 struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; 2893 struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat;
@@ -2901,7 +2904,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
2901 else 2904 else
2902 undone = 0; 2905 undone = 0;
2903 2906
2904 if (cmd_in == MTWEOF && 2907 if ((cmd_in == MTWEOF || cmd_in == MTWEOFI) &&
2905 cmdstatp->have_sense && 2908 cmdstatp->have_sense &&
2906 (cmdstatp->flags & SENSE_EOM)) { 2909 (cmdstatp->flags & SENSE_EOM)) {
2907 if (cmdstatp->sense_hdr.sense_key == NO_SENSE || 2910 if (cmdstatp->sense_hdr.sense_key == NO_SENSE ||